massgen 0.1.0a2__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of massgen has been flagged as possibly problematic by the registry.

Files changed (111)
  1. massgen/__init__.py +1 -1
  2. massgen/agent_config.py +17 -0
  3. massgen/api_params_handler/_api_params_handler_base.py +1 -0
  4. massgen/api_params_handler/_chat_completions_api_params_handler.py +8 -1
  5. massgen/api_params_handler/_claude_api_params_handler.py +8 -1
  6. massgen/api_params_handler/_gemini_api_params_handler.py +73 -0
  7. massgen/api_params_handler/_response_api_params_handler.py +8 -1
  8. massgen/backend/base.py +31 -0
  9. massgen/backend/{base_with_mcp.py → base_with_custom_tool_and_mcp.py} +282 -11
  10. massgen/backend/chat_completions.py +182 -92
  11. massgen/backend/claude.py +115 -18
  12. massgen/backend/claude_code.py +378 -14
  13. massgen/backend/docs/CLAUDE_API_RESEARCH.md +3 -3
  14. massgen/backend/gemini.py +1275 -1607
  15. massgen/backend/gemini_mcp_manager.py +545 -0
  16. massgen/backend/gemini_trackers.py +344 -0
  17. massgen/backend/gemini_utils.py +43 -0
  18. massgen/backend/response.py +129 -70
  19. massgen/cli.py +643 -132
  20. massgen/config_builder.py +381 -32
  21. massgen/configs/README.md +111 -80
  22. massgen/configs/basic/multi/three_agents_default.yaml +1 -1
  23. massgen/configs/basic/single/single_agent.yaml +1 -1
  24. massgen/configs/providers/openai/gpt5_nano.yaml +3 -3
  25. massgen/configs/tools/custom_tools/claude_code_custom_tool_example.yaml +32 -0
  26. massgen/configs/tools/custom_tools/claude_code_custom_tool_example_no_path.yaml +28 -0
  27. massgen/configs/tools/custom_tools/claude_code_custom_tool_with_mcp_example.yaml +40 -0
  28. massgen/configs/tools/custom_tools/claude_code_custom_tool_with_wrong_mcp_example.yaml +38 -0
  29. massgen/configs/tools/custom_tools/claude_code_wrong_custom_tool_with_mcp_example.yaml +38 -0
  30. massgen/configs/tools/custom_tools/claude_custom_tool_example.yaml +24 -0
  31. massgen/configs/tools/custom_tools/claude_custom_tool_example_no_path.yaml +22 -0
  32. massgen/configs/tools/custom_tools/claude_custom_tool_with_mcp_example.yaml +35 -0
  33. massgen/configs/tools/custom_tools/claude_custom_tool_with_wrong_mcp_example.yaml +33 -0
  34. massgen/configs/tools/custom_tools/claude_wrong_custom_tool_with_mcp_example.yaml +33 -0
  35. massgen/configs/tools/custom_tools/gemini_custom_tool_example.yaml +24 -0
  36. massgen/configs/tools/custom_tools/gemini_custom_tool_example_no_path.yaml +22 -0
  37. massgen/configs/tools/custom_tools/gemini_custom_tool_with_mcp_example.yaml +35 -0
  38. massgen/configs/tools/custom_tools/gemini_custom_tool_with_wrong_mcp_example.yaml +33 -0
  39. massgen/configs/tools/custom_tools/gemini_wrong_custom_tool_with_mcp_example.yaml +33 -0
  40. massgen/configs/tools/custom_tools/github_issue_market_analysis.yaml +94 -0
  41. massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_example.yaml +24 -0
  42. massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_example_no_path.yaml +22 -0
  43. massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_with_mcp_example.yaml +35 -0
  44. massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_with_wrong_mcp_example.yaml +33 -0
  45. massgen/configs/tools/custom_tools/gpt5_nano_wrong_custom_tool_with_mcp_example.yaml +33 -0
  46. massgen/configs/tools/custom_tools/gpt_oss_custom_tool_example.yaml +25 -0
  47. massgen/configs/tools/custom_tools/gpt_oss_custom_tool_example_no_path.yaml +23 -0
  48. massgen/configs/tools/custom_tools/gpt_oss_custom_tool_with_mcp_example.yaml +34 -0
  49. massgen/configs/tools/custom_tools/gpt_oss_custom_tool_with_wrong_mcp_example.yaml +34 -0
  50. massgen/configs/tools/custom_tools/gpt_oss_wrong_custom_tool_with_mcp_example.yaml +34 -0
  51. massgen/configs/tools/custom_tools/grok3_mini_custom_tool_example.yaml +24 -0
  52. massgen/configs/tools/custom_tools/grok3_mini_custom_tool_example_no_path.yaml +22 -0
  53. massgen/configs/tools/custom_tools/grok3_mini_custom_tool_with_mcp_example.yaml +35 -0
  54. massgen/configs/tools/custom_tools/grok3_mini_custom_tool_with_wrong_mcp_example.yaml +33 -0
  55. massgen/configs/tools/custom_tools/grok3_mini_wrong_custom_tool_with_mcp_example.yaml +33 -0
  56. massgen/configs/tools/custom_tools/qwen_api_custom_tool_example.yaml +25 -0
  57. massgen/configs/tools/custom_tools/qwen_api_custom_tool_example_no_path.yaml +23 -0
  58. massgen/configs/tools/custom_tools/qwen_api_custom_tool_with_mcp_example.yaml +36 -0
  59. massgen/configs/tools/custom_tools/qwen_api_custom_tool_with_wrong_mcp_example.yaml +34 -0
  60. massgen/configs/tools/custom_tools/qwen_api_wrong_custom_tool_with_mcp_example.yaml +34 -0
  61. massgen/configs/tools/custom_tools/qwen_local_custom_tool_example.yaml +24 -0
  62. massgen/configs/tools/custom_tools/qwen_local_custom_tool_example_no_path.yaml +22 -0
  63. massgen/configs/tools/custom_tools/qwen_local_custom_tool_with_mcp_example.yaml +35 -0
  64. massgen/configs/tools/custom_tools/qwen_local_custom_tool_with_wrong_mcp_example.yaml +33 -0
  65. massgen/configs/tools/custom_tools/qwen_local_wrong_custom_tool_with_mcp_example.yaml +33 -0
  66. massgen/configs/tools/filesystem/claude_code_context_sharing.yaml +1 -1
  67. massgen/configs/voting/gemini_gpt_voting_sensitivity.yaml +67 -0
  68. massgen/formatter/_chat_completions_formatter.py +104 -0
  69. massgen/formatter/_claude_formatter.py +120 -0
  70. massgen/formatter/_gemini_formatter.py +448 -0
  71. massgen/formatter/_response_formatter.py +88 -0
  72. massgen/frontend/coordination_ui.py +4 -2
  73. massgen/logger_config.py +35 -3
  74. massgen/message_templates.py +56 -6
  75. massgen/orchestrator.py +179 -10
  76. massgen/stream_chunk/base.py +3 -0
  77. massgen/tests/custom_tools_example.py +392 -0
  78. massgen/tests/mcp_test_server.py +17 -7
  79. massgen/tests/test_config_builder.py +423 -0
  80. massgen/tests/test_custom_tools.py +401 -0
  81. massgen/tests/test_tools.py +127 -0
  82. massgen/tool/README.md +935 -0
  83. massgen/tool/__init__.py +39 -0
  84. massgen/tool/_async_helpers.py +70 -0
  85. massgen/tool/_basic/__init__.py +8 -0
  86. massgen/tool/_basic/_two_num_tool.py +24 -0
  87. massgen/tool/_code_executors/__init__.py +10 -0
  88. massgen/tool/_code_executors/_python_executor.py +74 -0
  89. massgen/tool/_code_executors/_shell_executor.py +61 -0
  90. massgen/tool/_exceptions.py +39 -0
  91. massgen/tool/_file_handlers/__init__.py +10 -0
  92. massgen/tool/_file_handlers/_file_operations.py +218 -0
  93. massgen/tool/_manager.py +634 -0
  94. massgen/tool/_registered_tool.py +88 -0
  95. massgen/tool/_result.py +66 -0
  96. massgen/tool/_self_evolution/_github_issue_analyzer.py +369 -0
  97. massgen/tool/docs/builtin_tools.md +681 -0
  98. massgen/tool/docs/exceptions.md +794 -0
  99. massgen/tool/docs/execution_results.md +691 -0
  100. massgen/tool/docs/manager.md +887 -0
  101. massgen/tool/docs/workflow_toolkits.md +529 -0
  102. massgen/tool/workflow_toolkits/__init__.py +57 -0
  103. massgen/tool/workflow_toolkits/base.py +55 -0
  104. massgen/tool/workflow_toolkits/new_answer.py +126 -0
  105. massgen/tool/workflow_toolkits/vote.py +167 -0
  106. {massgen-0.1.0a2.dist-info → massgen-0.1.1.dist-info}/METADATA +89 -131
  107. {massgen-0.1.0a2.dist-info → massgen-0.1.1.dist-info}/RECORD +111 -36
  108. {massgen-0.1.0a2.dist-info → massgen-0.1.1.dist-info}/WHEEL +0 -0
  109. {massgen-0.1.0a2.dist-info → massgen-0.1.1.dist-info}/entry_points.txt +0 -0
  110. {massgen-0.1.0a2.dist-info → massgen-0.1.1.dist-info}/licenses/LICENSE +0 -0
  111. {massgen-0.1.0a2.dist-info → massgen-0.1.1.dist-info}/top_level.txt +0 -0
massgen/backend/chat_completions.py CHANGED
@@ -31,10 +31,10 @@ from ..logger_config import log_backend_agent_message, log_stream_chunk, logger

  # Local imports
  from .base import FilesystemSupport, StreamChunk
- from .base_with_mcp import MCPBackend
+ from .base_with_custom_tool_and_mcp import CustomToolAndMCPBackend


- class ChatCompletionsBackend(MCPBackend):
+ class ChatCompletionsBackend(CustomToolAndMCPBackend):
  """Complete OpenAI-compatible Chat Completions API backend.

  Can be used directly with any OpenAI-compatible provider by setting provider name.
@@ -67,14 +67,14 @@ class ChatCompletionsBackend(MCPBackend):
  async for chunk in super().stream_with_tools(messages, tools, **kwargs):
  yield chunk

- async def _stream_with_mcp_tools(
+ async def _stream_with_custom_and_mcp_tools(
  self,
  current_messages: List[Dict[str, Any]],
  tools: List[Dict[str, Any]],
  client,
  **kwargs,
  ) -> AsyncGenerator[StreamChunk, None]:
- """Recursively stream MCP responses, executing function calls as needed."""
+ """Recursively stream responses, executing custom and MCP tool calls as needed."""

  # Build API params for this iteration
  all_params = {**self.config, **kwargs}
@@ -193,14 +193,28 @@ class ChatCompletionsBackend(MCPBackend):

  # Execute any captured function calls
  if captured_function_calls and response_completed:
- # Check if any of the function calls are NOT MCP functions
- non_mcp_functions = [call for call in captured_function_calls if call["name"] not in self._mcp_functions]
+ # Categorize function calls
+ mcp_calls = []
+ custom_calls = []
+ provider_calls = []

- if non_mcp_functions:
- logger.info(f"Non-MCP function calls detected (will be ignored in MCP execution): {[call['name'] for call in non_mcp_functions]}")
+ for call in captured_function_calls:
+ if call["name"] in self._mcp_functions:
+ mcp_calls.append(call)
+ elif call["name"] in self._custom_tool_names:
+ custom_calls.append(call)
+ else:
+ provider_calls.append(call)
+
+ # If there are provider calls (non-MCP, non-custom), let API handle them
+ if provider_calls:
+ logger.info(f"Provider function calls detected: {[call['name'] for call in provider_calls]}. Ending local processing.")
+ yield StreamChunk(type="done")
+ return

  # Check circuit breaker status before executing MCP functions
- if not await self._check_circuit_breaker_before_execution():
+ if mcp_calls and not await self._check_circuit_breaker_before_execution():
+ logger.warning("All MCP servers blocked by circuit breaker")
  yield StreamChunk(
  type="mcp_status",
  status="mcp_blocked",
@@ -210,9 +224,10 @@ class ChatCompletionsBackend(MCPBackend):
  yield StreamChunk(type="done")
  return

- # Execute only MCP function calls
- mcp_functions_executed = False
+ # Initialize for execution
+ functions_executed = False
  updated_messages = current_messages.copy()
+ processed_call_ids = set() # Track processed calls

  # Check if planning mode is enabled - block MCP tool execution during planning
  if self.is_planning_mode_enabled():
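Note (not part of the diff): the two hunks above replace the old binary MCP/non-MCP split with a three-way routing of captured calls. A minimal standalone sketch of that categorization follows; the set arguments stand in for the backend's self._mcp_functions and self._custom_tool_names registries, and the tool names are purely illustrative.

from typing import Any, Dict, List, Set, Tuple

def categorize_calls(
    captured_function_calls: List[Dict[str, Any]],
    mcp_functions: Set[str],
    custom_tool_names: Set[str],
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]:
    """Split captured calls into MCP, custom, and provider-handled groups."""
    mcp_calls: List[Dict[str, Any]] = []
    custom_calls: List[Dict[str, Any]] = []
    provider_calls: List[Dict[str, Any]] = []
    for call in captured_function_calls:
        if call["name"] in mcp_functions:
            mcp_calls.append(call)
        elif call["name"] in custom_tool_names:
            custom_calls.append(call)
        else:
            provider_calls.append(call)
    return mcp_calls, custom_calls, provider_calls

# Illustrative names only: provider calls short-circuit local processing (the early "done" in the hunk).
mcp, custom, provider = categorize_calls(
    [
        {"name": "mcp__weather", "call_id": "c1", "arguments": "{}"},
        {"name": "add_two_numbers", "call_id": "c2", "arguments": '{"a": 1, "b": 2}'},
        {"name": "web_search", "call_id": "c3", "arguments": "{}"},
    ],
    mcp_functions={"mcp__weather"},
    custom_tool_names={"add_two_numbers"},
)
assert [c["call_id"] for c in provider] == ["c3"]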
@@ -252,11 +267,90 @@ class ChatCompletionsBackend(MCPBackend):
  }
  updated_messages.append(assistant_message)

- # Execute functions and collect results
- tool_results = []
- for call in captured_function_calls:
+ # Execute custom tools first
+ for call in custom_calls:
+ try:
+ # Yield custom tool call status
+ yield StreamChunk(
+ type="custom_tool_status",
+ status="custom_tool_called",
+ content=f"🔧 [Custom Tool] Calling {call['name']}...",
+ source=f"custom_{call['name']}",
+ )
+
+ # Yield custom tool arguments
+ yield StreamChunk(
+ type="custom_tool_status",
+ status="function_call",
+ content=f"Arguments for Calling {call['name']}: {call['arguments']}",
+ source=f"custom_{call['name']}",
+ )
+
+ # Execute custom tool
+ result = await self._execute_custom_tool(call)
+
+ # Add function result to messages
+ function_output_msg = {
+ "role": "tool",
+ "tool_call_id": call["call_id"],
+ "content": str(result),
+ }
+ updated_messages.append(function_output_msg)
+
+ # Yield custom tool results
+ yield StreamChunk(
+ type="custom_tool_status",
+ status="function_call_output",
+ content=f"Results for Calling {call['name']}: {str(result)}",
+ source=f"custom_{call['name']}",
+ )
+
+ # Yield custom tool response status
+ yield StreamChunk(
+ type="custom_tool_status",
+ status="custom_tool_response",
+ content=f"✅ [Custom Tool] {call['name']} completed",
+ source=f"custom_{call['name']}",
+ )
+
+ processed_call_ids.add(call["call_id"])
+ functions_executed = True
+ logger.info(f"Executed custom tool: {call['name']}")
+
+ except Exception as e:
+ logger.error(f"Error executing custom tool {call['name']}: {e}")
+ error_msg = f"Error executing {call['name']}: {str(e)}"
+
+ # Yield error with arguments shown
+ yield StreamChunk(
+ type="custom_tool_status",
+ status="function_call",
+ content=f"Arguments for Calling {call['name']}: {call['arguments']}",
+ source=f"custom_{call['name']}",
+ )
+
+ yield StreamChunk(
+ type="custom_tool_status",
+ status="custom_tool_error",
+ content=f"❌ [Custom Tool Error] {error_msg}",
+ source=f"custom_{call['name']}",
+ )
+
+ # Add error result to messages
+ error_output_msg = {
+ "role": "tool",
+ "tool_call_id": call["call_id"],
+ "content": error_msg,
+ }
+ updated_messages.append(error_output_msg)
+ processed_call_ids.add(call["call_id"])
+ functions_executed = True
+
+ # Execute MCP function calls
+ mcp_functions_executed = False
+ for call in mcp_calls:
  function_name = call["name"]
- if self.is_mcp_tool_call(function_name):
+ if function_name in self._mcp_functions:
  yield StreamChunk(
  type="mcp_status",
  status="mcp_tool_called",
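Note (not part of the diff): the custom-tool loop in the hunk above appends results in the standard Chat Completions tool-result shape keyed by tool_call_id, and records every call id so the later fallback pass can detect unprocessed calls. A simplified, non-streaming sketch of that bookkeeping follows; the execute callable stands in for self._execute_custom_tool and is not massgen's API.

import asyncio
from typing import Any, Awaitable, Callable, Dict, List, Set

async def run_custom_calls(
    custom_calls: List[Dict[str, Any]],
    execute: Callable[[Dict[str, Any]], Awaitable[Any]],
    updated_messages: List[Dict[str, Any]],
    processed_call_ids: Set[str],
) -> bool:
    """Execute custom tool calls; append one role='tool' message per call, even on error."""
    functions_executed = False
    for call in custom_calls:
        try:
            content = str(await execute(call))
        except Exception as e:  # errors become tool output rather than aborting the stream
            content = f"Error executing {call['name']}: {e}"
        updated_messages.append(
            {"role": "tool", "tool_call_id": call["call_id"], "content": content}
        )
        processed_call_ids.add(call["call_id"])
        functions_executed = True
    return functions_executed

async def _demo() -> None:
    async def fake_tool(call: Dict[str, Any]) -> str:  # hypothetical executor for illustration
        return f"ran {call['name']}"

    messages: List[Dict[str, Any]] = []
    seen: Set[str] = set()
    await run_custom_calls([{"name": "add_two_numbers", "call_id": "c2"}], fake_tool, messages, seen)
    assert messages[0]["tool_call_id"] in seen

asyncio.run(_demo())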
@@ -264,7 +358,7 @@ class ChatCompletionsBackend(MCPBackend):
  source=f"mcp_{function_name}",
  )

- # Yield detailed MCP status as StreamChunk (similar to gemini.py)
+ # Yield detailed MCP status as StreamChunk
  tools_info = f" ({len(self._mcp_functions)} tools available)" if self._mcp_functions else ""
  yield StreamChunk(
  type="mcp_status",
@@ -275,51 +369,40 @@ class ChatCompletionsBackend(MCPBackend):

  try:
  # Execute MCP function with retry and exponential backoff
- (
- result_str,
- result_obj,
- ) = await self._execute_mcp_function_with_retry(function_name, call["arguments"])
+ result_str, result_obj = await self._execute_mcp_function_with_retry(function_name, call["arguments"])

  # Check if function failed after all retries
  if isinstance(result_str, str) and result_str.startswith("Error:"):
  # Log failure but still create tool response
  logger.warning(f"MCP function {function_name} failed after retries: {result_str}")
- tool_results.append(
- {
- "tool_call_id": call["call_id"],
- "content": result_str,
- "success": False,
- },
- )
- else:
- # Yield MCP success status as StreamChunk (similar to gemini.py)
- yield StreamChunk(
- type="mcp_status",
- status="mcp_tools_success",
- content=f"MCP tool call succeeded (call #{self._mcp_tool_calls_count})",
- source=f"mcp_{function_name}",
- )

- tool_results.append(
- {
- "tool_call_id": call["call_id"],
- "content": result_str,
- "success": True,
- "result_obj": result_obj,
- },
- )
+ # Add error result to messages
+ function_output_msg = {
+ "role": "tool",
+ "tool_call_id": call["call_id"],
+ "content": result_str,
+ }
+ updated_messages.append(function_output_msg)
+
+ processed_call_ids.add(call["call_id"])
+ mcp_functions_executed = True
+ continue

  except Exception as e:
  # Only catch unexpected non-MCP system errors
  logger.error(f"Unexpected error in MCP function execution: {e}")
  error_msg = f"Error executing {function_name}: {str(e)}"
- tool_results.append(
- {
- "tool_call_id": call["call_id"],
- "content": error_msg,
- "success": False,
- },
- )
+
+ # Add error result to messages
+ function_output_msg = {
+ "role": "tool",
+ "tool_call_id": call["call_id"],
+ "content": error_msg,
+ }
+ updated_messages.append(function_output_msg)
+
+ processed_call_ids.add(call["call_id"])
+ mcp_functions_executed = True
  continue

  # Yield function_call status
@@ -330,60 +413,66 @@ class ChatCompletionsBackend(MCPBackend):
  source=f"mcp_{function_name}",
  )

- logger.info(f"Executed MCP function {function_name} (stdio/streamable-http)")
- mcp_functions_executed = True
- else:
- # For non-MCP functions, add a dummy tool result to maintain message consistency
- logger.info(f"Non-MCP function {function_name} detected, creating placeholder response")
- tool_results.append(
- {
- "tool_call_id": call["call_id"],
- "content": f"Function {function_name} is not available in this MCP session.",
- "success": False,
- },
+ # Add function output to messages and yield status chunk
+ function_output_msg = {
+ "role": "tool",
+ "tool_call_id": call["call_id"],
+ "content": str(result_str),
+ }
+ updated_messages.append(function_output_msg)
+
+ # Yield function_call_output status with preview
+ result_text = str(result_str)
+ if hasattr(result_obj, "content") and result_obj.content:
+ if isinstance(result_obj.content, list) and len(result_obj.content) > 0:
+ first_item = result_obj.content[0]
+ if hasattr(first_item, "text"):
+ result_text = first_item.text
+
+ yield StreamChunk(
+ type="mcp_status",
+ status="function_call_output",
+ content=f"Results for Calling {function_name}: {result_text}",
+ source=f"mcp_{function_name}",
  )

- # Add all tool response messages after the assistant message
- for result in tool_results:
- # Yield function_call_output status with preview
- result_text = str(result["content"])
- if result.get("success") and hasattr(result.get("result_obj"), "content") and result["result_obj"].content:
- obj = result["result_obj"]
- if isinstance(obj.content, list) and len(obj.content) > 0:
- first_item = obj.content[0]
- if hasattr(first_item, "text"):
- result_text = first_item.text
+ logger.info(f"Executed MCP function {function_name} (stdio/streamable-http)")
+ processed_call_ids.add(call["call_id"])

- yield StreamChunk(
- type="mcp_status",
- status="function_call_output",
- content=f"Results for Calling {function_name}: {result_text}",
- source=f"mcp_{function_name}",
- )
+ # Yield MCP tool response status
+ yield StreamChunk(
+ type="mcp_status",
+ status="mcp_tool_response",
+ content=f"✅ [MCP Tool] {function_name} completed",
+ source=f"mcp_{function_name}",
+ )

- function_output_msg = {
- "role": "tool",
- "tool_call_id": result["tool_call_id"],
- "content": result["content"],
- }
- updated_messages.append(function_output_msg)
+ mcp_functions_executed = True
+ functions_executed = True

- yield StreamChunk(
- type="mcp_status",
- status="mcp_tool_response",
- content=f" [MCP Tool] {function_name} completed",
- source=f"mcp_{function_name}",
- )
+ # Ensure all captured function calls have results to prevent hanging
+ for call in captured_function_calls:
+ if call["call_id"] not in processed_call_ids:
+ logger.warning(f"Tool call {call['call_id']} for function {call['name']} was not processed - adding error result")
+
+ # Add missing function call and error result to messages
+ error_output_msg = {
+ "role": "tool",
+ "tool_call_id": call["call_id"],
+ "content": f"Error: Tool call {call['call_id']} for function {call['name']} was not processed. This may indicate a validation or execution error.",
+ }
+ updated_messages.append(error_output_msg)
+ mcp_functions_executed = True

  # Trim history after function executions to bound memory usage
- if mcp_functions_executed:
+ if functions_executed or mcp_functions_executed:
  updated_messages = self._trim_message_history(updated_messages)

  # Recursive call with updated messages
- async for chunk in self._stream_with_mcp_tools(updated_messages, tools, client, **kwargs):
+ async for chunk in self._stream_with_custom_and_mcp_tools(updated_messages, tools, client, **kwargs):
  yield chunk
  else:
- # No MCP functions were executed, we're done
+ # No functions were executed, we're done
  yield StreamChunk(type="done")
  return

@@ -395,6 +484,7 @@ class ChatCompletionsBackend(MCPBackend):
  content="✅ [MCP] Session completed",
  source="mcp_session",
  )
+ yield StreamChunk(type="done")
  return

  async def _process_stream(self, stream, all_params, agent_id) -> AsyncGenerator[StreamChunk, None]:
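Note (not part of the diff): the rewritten MCP loop above previews each result by preferring the first text block of the returned result object and falling back to the stringified result. A small standalone sketch of that fallback follows, with made-up stand-in classes in place of the real MCP result types.

from dataclasses import dataclass, field
from typing import Any, List, Optional

@dataclass
class FakeTextBlock:  # stand-in for an MCP text content block
    text: str

@dataclass
class FakeToolResult:  # stand-in for an MCP call result object
    content: List[Any] = field(default_factory=list)

def result_preview(result_str: str, result_obj: Optional[Any]) -> str:
    """Prefer the first text block on the result object; otherwise use the raw string."""
    preview = str(result_str)
    if result_obj is not None and getattr(result_obj, "content", None):
        if isinstance(result_obj.content, list) and result_obj.content:
            first_item = result_obj.content[0]
            if hasattr(first_item, "text"):
                preview = first_item.text
    return preview

assert result_preview("raw result", FakeToolResult([FakeTextBlock("hello from MCP")])) == "hello from MCP"
assert result_preview("raw result", None) == "raw result"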
massgen/backend/claude.py CHANGED
@@ -37,10 +37,10 @@ from ..formatter import ClaudeFormatter
  from ..logger_config import log_backend_agent_message, log_stream_chunk, logger
  from ..mcp_tools.backend_utils import MCPErrorHandler
  from .base import FilesystemSupport, StreamChunk
- from .base_with_mcp import MCPBackend, UploadFileError
+ from .base_with_custom_tool_and_mcp import CustomToolAndMCPBackend, UploadFileError


- class ClaudeBackend(MCPBackend):
+ class ClaudeBackend(CustomToolAndMCPBackend):
  """Claude backend using Anthropic's Messages API with full multi-tool support."""

  def __init__(self, api_key: Optional[str] = None, **kwargs):
@@ -469,7 +469,7 @@ class ClaudeBackend(MCPBackend):
  "Claude Files API upload left unresolved file_pending_upload marker " f"(message {msg_idx}, item {item_idx}, source {identifier}).",
  )

- async def _stream_without_mcp_tools(
+ async def _stream_without_custom_and_mcp_tools(
  self,
  messages: List[Dict[str, Any]],
  tools: List[Dict[str, Any]],
@@ -501,6 +501,8 @@ class ClaudeBackend(MCPBackend):
  name = tool.get("function", {}).get("name") if "function" in tool else tool.get("name")
  if name and name in self._mcp_function_names:
  continue
+ if name and name in self._custom_tool_names:
+ continue
  elif tool.get("type") == "mcp":
  continue
  non_mcp_tools.append(tool)
@@ -519,14 +521,14 @@ class ClaudeBackend(MCPBackend):
  async for chunk in self._process_stream(stream, all_params, agent_id):
  yield chunk

- async def _stream_with_mcp_tools(
+ async def _stream_with_custom_and_mcp_tools(
  self,
  current_messages: List[Dict[str, Any]],
  tools: List[Dict[str, Any]],
  client,
  **kwargs,
  ) -> AsyncGenerator[StreamChunk, None]:
- """Recursively stream responses, executing MCP function calls when detected."""
+ """Recursively stream responses, executing MCP and custom tool function calls when detected."""

  # Build API params for this iteration
  all_params = {**self.config, **kwargs}
@@ -554,6 +556,7 @@ class ClaudeBackend(MCPBackend):
  content = ""
  current_tool_uses: Dict[str, Dict[str, Any]] = {}
  mcp_tool_calls: List[Dict[str, Any]] = []
+ custom_tool_calls: List[Dict[str, Any]] = []
  response_completed = False

  async for event in stream:
@@ -657,8 +660,8 @@ class ClaudeBackend(MCPBackend):
  elif event.type == "message_delta":
  pass
  elif event.type == "message_stop":
- # Identify MCP and non-MCP tool calls among current_tool_uses
- non_mcp_tool_calls = []
+ # Identify MCP, custom, and non-MCP/non-custom tool calls among current_tool_uses
+ non_mcp_non_custom_tool_calls = []
  if current_tool_uses:
  for tool_use in current_tool_uses.values():
  tool_name = tool_use.get("name", "")
@@ -683,8 +686,19 @@ class ClaudeBackend(MCPBackend):
  },
  },
  )
+ elif self.is_custom_tool_call(tool_name):
+ custom_tool_calls.append(
+ {
+ "id": tool_use["id"],
+ "type": "function",
+ "function": {
+ "name": tool_name,
+ "arguments": parsed_input,
+ },
+ },
+ )
  else:
- non_mcp_tool_calls.append(
+ non_mcp_non_custom_tool_calls.append(
  {
  "id": tool_use["id"],
  "type": "function",
@@ -694,10 +708,10 @@ class ClaudeBackend(MCPBackend):
  },
  },
  )
- # Emit non-MCP tool calls for the caller to execute
- if non_mcp_tool_calls:
- log_stream_chunk("backend.claude", "tool_calls", non_mcp_tool_calls, agent_id)
- yield StreamChunk(type="tool_calls", tool_calls=non_mcp_tool_calls)
+ # Emit non-MCP/non-custom tool calls for the caller to execute
+ if non_mcp_non_custom_tool_calls:
+ log_stream_chunk("backend.claude", "tool_calls", non_mcp_non_custom_tool_calls, agent_id)
+ yield StreamChunk(type="tool_calls", tool_calls=non_mcp_non_custom_tool_calls)
  response_completed = True
  break
  except Exception as event_error:
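Note (not part of the diff): the two hunks above split each completed tool_use block three ways, and only calls that are neither MCP nor registered custom tools are surfaced as an ordinary tool_calls chunk for the caller. A compact sketch of that classification follows; the plain predicates stand in for the backend's MCP/custom checks, and representing parsed_input by the block's "input" field is an assumption made for illustration.

from typing import Any, Callable, Dict, List, Tuple

def classify_tool_uses(
    tool_uses: List[Dict[str, Any]],
    is_mcp: Callable[[str], bool],
    is_custom: Callable[[str], bool],
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]:
    """Return (mcp, custom, other) tool calls in the OpenAI-style function-call shape."""
    mcp_calls: List[Dict[str, Any]] = []
    custom_calls: List[Dict[str, Any]] = []
    other_calls: List[Dict[str, Any]] = []
    for tool_use in tool_uses:
        name = tool_use.get("name", "")
        entry = {
            "id": tool_use["id"],
            "type": "function",
            "function": {"name": name, "arguments": tool_use.get("input", {})},
        }
        if is_mcp(name):
            mcp_calls.append(entry)
        elif is_custom(name):
            custom_calls.append(entry)
        else:
            other_calls.append(entry)
    return mcp_calls, custom_calls, other_calls

# Only the "other" bucket is emitted as a tool_calls chunk; the rest are executed locally.
mcp, custom, other = classify_tool_uses(
    [{"id": "toolu_1", "name": "str_replace_editor", "input": {"path": "x"}}],
    is_mcp=lambda n: n.startswith("mcp__"),
    is_custom=lambda n: n == "add_two_numbers",
)
assert other and not mcp and not custom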
@@ -706,8 +720,8 @@ class ClaudeBackend(MCPBackend):
  yield StreamChunk(type="error", error=error_msg)
  continue

- # If we captured MCP tool calls, execute them and recurse
- if response_completed and mcp_tool_calls:
+ # If we captured MCP or custom tool calls, execute them and recurse
+ if response_completed and (mcp_tool_calls or custom_tool_calls):
  # Circuit breaker pre-execution check using base class method
  if not await self._check_circuit_breaker_before_execution():
  yield StreamChunk(
@@ -721,11 +735,12 @@ class ClaudeBackend(MCPBackend):

  updated_messages = current_messages.copy()

- # Build assistant message with tool_use blocks for all MCP tool calls
+ # Build assistant message with tool_use blocks for all MCP and custom tool calls
  assistant_content = []
  if content: # Add text content if any
  assistant_content.append({"type": "text", "text": content})

+ # Add tool_use blocks for MCP tools
  for tool_call in mcp_tool_calls:
  tool_name = tool_call["function"]["name"]
  tool_args = tool_call["function"]["arguments"]
@@ -740,10 +755,92 @@ class ClaudeBackend(MCPBackend):
  },
  )

+ # Add tool_use blocks for custom tools
+ for tool_call in custom_tool_calls:
+ tool_name = tool_call["function"]["name"]
+ tool_args = tool_call["function"]["arguments"]
+ tool_id = tool_call["id"]
+
+ assistant_content.append(
+ {
+ "type": "tool_use",
+ "id": tool_id,
+ "name": tool_name,
+ "input": tool_args,
+ },
+ )
+
  # Append the assistant message with tool uses
  updated_messages.append({"role": "assistant", "content": assistant_content})

- # Now execute the MCP tool calls and append results
+ # First execute custom tool calls and append results
+ for tool_call in custom_tool_calls:
+ function_name = tool_call["function"]["name"]
+
+ # Yield custom tool call status
+ yield StreamChunk(
+ type="custom_tool_status",
+ status="custom_tool_called",
+ content=f"🔧 [Custom Tool] Calling {function_name}...",
+ source=f"custom_{function_name}",
+ )
+
+ try:
+ # Execute custom function
+ result_str = await self._execute_custom_tool(
+ {
+ "name": function_name,
+ "arguments": json.dumps(tool_call["function"]["arguments"])
+ if isinstance(tool_call["function"].get("arguments"), (dict, list))
+ else tool_call["function"].get("arguments", "{}"),
+ "call_id": tool_call["id"],
+ },
+ )
+ if not result_str or result_str.startswith("Error:"):
+ logger.warning(f"Custom function {function_name} failed: {result_str or 'unknown error'}")
+ result_str = result_str or "Tool execution failed"
+ except Exception as e:
+ logger.error(f"Unexpected error in custom function execution: {e}")
+ result_str = f"Error executing custom tool: {str(e)}"
+
+ # Build tool result message
+ tool_result_msg = {
+ "role": "user",
+ "content": [
+ {
+ "type": "tool_result",
+ "tool_use_id": tool_call["id"],
+ "content": result_str,
+ },
+ ],
+ }
+
+ # Append to updated_messages
+ updated_messages.append(tool_result_msg)
+
+ yield StreamChunk(
+ type="custom_tool_status",
+ status="function_call",
+ content=f"Arguments for Calling {function_name}: {json.dumps(tool_call['function'].get('arguments', {}))}",
+ source=f"custom_{function_name}",
+ )
+
+ yield StreamChunk(
+ type="custom_tool_status",
+ status="function_call_output",
+ content=f"Results for Calling {function_name}: {result_str}",
+ source=f"custom_{function_name}",
+ )
+
+ logger.info(f"Executed custom function {function_name}")
+ yield StreamChunk(
+ type="custom_tool_status",
+ status="custom_tool_response",
+ content=f"✅ [Custom Tool] {function_name} completed",
+ source=f"custom_{function_name}",
+ )
+
+ # Then execute MCP tool calls and append results
  for tool_call in mcp_tool_calls:
  function_name = tool_call["function"]["name"]

@@ -825,8 +922,8 @@ class ClaudeBackend(MCPBackend):
  # Trim updated_messages using base class method
  updated_messages = self._trim_message_history(updated_messages)

- # After processing all MCP calls, recurse: async for chunk in self._stream_mcp_recursive(updated_messages, tools, client, **kwargs): yield chunk
- async for chunk in self._stream_with_mcp_tools(updated_messages, tools, client, **kwargs):
+ # After processing all tool calls, recurse
+ async for chunk in self._stream_with_custom_and_mcp_tools(updated_messages, tools, client, **kwargs):
  yield chunk
  return
  else:
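Note (not part of the diff): unlike the Chat Completions path, the Claude path above returns tool output to the model as a user message carrying a tool_result block tied to the originating tool_use id. A minimal sketch of that message pair follows, using only the shapes visible in the hunk; the ids and tool name are made up for illustration.

import json
from typing import Any, Dict, Union

def claude_tool_use_block(tool_id: str, name: str, args: Union[Dict[str, Any], list]) -> Dict[str, Any]:
    """Assistant-side tool_use block, appended before the tool is executed."""
    return {"type": "tool_use", "id": tool_id, "name": name, "input": args}

def claude_tool_result_message(tool_id: str, result_str: str) -> Dict[str, Any]:
    """User-side message carrying the tool output back to the model."""
    return {
        "role": "user",
        "content": [{"type": "tool_result", "tool_use_id": tool_id, "content": result_str}],
    }

assistant_msg = {
    "role": "assistant",
    "content": [claude_tool_use_block("toolu_1", "add_two_numbers", {"a": 1, "b": 2})],
}
result_msg = claude_tool_result_message("toolu_1", json.dumps({"sum": 3}))
assert result_msg["content"][0]["tool_use_id"] == assistant_msg["content"][0]["id"]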