massgen 0.0.3-py3-none-any.whl → 0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of massgen might be problematic.
- massgen/__init__.py +142 -8
- massgen/adapters/__init__.py +29 -0
- massgen/adapters/ag2_adapter.py +483 -0
- massgen/adapters/base.py +183 -0
- massgen/adapters/tests/__init__.py +0 -0
- massgen/adapters/tests/test_ag2_adapter.py +439 -0
- massgen/adapters/tests/test_agent_adapter.py +128 -0
- massgen/adapters/utils/__init__.py +2 -0
- massgen/adapters/utils/ag2_utils.py +236 -0
- massgen/adapters/utils/tests/__init__.py +0 -0
- massgen/adapters/utils/tests/test_ag2_utils.py +138 -0
- massgen/agent_config.py +329 -55
- massgen/api_params_handler/__init__.py +10 -0
- massgen/api_params_handler/_api_params_handler_base.py +99 -0
- massgen/api_params_handler/_chat_completions_api_params_handler.py +176 -0
- massgen/api_params_handler/_claude_api_params_handler.py +113 -0
- massgen/api_params_handler/_response_api_params_handler.py +130 -0
- massgen/backend/__init__.py +39 -4
- massgen/backend/azure_openai.py +385 -0
- massgen/backend/base.py +341 -69
- massgen/backend/base_with_mcp.py +1102 -0
- massgen/backend/capabilities.py +386 -0
- massgen/backend/chat_completions.py +577 -130
- massgen/backend/claude.py +1033 -537
- massgen/backend/claude_code.py +1203 -0
- massgen/backend/cli_base.py +209 -0
- massgen/backend/docs/BACKEND_ARCHITECTURE.md +126 -0
- massgen/backend/{CLAUDE_API_RESEARCH.md → docs/CLAUDE_API_RESEARCH.md} +18 -18
- massgen/backend/{GEMINI_API_DOCUMENTATION.md → docs/GEMINI_API_DOCUMENTATION.md} +9 -9
- massgen/backend/docs/Gemini MCP Integration Analysis.md +1050 -0
- massgen/backend/docs/MCP_IMPLEMENTATION_CLAUDE_BACKEND.md +177 -0
- massgen/backend/docs/MCP_INTEGRATION_RESPONSE_BACKEND.md +352 -0
- massgen/backend/docs/OPENAI_GPT5_MODELS.md +211 -0
- massgen/backend/{OPENAI_RESPONSES_API_FORMAT.md → docs/OPENAI_RESPONSE_API_TOOL_CALLS.md} +3 -3
- massgen/backend/docs/OPENAI_response_streaming.md +20654 -0
- massgen/backend/docs/inference_backend.md +257 -0
- massgen/backend/docs/permissions_and_context_files.md +1085 -0
- massgen/backend/external.py +126 -0
- massgen/backend/gemini.py +1850 -241
- massgen/backend/grok.py +40 -156
- massgen/backend/inference.py +156 -0
- massgen/backend/lmstudio.py +171 -0
- massgen/backend/response.py +1095 -322
- massgen/chat_agent.py +131 -113
- massgen/cli.py +1560 -275
- massgen/config_builder.py +2396 -0
- massgen/configs/BACKEND_CONFIGURATION.md +458 -0
- massgen/configs/README.md +559 -216
- massgen/configs/ag2/ag2_case_study.yaml +27 -0
- massgen/configs/ag2/ag2_coder.yaml +34 -0
- massgen/configs/ag2/ag2_coder_case_study.yaml +36 -0
- massgen/configs/ag2/ag2_gemini.yaml +27 -0
- massgen/configs/ag2/ag2_groupchat.yaml +108 -0
- massgen/configs/ag2/ag2_groupchat_gpt.yaml +118 -0
- massgen/configs/ag2/ag2_single_agent.yaml +21 -0
- massgen/configs/basic/multi/fast_timeout_example.yaml +37 -0
- massgen/configs/basic/multi/gemini_4o_claude.yaml +31 -0
- massgen/configs/basic/multi/gemini_gpt5nano_claude.yaml +36 -0
- massgen/configs/{gemini_4o_claude.yaml → basic/multi/geminicode_4o_claude.yaml} +3 -3
- massgen/configs/basic/multi/geminicode_gpt5nano_claude.yaml +36 -0
- massgen/configs/basic/multi/glm_gemini_claude.yaml +25 -0
- massgen/configs/basic/multi/gpt4o_audio_generation.yaml +30 -0
- massgen/configs/basic/multi/gpt4o_image_generation.yaml +31 -0
- massgen/configs/basic/multi/gpt5nano_glm_qwen.yaml +26 -0
- massgen/configs/basic/multi/gpt5nano_image_understanding.yaml +26 -0
- massgen/configs/{three_agents_default.yaml → basic/multi/three_agents_default.yaml} +8 -4
- massgen/configs/basic/multi/three_agents_opensource.yaml +27 -0
- massgen/configs/basic/multi/three_agents_vllm.yaml +20 -0
- massgen/configs/basic/multi/two_agents_gemini.yaml +19 -0
- massgen/configs/{two_agents.yaml → basic/multi/two_agents_gpt5.yaml} +14 -6
- massgen/configs/basic/multi/two_agents_opensource_lmstudio.yaml +31 -0
- massgen/configs/basic/multi/two_qwen_vllm_sglang.yaml +28 -0
- massgen/configs/{single_agent.yaml → basic/single/single_agent.yaml} +1 -1
- massgen/configs/{single_flash2.5.yaml → basic/single/single_flash2.5.yaml} +1 -2
- massgen/configs/basic/single/single_gemini2.5pro.yaml +16 -0
- massgen/configs/basic/single/single_gpt4o_audio_generation.yaml +22 -0
- massgen/configs/basic/single/single_gpt4o_image_generation.yaml +22 -0
- massgen/configs/basic/single/single_gpt4o_video_generation.yaml +24 -0
- massgen/configs/basic/single/single_gpt5nano.yaml +20 -0
- massgen/configs/basic/single/single_gpt5nano_file_search.yaml +18 -0
- massgen/configs/basic/single/single_gpt5nano_image_understanding.yaml +17 -0
- massgen/configs/basic/single/single_gptoss120b.yaml +15 -0
- massgen/configs/basic/single/single_openrouter_audio_understanding.yaml +15 -0
- massgen/configs/basic/single/single_qwen_video_understanding.yaml +15 -0
- massgen/configs/debug/code_execution/command_filtering_blacklist.yaml +29 -0
- massgen/configs/debug/code_execution/command_filtering_whitelist.yaml +28 -0
- massgen/configs/debug/code_execution/docker_verification.yaml +29 -0
- massgen/configs/debug/skip_coordination_test.yaml +27 -0
- massgen/configs/debug/test_sdk_migration.yaml +17 -0
- massgen/configs/docs/DISCORD_MCP_SETUP.md +208 -0
- massgen/configs/docs/TWITTER_MCP_ENESCINAR_SETUP.md +82 -0
- massgen/configs/providers/azure/azure_openai_multi.yaml +21 -0
- massgen/configs/providers/azure/azure_openai_single.yaml +19 -0
- massgen/configs/providers/claude/claude.yaml +14 -0
- massgen/configs/providers/gemini/gemini_gpt5nano.yaml +28 -0
- massgen/configs/providers/local/lmstudio.yaml +11 -0
- massgen/configs/providers/openai/gpt5.yaml +46 -0
- massgen/configs/providers/openai/gpt5_nano.yaml +46 -0
- massgen/configs/providers/others/grok_single_agent.yaml +19 -0
- massgen/configs/providers/others/zai_coding_team.yaml +108 -0
- massgen/configs/providers/others/zai_glm45.yaml +12 -0
- massgen/configs/{creative_team.yaml → teams/creative/creative_team.yaml} +16 -6
- massgen/configs/{travel_planning.yaml → teams/creative/travel_planning.yaml} +16 -6
- massgen/configs/{news_analysis.yaml → teams/research/news_analysis.yaml} +16 -6
- massgen/configs/{research_team.yaml → teams/research/research_team.yaml} +15 -7
- massgen/configs/{technical_analysis.yaml → teams/research/technical_analysis.yaml} +16 -6
- massgen/configs/tools/code-execution/basic_command_execution.yaml +25 -0
- massgen/configs/tools/code-execution/code_execution_use_case_simple.yaml +41 -0
- massgen/configs/tools/code-execution/docker_claude_code.yaml +32 -0
- massgen/configs/tools/code-execution/docker_multi_agent.yaml +32 -0
- massgen/configs/tools/code-execution/docker_simple.yaml +29 -0
- massgen/configs/tools/code-execution/docker_with_resource_limits.yaml +32 -0
- massgen/configs/tools/code-execution/multi_agent_playwright_automation.yaml +57 -0
- massgen/configs/tools/filesystem/cc_gpt5_gemini_filesystem.yaml +34 -0
- massgen/configs/tools/filesystem/claude_code_context_sharing.yaml +68 -0
- massgen/configs/tools/filesystem/claude_code_flash2.5.yaml +43 -0
- massgen/configs/tools/filesystem/claude_code_flash2.5_gptoss.yaml +49 -0
- massgen/configs/tools/filesystem/claude_code_gpt5nano.yaml +31 -0
- massgen/configs/tools/filesystem/claude_code_single.yaml +40 -0
- massgen/configs/tools/filesystem/fs_permissions_test.yaml +87 -0
- massgen/configs/tools/filesystem/gemini_gemini_workspace_cleanup.yaml +54 -0
- massgen/configs/tools/filesystem/gemini_gpt5_filesystem_casestudy.yaml +30 -0
- massgen/configs/tools/filesystem/gemini_gpt5nano_file_context_path.yaml +43 -0
- massgen/configs/tools/filesystem/gemini_gpt5nano_protected_paths.yaml +45 -0
- massgen/configs/tools/filesystem/gpt5mini_cc_fs_context_path.yaml +31 -0
- massgen/configs/tools/filesystem/grok4_gpt5_gemini_filesystem.yaml +32 -0
- massgen/configs/tools/filesystem/multiturn/grok4_gpt5_claude_code_filesystem_multiturn.yaml +58 -0
- massgen/configs/tools/filesystem/multiturn/grok4_gpt5_gemini_filesystem_multiturn.yaml +58 -0
- massgen/configs/tools/filesystem/multiturn/two_claude_code_filesystem_multiturn.yaml +47 -0
- massgen/configs/tools/filesystem/multiturn/two_gemini_flash_filesystem_multiturn.yaml +48 -0
- massgen/configs/tools/mcp/claude_code_discord_mcp_example.yaml +27 -0
- massgen/configs/tools/mcp/claude_code_simple_mcp.yaml +35 -0
- massgen/configs/tools/mcp/claude_code_twitter_mcp_example.yaml +32 -0
- massgen/configs/tools/mcp/claude_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/claude_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/five_agents_travel_mcp_test.yaml +157 -0
- massgen/configs/tools/mcp/five_agents_weather_mcp_test.yaml +103 -0
- massgen/configs/tools/mcp/gemini_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test.yaml +23 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_sharing.yaml +23 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_single_agent.yaml +17 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_with_claude_code.yaml +24 -0
- massgen/configs/tools/mcp/gemini_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/gemini_notion_mcp.yaml +52 -0
- massgen/configs/tools/mcp/gpt5_nano_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/gpt5_nano_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/gpt5mini_claude_code_discord_mcp_example.yaml +38 -0
- massgen/configs/tools/mcp/gpt_oss_mcp_example.yaml +25 -0
- massgen/configs/tools/mcp/gpt_oss_mcp_test.yaml +28 -0
- massgen/configs/tools/mcp/grok3_mini_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/grok3_mini_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/multimcp_gemini.yaml +111 -0
- massgen/configs/tools/mcp/qwen_api_mcp_example.yaml +25 -0
- massgen/configs/tools/mcp/qwen_api_mcp_test.yaml +28 -0
- massgen/configs/tools/mcp/qwen_local_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/qwen_local_mcp_test.yaml +27 -0
- massgen/configs/tools/planning/five_agents_discord_mcp_planning_mode.yaml +140 -0
- massgen/configs/tools/planning/five_agents_filesystem_mcp_planning_mode.yaml +151 -0
- massgen/configs/tools/planning/five_agents_notion_mcp_planning_mode.yaml +151 -0
- massgen/configs/tools/planning/five_agents_twitter_mcp_planning_mode.yaml +155 -0
- massgen/configs/tools/planning/gpt5_mini_case_study_mcp_planning_mode.yaml +73 -0
- massgen/configs/tools/web-search/claude_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gemini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gpt5_mini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gpt_oss_streamable_http_test.yaml +44 -0
- massgen/configs/tools/web-search/grok3_mini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/qwen_api_streamable_http_test.yaml +44 -0
- massgen/configs/tools/web-search/qwen_local_streamable_http_test.yaml +43 -0
- massgen/coordination_tracker.py +708 -0
- massgen/docker/README.md +462 -0
- massgen/filesystem_manager/__init__.py +21 -0
- massgen/filesystem_manager/_base.py +9 -0
- massgen/filesystem_manager/_code_execution_server.py +545 -0
- massgen/filesystem_manager/_docker_manager.py +477 -0
- massgen/filesystem_manager/_file_operation_tracker.py +248 -0
- massgen/filesystem_manager/_filesystem_manager.py +813 -0
- massgen/filesystem_manager/_path_permission_manager.py +1261 -0
- massgen/filesystem_manager/_workspace_tools_server.py +1815 -0
- massgen/formatter/__init__.py +10 -0
- massgen/formatter/_chat_completions_formatter.py +284 -0
- massgen/formatter/_claude_formatter.py +235 -0
- massgen/formatter/_formatter_base.py +156 -0
- massgen/formatter/_response_formatter.py +263 -0
- massgen/frontend/__init__.py +1 -2
- massgen/frontend/coordination_ui.py +471 -286
- massgen/frontend/displays/base_display.py +56 -11
- massgen/frontend/displays/create_coordination_table.py +1956 -0
- massgen/frontend/displays/rich_terminal_display.py +1259 -619
- massgen/frontend/displays/simple_display.py +9 -4
- massgen/frontend/displays/terminal_display.py +27 -68
- massgen/logger_config.py +681 -0
- massgen/mcp_tools/README.md +232 -0
- massgen/mcp_tools/__init__.py +105 -0
- massgen/mcp_tools/backend_utils.py +1035 -0
- massgen/mcp_tools/circuit_breaker.py +195 -0
- massgen/mcp_tools/client.py +894 -0
- massgen/mcp_tools/config_validator.py +138 -0
- massgen/mcp_tools/docs/circuit_breaker.md +646 -0
- massgen/mcp_tools/docs/client.md +950 -0
- massgen/mcp_tools/docs/config_validator.md +478 -0
- massgen/mcp_tools/docs/exceptions.md +1165 -0
- massgen/mcp_tools/docs/security.md +854 -0
- massgen/mcp_tools/exceptions.py +338 -0
- massgen/mcp_tools/hooks.py +212 -0
- massgen/mcp_tools/security.py +780 -0
- massgen/message_templates.py +342 -64
- massgen/orchestrator.py +1515 -241
- massgen/stream_chunk/__init__.py +35 -0
- massgen/stream_chunk/base.py +92 -0
- massgen/stream_chunk/multimodal.py +237 -0
- massgen/stream_chunk/text.py +162 -0
- massgen/tests/mcp_test_server.py +150 -0
- massgen/tests/multi_turn_conversation_design.md +0 -8
- massgen/tests/test_azure_openai_backend.py +156 -0
- massgen/tests/test_backend_capabilities.py +262 -0
- massgen/tests/test_backend_event_loop_all.py +179 -0
- massgen/tests/test_chat_completions_refactor.py +142 -0
- massgen/tests/test_claude_backend.py +15 -28
- massgen/tests/test_claude_code.py +268 -0
- massgen/tests/test_claude_code_context_sharing.py +233 -0
- massgen/tests/test_claude_code_orchestrator.py +175 -0
- massgen/tests/test_cli_backends.py +180 -0
- massgen/tests/test_code_execution.py +679 -0
- massgen/tests/test_external_agent_backend.py +134 -0
- massgen/tests/test_final_presentation_fallback.py +237 -0
- massgen/tests/test_gemini_planning_mode.py +351 -0
- massgen/tests/test_grok_backend.py +7 -10
- massgen/tests/test_http_mcp_server.py +42 -0
- massgen/tests/test_integration_simple.py +198 -0
- massgen/tests/test_mcp_blocking.py +125 -0
- massgen/tests/test_message_context_building.py +29 -47
- massgen/tests/test_orchestrator_final_presentation.py +48 -0
- massgen/tests/test_path_permission_manager.py +2087 -0
- massgen/tests/test_rich_terminal_display.py +14 -13
- massgen/tests/test_timeout.py +133 -0
- massgen/tests/test_v3_3agents.py +11 -12
- massgen/tests/test_v3_simple.py +8 -13
- massgen/tests/test_v3_three_agents.py +11 -18
- massgen/tests/test_v3_two_agents.py +8 -13
- massgen/token_manager/__init__.py +7 -0
- massgen/token_manager/token_manager.py +400 -0
- massgen/utils.py +52 -16
- massgen/v1/agent.py +45 -91
- massgen/v1/agents.py +18 -53
- massgen/v1/backends/gemini.py +50 -153
- massgen/v1/backends/grok.py +21 -54
- massgen/v1/backends/oai.py +39 -111
- massgen/v1/cli.py +36 -93
- massgen/v1/config.py +8 -12
- massgen/v1/logging.py +43 -127
- massgen/v1/main.py +18 -32
- massgen/v1/orchestrator.py +68 -209
- massgen/v1/streaming_display.py +62 -163
- massgen/v1/tools.py +8 -12
- massgen/v1/types.py +9 -23
- massgen/v1/utils.py +5 -23
- massgen-0.1.0.dist-info/METADATA +1245 -0
- massgen-0.1.0.dist-info/RECORD +273 -0
- massgen-0.1.0.dist-info/entry_points.txt +2 -0
- massgen/frontend/logging/__init__.py +0 -9
- massgen/frontend/logging/realtime_logger.py +0 -197
- massgen-0.0.3.dist-info/METADATA +0 -568
- massgen-0.0.3.dist-info/RECORD +0 -76
- massgen-0.0.3.dist-info/entry_points.txt +0 -2
- /massgen/backend/{Function calling openai responses.md → docs/Function calling openai responses.md} +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/WHEEL +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/top_level.txt +0 -0
massgen/tests/test_azure_openai_backend.py

@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+"""
+Test Azure OpenAI backend functionality.
+"""
+
+import os
+import sys
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+# Add the parent directory to sys.path to allow relative imports
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
+
+from backend.azure_openai import AzureOpenAIBackend  # noqa: E402
+
+
+class TestAzureOpenAIBackend:
+    """Test Azure OpenAI backend functionality."""
+
+    def test_init_with_env_vars(self):
+        """Test initialization with environment variables."""
+        with patch.dict(
+            os.environ,
+            {
+                "AZURE_OPENAI_API_KEY": "test-key",
+                "AZURE_OPENAI_ENDPOINT": "https://test.openai.azure.com/",
+                "AZURE_OPENAI_API_VERSION": "2024-02-15-preview",
+            },
+        ):
+            backend = AzureOpenAIBackend()
+            assert backend.api_key == "test-key"
+            assert backend.azure_endpoint == "https://test.openai.azure.com"
+            assert backend.api_version == "2024-02-15-preview"
+
+    def test_init_with_kwargs(self):
+        """Test initialization with keyword arguments."""
+        backend = AzureOpenAIBackend(
+            api_key="custom-key",
+            base_url="https://custom.openai.azure.com/",
+            api_version="2024-01-01",
+        )
+        assert backend.api_key == "custom-key"
+        assert backend.azure_endpoint == "https://custom.openai.azure.com"
+        assert backend.api_version == "2024-01-01"
+
+    def test_init_missing_api_key(self):
+        """Test initialization fails without API key."""
+        with patch.dict(os.environ, {}, clear=True):
+            with pytest.raises(ValueError, match="Azure OpenAI endpoint URL is required"):
+                AzureOpenAIBackend()
+
+    def test_init_missing_endpoint(self):
+        """Test initialization fails without endpoint."""
+        with patch.dict(os.environ, {"AZURE_OPENAI_API_KEY": "test-key"}, clear=True):
+            with pytest.raises(ValueError, match="Azure OpenAI endpoint URL is required"):
+                AzureOpenAIBackend()
+
+    def test_init_missing_api_key_with_endpoint(self):
+        """Test initialization fails without API key when endpoint is provided."""
+        with patch.dict(os.environ, {}, clear=True):
+            with pytest.raises(ValueError, match="Azure OpenAI API key is required"):
+                AzureOpenAIBackend(base_url="https://test.openai.azure.com/")
+
+    def test_base_url_normalization(self):
+        """Test base URL is properly normalized."""
+        backend = AzureOpenAIBackend(api_key="test-key", base_url="https://test.openai.azure.com")
+        assert backend.azure_endpoint == "https://test.openai.azure.com"
+
+        backend2 = AzureOpenAIBackend(api_key="test-key", base_url="https://test2.openai.azure.com/")
+        assert backend2.azure_endpoint == "https://test2.openai.azure.com"
+
+    def test_get_provider_name(self):
+        """Test provider name is correct."""
+        backend = AzureOpenAIBackend(api_key="test-key", base_url="https://test.openai.azure.com/")
+        assert backend.get_provider_name() == "Azure OpenAI"
+
+    def test_estimate_tokens(self):
+        """Test token estimation."""
+        backend = AzureOpenAIBackend(api_key="test-key", base_url="https://test.openai.azure.com/")
+        text = "This is a test message with several words."
+        estimated = backend.estimate_tokens(text)
+        assert estimated > 0
+        assert isinstance(estimated, (int, float))
+
+    def test_calculate_cost(self):
+        """Test cost calculation."""
+        backend = AzureOpenAIBackend(api_key="test-key", base_url="https://test.openai.azure.com/")
+
+        # Test GPT-4 cost calculation
+        cost = backend.calculate_cost(1000, 500, "gpt-4o")
+        assert cost > 0
+        assert isinstance(cost, float)
+
+        # Test GPT-3.5 cost calculation
+        cost2 = backend.calculate_cost(1000, 500, "gpt-3.5-turbo")
+        assert cost2 > 0
+        assert cost2 < cost  # GPT-3.5 should be cheaper than GPT-4
+
+    @pytest.mark.asyncio
+    async def test_stream_with_tools_missing_model(self):
+        """Test stream_with_tools fails without model parameter."""
+        backend = AzureOpenAIBackend(api_key="test-key", base_url="https://test.openai.azure.com/")
+
+        messages = [{"role": "user", "content": "Hello"}]
+        tools = []
+
+        # The validation happens at the beginning of the method, before any API calls
+        # So we don't need to mock the client for this test
+        try:
+            async for chunk in backend.stream_with_tools(messages, tools):
+                # If we get here, the validation didn't work as expected
+                # Check if it's an error chunk
+                if chunk.type == "error" and "deployment name" in chunk.error:
+                    # This is the expected behavior - validation error is yielded as a chunk
+                    return
+                else:
+                    # Unexpected - validation should have failed
+                    pytest.fail(f"Expected validation error, but got chunk: {chunk}")
+        except ValueError as e:
+            # This is the expected behavior - validation error is raised
+            if "deployment name" in str(e):
+                return
+            else:
+                pytest.fail(f"Unexpected ValueError: {e}")
+        except Exception as e:
+            pytest.fail(f"Unexpected exception: {e}")
+
+    @pytest.mark.asyncio
+    async def test_stream_with_tools_with_model(self):
+        """Test stream_with_tools works with model parameter."""
+        backend = AzureOpenAIBackend(api_key="test-key", base_url="https://test.openai.azure.com/")
+
+        messages = [{"role": "user", "content": "Hello"}]
+        tools = []
+
+        # Mock the client and create a mock stream response
+        mock_chunk = MagicMock()
+        mock_chunk.choices = [MagicMock()]
+        mock_chunk.choices[0].delta = MagicMock()
+        mock_chunk.choices[0].delta.content = "Hello"
+        mock_chunk.choices[0].finish_reason = "stop"
+
+        mock_stream = [mock_chunk]
+
+        with patch.object(backend, "client") as mock_client:
+            mock_client.chat.completions.create = AsyncMock(return_value=mock_stream)
+
+            # Test that it doesn't raise an error with model parameter
+            try:
+                async for chunk in backend.stream_with_tools(messages, tools, model="gpt-4"):
+                    # Just consume the stream
+                    pass
+            except Exception as e:
+                # If there's an error, it should not be about missing model
+                assert "deployment name" not in str(e)
massgen/tests/test_backend_capabilities.py

@@ -0,0 +1,262 @@
+# -*- coding: utf-8 -*-
+"""
+Tests for backend capabilities registry.
+
+These tests ensure the capabilities registry is consistent and valid.
+Run with: uv run pytest massgen/tests/test_backend_capabilities.py -v
+"""
+
+import pytest
+
+from massgen.backend.capabilities import (
+    BACKEND_CAPABILITIES,
+    get_all_backend_types,
+    get_backends_with_capability,
+    get_capabilities,
+    has_capability,
+    validate_backend_config,
+)
+
+
+class TestBackendCapabilitiesRegistry:
+    """Test the capabilities registry structure and validity."""
+
+    def test_all_backends_have_required_fields(self):
+        """Ensure all backend entries have required fields."""
+        for backend_type, caps in BACKEND_CAPABILITIES.items():
+            assert caps.backend_type == backend_type, f"{backend_type}: backend_type mismatch"
+            assert caps.provider_name, f"{backend_type}: provider_name is empty"
+            assert caps.supported_capabilities is not None, f"{backend_type}: supported_capabilities is None"
+            assert caps.builtin_tools is not None, f"{backend_type}: builtin_tools is None"
+            assert caps.filesystem_support in ["none", "native", "mcp"], f"{backend_type}: invalid filesystem_support"
+            assert caps.models, f"{backend_type}: models list is empty"
+            assert caps.default_model, f"{backend_type}: default_model is empty"
+
+    def test_default_model_in_models_list(self):
+        """Ensure default model exists in models list."""
+        for backend_type, caps in BACKEND_CAPABILITIES.items():
+            assert caps.default_model in caps.models, f"{backend_type}: default_model '{caps.default_model}' not in models list"
+
+    def test_filesystem_support_values(self):
+        """Ensure filesystem_support has valid values."""
+        valid_values = {"none", "native", "mcp"}
+        for backend_type, caps in BACKEND_CAPABILITIES.items():
+            assert caps.filesystem_support in valid_values, f"{backend_type}: filesystem_support '{caps.filesystem_support}' " f"not in {valid_values}"
+
+    def test_no_empty_backend_types(self):
+        """Ensure no backend has an empty backend_type."""
+        for backend_type, caps in BACKEND_CAPABILITIES.items():
+            assert backend_type, "Found backend with empty backend_type"
+            assert caps.backend_type, f"Backend {backend_type} has empty backend_type field"
+
+    def test_capability_strings_are_valid(self):
+        """Ensure capability strings follow conventions."""
+        valid_capabilities = {
+            "web_search",
+            "code_execution",
+            "bash",
+            "multimodal",  # Legacy - being phased out
+            "vision",  # Legacy - use image_understanding
+            "mcp",
+            "filesystem_native",
+            "filesystem_mcp",
+            "reasoning",
+            "image_generation",
+            "image_understanding",
+            "audio_generation",
+            "audio_understanding",
+            "video_generation",
+            "video_understanding",
+        }
+
+        for backend_type, caps in BACKEND_CAPABILITIES.items():
+            for cap in caps.supported_capabilities:
+                assert cap in valid_capabilities, f"{backend_type}: unknown capability '{cap}'. " f"Valid capabilities: {valid_capabilities}"
+
+
+class TestCapabilityQueries:
+    """Test capability query functions."""
+
+    def test_get_capabilities_existing_backend(self):
+        """Test getting capabilities for existing backends."""
+        caps = get_capabilities("openai")
+        assert caps is not None
+        assert caps.backend_type == "openai"
+        assert caps.provider_name == "OpenAI"
+
+    def test_get_capabilities_nonexistent_backend(self):
+        """Test getting capabilities for non-existent backend."""
+        caps = get_capabilities("nonexistent_backend")
+        assert caps is None
+
+    def test_has_capability_true(self):
+        """Test checking for existing capability."""
+        # OpenAI has web_search
+        assert has_capability("openai", "web_search") is True
+
+    def test_has_capability_false(self):
+        """Test checking for non-existent capability."""
+        # LM Studio doesn't have web_search
+        assert has_capability("lmstudio", "web_search") is False
+
+    def test_has_capability_nonexistent_backend(self):
+        """Test checking capability on non-existent backend."""
+        assert has_capability("nonexistent", "web_search") is False
+
+    def test_get_all_backend_types(self):
+        """Test getting all backend types."""
+        backend_types = get_all_backend_types()
+        assert len(backend_types) > 0
+        assert "openai" in backend_types
+        assert "claude" in backend_types
+        assert "gemini" in backend_types
+
+    def test_get_backends_with_capability(self):
+        """Test getting backends by capability."""
+        web_search_backends = get_backends_with_capability("web_search")
+        assert "openai" in web_search_backends
+        assert "gemini" in web_search_backends
+        assert "grok" in web_search_backends
+
+        # Backends without web search should not be included
+        assert "claude_code" not in web_search_backends
+
+
+class TestBackendValidation:
+    """Test backend configuration validation."""
+
+    def test_validate_valid_openai_config(self):
+        """Test validating a valid OpenAI config."""
+        config = {
+            "type": "openai",
+            "model": "gpt-4o",
+            "enable_web_search": True,
+            "enable_code_interpreter": True,
+        }
+        errors = validate_backend_config("openai", config)
+        assert len(errors) == 0
+
+    def test_validate_invalid_capability(self):
+        """Test validation catches unsupported capability."""
+        # Claude Code doesn't support web_search
+        config = {
+            "type": "claude_code",
+            "enable_web_search": True,
+        }
+        errors = validate_backend_config("claude_code", config)
+        assert len(errors) > 0
+        assert any("web_search" in error for error in errors)
+
+    def test_validate_invalid_backend_type(self):
+        """Test validation catches unknown backend."""
+        config = {"type": "nonexistent"}
+        errors = validate_backend_config("nonexistent", config)
+        assert len(errors) > 0
+        assert any("Unknown backend" in error for error in errors)
+
+    def test_validate_code_execution_variants(self):
+        """Test validation handles different code execution config keys."""
+        # OpenAI uses enable_code_interpreter
+        config_openai = {"type": "openai", "enable_code_interpreter": True}
+        errors = validate_backend_config("openai", config_openai)
+        assert len(errors) == 0
+
+        # Claude uses enable_code_execution
+        config_claude = {"type": "claude", "enable_code_execution": True}
+        errors = validate_backend_config("claude", config_claude)
+        assert len(errors) == 0
+
+    def test_validate_mcp_servers(self):
+        """Test validation of MCP server configuration."""
+        # Valid MCP config for backend that supports it
+        config = {
+            "type": "openai",
+            "mcp_servers": [
+                {
+                    "name": "weather",
+                    "command": "npx",
+                    "args": ["-y", "@fak111/weather-mcp"],
+                },
+            ],
+        }
+        errors = validate_backend_config("openai", config)
+        assert len(errors) == 0
+
+
+class TestSpecificBackends:
+    """Test specific backend configurations."""
+
+    def test_openai_capabilities(self):
+        """Test OpenAI backend capabilities."""
+        caps = get_capabilities("openai")
+        assert "web_search" in caps.supported_capabilities
+        assert "code_execution" in caps.supported_capabilities
+        assert "mcp" in caps.supported_capabilities
+        assert "reasoning" in caps.supported_capabilities
+        assert "image_generation" in caps.supported_capabilities
+        assert "image_understanding" in caps.supported_capabilities
+        assert "audio_generation" in caps.supported_capabilities
+        assert "video_generation" in caps.supported_capabilities
+        assert caps.filesystem_support == "mcp"
+        assert caps.env_var == "OPENAI_API_KEY"
+
+    def test_claude_capabilities(self):
+        """Test Claude backend capabilities."""
+        caps = get_capabilities("claude")
+        assert "web_search" in caps.supported_capabilities
+        assert "code_execution" in caps.supported_capabilities
+        assert "mcp" in caps.supported_capabilities
+        assert caps.filesystem_support == "mcp"
+        assert caps.env_var == "ANTHROPIC_API_KEY"
+
+    def test_claude_code_capabilities(self):
+        """Test Claude Code backend capabilities."""
+        caps = get_capabilities("claude_code")
+        assert "bash" in caps.supported_capabilities
+        assert "mcp" in caps.supported_capabilities
+        assert caps.filesystem_support == "native"
+        assert caps.env_var == "ANTHROPIC_API_KEY"
+        assert len(caps.builtin_tools) > 0
+
+    def test_gemini_capabilities(self):
+        """Test Gemini backend capabilities."""
+        caps = get_capabilities("gemini")
+        assert "web_search" in caps.supported_capabilities
+        assert "code_execution" in caps.supported_capabilities
+        assert "mcp" in caps.supported_capabilities
+        assert "image_understanding" in caps.supported_capabilities
+        assert caps.filesystem_support == "mcp"
+        assert caps.env_var == "GEMINI_API_KEY"
+
+    def test_local_backends_no_api_key(self):
+        """Test local backends don't require API keys."""
+        local_backends = ["lmstudio", "inference", "chatcompletion"]
+        for backend_type in local_backends:
+            caps = get_capabilities(backend_type)
+            # These backends may or may not require API keys depending on provider
+            # Just verify they're in the registry
+            assert caps is not None
+
+
+class TestConsistency:
+    """Test consistency between related fields."""
+
+    def test_filesystem_native_implies_capability(self):
+        """Backends with native filesystem should have filesystem capability."""
+        for backend_type, caps in BACKEND_CAPABILITIES.items():
+            if caps.filesystem_support == "native":
+                # Should have filesystem_native in capabilities
+                assert "filesystem_native" in caps.supported_capabilities or len(caps.builtin_tools) > 0, f"{backend_type}: native filesystem but no capability/tools"  # Or have filesystem tools
+
+    def test_mcp_capability_consistency(self):
+        """All backends should support MCP except where explicitly excluded."""
+        # Most backends support MCP
+        mcp_backends = get_backends_with_capability("mcp")
+        assert len(mcp_backends) > 0
+        assert "openai" in mcp_backends
+        assert "claude" in mcp_backends
+        assert "gemini" in mcp_backends
+
+
+if __name__ == "__main__":
+    pytest.main([__file__, "-v"])
massgen/tests/test_backend_event_loop_all.py

@@ -0,0 +1,179 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Event loop/resource cleanup tests for multiple backends without changing code.
+These tests mock SDK async clients and assert aclose() is awaited by backends.
+
+Backends covered:
+- ResponseBackend (OpenAI Responses API)
+- GrokBackend (xAI via OpenAI-compatible client)
+- ClaudeBackend (Anthropic Messages API)
+
+NOTE: Some tests may currently FAIL, revealing missing cleanup in backends.
+"""
+
+import asyncio
+from types import SimpleNamespace
+from typing import Any, List
+
+import pytest
+
+from massgen.backend import ClaudeBackend, GrokBackend, ResponseBackend
+
+
+# ---- Common fakes ----
+class _FakeStreamSingleStop:
+    """Async stream that yields once, then stops. Shape varies per backend needs."""
+
+    def __init__(self, item_factory):
+        self._yielded = False
+        self._item_factory = item_factory
+
+    def __aiter__(self):
+        return self
+
+    async def __anext__(self):
+        if self._yielded:
+            raise StopAsyncIteration
+        self._yielded = True
+        await asyncio.sleep(0)
+        return self._item_factory()
+
+
+class _FakeAsyncClientBase:
+    def __init__(self, *args: Any, **kwargs: Any):
+        self.args = args
+        self.kwargs = kwargs
+        self._closed = False
+
+    async def aclose(self) -> None:
+        await asyncio.sleep(0)
+        self._closed = True
+
+
+# ---- ResponseBackend test ----
+class _FakeResponses:
+    async def create(self, **kwargs: Any):
+        # Build a stream where each chunk has a 'type' attribute that ends the response quickly
+        def _item():
+            return SimpleNamespace(type="response.completed", response={"output": []})
+
+        return _FakeStreamSingleStop(_item)
+
+
+class _FakeOpenAIClient(_FakeAsyncClientBase):
+    def __init__(self, *args: Any, **kwargs: Any):
+        super().__init__(*args, **kwargs)
+        self.responses = _FakeResponses()
+
+
+@pytest.mark.asyncio
+async def test_response_backend_stream_closes_client(monkeypatch):
+    import sys
+
+    created: List[_FakeOpenAIClient] = []
+
+    def _factory(*args: Any, **kwargs: Any) -> _FakeOpenAIClient:
+        client = _FakeOpenAIClient(*args, **kwargs)
+        created.append(client)
+        return client
+
+    # Inject fake openai module so in-function import resolves to our factory
+    monkeypatch.setitem(sys.modules, "openai", SimpleNamespace(AsyncOpenAI=_factory))
+
+    backend = ResponseBackend()
+
+    messages = [{"role": "user", "content": "hi"}]
+
+    # Drain the stream
+    async for _ in backend.stream_with_tools(messages, tools=[], model="gpt-4o-mini"):
+        pass
+
+    assert len(created) == 1
+    # Expectation: backend should close client to avoid event-loop errors
+    assert created[0]._closed is True
+
+
+# ---- GrokBackend test ----
+class _FakeChatCompletions:
+    async def create(self, **kwargs: Any):
+        # Yield a single finishing chunk similar to Chat Completions
+        def _item():
+            choice = SimpleNamespace(delta=None, finish_reason="stop")
+            return SimpleNamespace(choices=[choice], usage=None)
+
+        return _FakeStreamSingleStop(_item)
+
+
+class _FakeOpenAIClientForGrok(_FakeAsyncClientBase):
+    def __init__(self, *args: Any, **kwargs: Any):
+        super().__init__(*args, **kwargs)
+        self.chat = SimpleNamespace(completions=_FakeChatCompletions())
+
+
+@pytest.mark.asyncio
+async def test_grok_backend_stream_closes_client(monkeypatch):
+    import sys
+
+    created: List[_FakeOpenAIClientForGrok] = []
+
+    def _factory(*args: Any, **kwargs: Any) -> _FakeOpenAIClientForGrok:
+        client = _FakeOpenAIClientForGrok(*args, **kwargs)
+        created.append(client)
+        return client
+
+    # Inject fake openai module for dynamic import inside function
+    monkeypatch.setitem(sys.modules, "openai", SimpleNamespace(AsyncOpenAI=_factory))
+
+    backend = GrokBackend()
+    messages = [{"role": "user", "content": "hi"}]
+
+    async for _ in backend.stream_with_tools(messages, tools=[], model="grok-2-mini"):
+        pass
+
+    assert len(created) == 1
+    # Expectation: backend should close client to avoid event-loop errors
+    assert created[0]._closed is True
+
+
+# ---- ClaudeBackend test ----
+class _FakeClaudeMessages:
+    async def create(self, **kwargs: Any):
+        # Stream that yields a single message_stop event
+        def _item():
+            return SimpleNamespace(type="message_stop")
+
+        return _FakeStreamSingleStop(_item)
+
+
+class _FakeAnthropicClient(_FakeAsyncClientBase):
+    def __init__(self, *args: Any, **kwargs: Any):
+        super().__init__(*args, **kwargs)
+        # Provide both .messages and .beta.messages to cover branches
+        self.messages = _FakeClaudeMessages()
+        self.beta = SimpleNamespace(messages=_FakeClaudeMessages())
+
+
+@pytest.mark.asyncio
+async def test_claude_backend_stream_closes_client(monkeypatch):
+    import sys
+
+    created: List[_FakeAnthropicClient] = []
+
+    def _factory(*args: Any, **kwargs: Any) -> _FakeAnthropicClient:
+        client = _FakeAnthropicClient(*args, **kwargs)
+        created.append(client)
+        return client
+
+    # Inject fake anthropic module for dynamic import inside function
+    monkeypatch.setitem(sys.modules, "anthropic", SimpleNamespace(AsyncAnthropic=_factory))
+
+    backend = ClaudeBackend()
+    messages = [{"role": "user", "content": "hi"}]
+
+    async for _ in backend.stream_with_tools(messages, tools=[], model="claude-3.7-sonnet"):
+        pass
+
+    assert len(created) == 1
+    # Expectation: backend should close client to avoid event-loop errors
+    assert created[0]._closed is True