connectonion-0.4.12-py3-none-any.whl → connectonion-0.5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. connectonion/__init__.py +11 -5
  2. connectonion/agent.py +44 -42
  3. connectonion/cli/commands/init.py +1 -1
  4. connectonion/cli/commands/project_cmd_lib.py +4 -4
  5. connectonion/cli/commands/reset_commands.py +1 -1
  6. connectonion/cli/docs/co-vibecoding-principles-docs-contexts-all-in-one.md +15 -11
  7. connectonion/cli/templates/minimal/agent.py +2 -2
  8. connectonion/console.py +55 -3
  9. connectonion/events.py +96 -17
  10. connectonion/llm.py +21 -3
  11. connectonion/logger.py +289 -0
  12. connectonion/prompt_files/eval_expected.md +12 -0
  13. connectonion/tool_executor.py +43 -32
  14. connectonion/usage.py +4 -0
  15. connectonion/useful_events_handlers/reflect.py +13 -9
  16. connectonion/useful_plugins/__init__.py +2 -1
  17. connectonion/useful_plugins/calendar_plugin.py +2 -2
  18. connectonion/useful_plugins/eval.py +130 -0
  19. connectonion/useful_plugins/gmail_plugin.py +4 -4
  20. connectonion/useful_plugins/image_result_formatter.py +4 -3
  21. connectonion/useful_plugins/re_act.py +14 -56
  22. connectonion/useful_plugins/shell_approval.py +2 -2
  23. connectonion/useful_tools/memory.py +4 -0
  24. {connectonion-0.4.12.dist-info → connectonion-0.5.0.dist-info}/METADATA +48 -48
  25. {connectonion-0.4.12.dist-info → connectonion-0.5.0.dist-info}/RECORD +27 -71
  26. {connectonion-0.4.12.dist-info → connectonion-0.5.0.dist-info}/WHEEL +1 -2
  27. connectonion/cli/templates/email-agent/.env.example +0 -23
  28. connectonion/cli/templates/email-agent/README.md +0 -240
  29. connectonion/cli/templates/email-agent/agent.py +0 -374
  30. connectonion/cli/templates/email-agent/demo.py +0 -71
  31. connectonion/cli/templates/meta-agent/.env.example +0 -11
  32. connectonion/cli/templates/minimal/.env.example +0 -5
  33. connectonion/cli/templates/playwright/.env.example +0 -5
  34. connectonion-0.4.12.dist-info/top_level.txt +0 -2
  35. tests/__init__.py +0 -0
  36. tests/cli/__init__.py +0 -1
  37. tests/cli/argparse_runner.py +0 -85
  38. tests/cli/conftest.py +0 -5
  39. tests/cli/test_browser_cli.py +0 -61
  40. tests/cli/test_cli.py +0 -143
  41. tests/cli/test_cli_auth_google.py +0 -344
  42. tests/cli/test_cli_auth_microsoft.py +0 -256
  43. tests/cli/test_cli_create.py +0 -283
  44. tests/cli/test_cli_help.py +0 -200
  45. tests/cli/test_cli_init.py +0 -318
  46. tests/conftest.py +0 -283
  47. tests/debug_gemini_models.py +0 -23
  48. tests/fixtures/__init__.py +0 -1
  49. tests/fixtures/test_tools.py +0 -112
  50. tests/fixtures/trust_fixtures.py +0 -257
  51. tests/real_api/__init__.py +0 -0
  52. tests/real_api/conftest.py +0 -9
  53. tests/real_api/test_llm_do.py +0 -174
  54. tests/real_api/test_llm_do_comprehensive.py +0 -527
  55. tests/real_api/test_production_client.py +0 -94
  56. tests/real_api/test_real_anthropic.py +0 -100
  57. tests/real_api/test_real_api.py +0 -113
  58. tests/real_api/test_real_auth.py +0 -130
  59. tests/real_api/test_real_email.py +0 -95
  60. tests/real_api/test_real_gemini.py +0 -96
  61. tests/real_api/test_real_llm_do.py +0 -81
  62. tests/real_api/test_real_managed.py +0 -208
  63. tests/real_api/test_real_multi_llm.py +0 -454
  64. tests/real_api/test_real_openai.py +0 -100
  65. tests/real_api/test_responses_parse.py +0 -88
  66. tests/test_diff_writer.py +0 -126
  67. tests/test_events.py +0 -677
  68. tests/test_gemini_co.py +0 -70
  69. tests/test_image_result_formatter.py +0 -88
  70. tests/test_plugin_system.py +0 -110
  71. tests/utils/__init__.py +0 -1
  72. tests/utils/config_helpers.py +0 -188
  73. tests/utils/mock_helpers.py +0 -237
  74. {connectonion-0.4.12.dist-info → connectonion-0.5.0.dist-info}/entry_points.txt +0 -0
tests/test_gemini_co.py DELETED
@@ -1,70 +0,0 @@
- """Test Gemini models via direct API and co/ managed keys"""
- import os
- import pytest
- from connectonion import Agent, llm_do
-
- # Set Gemini API key for direct tests
- os.environ.setdefault('GEMINI_API_KEY', 'AIzaSyCMBM2LTb-5AYtrjMa1xfPdqj8NNSH9F34')
-
-
- @pytest.mark.real_api
- def test_gemini_flash_direct():
-     """Test Gemini 2.5 Flash via direct API"""
-     agent = Agent(
-         name="test-gemini-direct",
-         model="gemini-2.5-flash"  # Direct, no co/ prefix
-     )
-     response = agent.input("Say hello in one word")
-     assert response, "No response from Gemini Flash"
-     print(f"Gemini Flash response: {response}")
-
-
- @pytest.mark.real_api
- def test_gemini_pro_direct():
-     """Test Gemini 2.5 Pro via direct API"""
-     agent = Agent(
-         name="test-gemini-pro-direct",
-         model="gemini-2.5-pro"  # Direct, no co/ prefix
-     )
-     response = agent.input("What is 2+2? Just the number.")
-     assert response, "No response from Gemini Pro"
-     print(f"Gemini Pro response: {response}")
-
-
- @pytest.mark.real_api
- def test_llm_do_gemini_direct():
-     """Test llm_do with Gemini model via direct API"""
-     result = llm_do("Say hi", model="gemini-2.5-flash")
-     assert result, "No result from llm_do with Gemini"
-     print(f"llm_do Gemini result: {result}")
-
-
- @pytest.mark.real_api
- @pytest.mark.skip(reason="Requires co/ account with credits")
- def test_gemini_flash_via_co():
-     """Test Gemini 2.5 Flash via co/ managed keys"""
-     agent = Agent(
-         name="test-gemini-co",
-         model="co/gemini-2.5-flash"
-     )
-     response = agent.input("Say hello in one word")
-     assert response, "No response from Gemini Flash"
-     print(f"Gemini Flash (co/) response: {response}")
-
-
- if __name__ == "__main__":
-     print("Testing Gemini models...")
-
-     print("\n1. Testing Gemini Flash (direct API)...")
-     test_gemini_flash_direct()
-     print(" ✓ Passed")
-
-     print("\n2. Testing Gemini Pro (direct API)...")
-     test_gemini_pro_direct()
-     print(" ✓ Passed")
-
-     print("\n3. Testing llm_do with Gemini (direct API)...")
-     test_llm_do_gemini_direct()
-     print(" ✓ Passed")
-
-     print("\n✅ All Gemini tests passed!")
tests/test_image_result_formatter.py DELETED
@@ -1,88 +0,0 @@
- """Tests for image_result_formatter plugin"""
-
- import pytest
- from connectonion.useful_plugins.image_result_formatter import _is_base64_image
-
-
- class TestIsBase64Image:
-     """Test base64 image detection logic"""
-
-     def test_data_url_png(self):
-         """Should detect PNG data URL"""
-         # Tiny 1x1 red PNG
-         data_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8DwHwAFBQIAX8jx0gAAAABJRU5ErkJggg=="
-
-         is_img, mime, data = _is_base64_image(data_url)
-
-         assert is_img is True
-         assert mime == "image/png"
-         assert data == "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8DwHwAFBQIAX8jx0gAAAABJRU5ErkJggg=="
-
-     def test_data_url_jpeg(self):
-         """Should detect JPEG data URL"""
-         data_url = "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAIBAQIBAQICAgICAgICAwUDAwMDAwYEBAMFBwYHBw=="
-
-         is_img, mime, data = _is_base64_image(data_url)
-
-         assert is_img is True
-         assert mime == "image/jpeg"
-         assert "/9j/4AAQSkZJRgABAQEAYABgAAD" in data
-
-     def test_data_url_in_mixed_content(self):
-         """Should detect data URL even when mixed with other text"""
-         mixed = "Screenshot saved! Here's the data: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8DwHwAFBQIAX8jx0gAAAABJRU5ErkJggg=="
-
-         is_img, mime, data = _is_base64_image(mixed)
-
-         assert is_img is True
-         assert mime == "image/png"
-
-     def test_long_plain_base64(self):
-         """Should detect long plain base64 string (>100 chars)"""
-         # Create a base64 string longer than 100 characters
-         long_base64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8DwHwAFBQIAX8jx0gAAAABJRU5ErkJggg==" * 2
-
-         is_img, mime, data = _is_base64_image(long_base64)
-
-         assert is_img is True
-         assert mime == "image/png"  # Defaults to PNG
-         assert data == long_base64.strip()
-
-     def test_short_base64_not_detected(self):
-         """Should NOT detect short base64-like strings (<100 chars)"""
-         short_base64 = "ABC123DEF456GHI789"
-
-         is_img, mime, data = _is_base64_image(short_base64)
-
-         assert is_img is False
-
-     def test_regular_text_not_detected(self):
-         """Should NOT detect regular text"""
-         text = "This is just regular text, not an image at all"
-
-         is_img, mime, data = _is_base64_image(text)
-
-         assert is_img is False
-
-     def test_non_string_input(self):
-         """Should handle non-string input gracefully"""
-         is_img, mime, data = _is_base64_image(123)
-
-         assert is_img is False
-         assert mime == ""
-         assert data == ""
-
-     def test_empty_string(self):
-         """Should handle empty string"""
-         is_img, mime, data = _is_base64_image("")
-
-         assert is_img is False
-
-     def test_webp_data_url(self):
-         """Should detect WebP data URL"""
-         data_url = "data:image/webp;base64,UklGRiQAAABXRUJQVlA4IBgAAAAwAQCdASoBAAEAAwA0JaQAA3AA/vuUAAA="
-
-         is_img, mime, data = _is_base64_image(data_url)
-
-         assert is_img is True
-         assert mime == "image/webp"
tests/test_plugin_system.py DELETED
@@ -1,110 +0,0 @@
- """Test the plugin system implementation."""
- from connectonion import Agent, after_llm, after_tool
-
- # Simple plugin - just a list
- def log_llm(agent):
-     pass  # Just a marker
-
- simple_logger = [after_llm(log_llm)]
-
-
- # Plugin factory for configuration
- def make_counter():
-     """Create a counter plugin with state."""
-     counts = {'llm': 0, 'tool': 0}
-
-     def count_llm(agent):
-         counts['llm'] += 1
-
-     def count_tool(agent):
-         counts['tool'] += 1
-
-     return [
-         after_llm(count_llm),
-         after_tool(count_tool)
-     ]
-
-
- def test_simple_plugin():
-     """Test that simple list plugins register correctly."""
-     agent = Agent(
-         "test_simple",
-         plugins=[simple_logger],  # Pass the list directly
-         log=False
-     )
-
-     # Should have 1 after_llm event handler
-     assert len(agent.events['after_llm']) == 1
-     # Total events should be 1
-     total_events = sum(len(handlers) for handlers in agent.events.values())
-     assert total_events == 1
-
-
- def test_factory_plugin():
-     """Test that plugin factories work for configuration."""
-     counter = make_counter()  # Create the list
-     agent = Agent(
-         "test_factory",
-         plugins=[counter],  # Pass the list
-         log=False
-     )
-
-     # Should have 1 after_llm and 1 after_tool handler
-     assert len(agent.events['after_llm']) == 1
-     assert len(agent.events['after_tool']) == 1
-     # Total events should be 2
-     total_events = sum(len(handlers) for handlers in agent.events.values())
-     assert total_events == 2
-
-
- def test_multiple_plugins():
-     """Test that multiple plugins can be registered together."""
-     counter = make_counter()
-     agent = Agent(
-         "test_multiple",
-         plugins=[simple_logger, counter],  # Two lists
-         log=False
-     )
-
-     # Should have 2 after_llm handlers (one from each plugin)
-     assert len(agent.events['after_llm']) == 2
-     # Should have 1 after_tool handler (from counter)
-     assert len(agent.events['after_tool']) == 1
-     # Total events should be 3
-     total_events = sum(len(handlers) for handlers in agent.events.values())
-     assert total_events == 3
-
-
- def test_plugins_with_on_events():
-     """Test that plugins and on_events can be used together."""
-     def custom_event(agent):
-         pass
-
-     agent = Agent(
-         "test_combined",
-         plugins=[simple_logger],
-         on_events=[after_tool(custom_event)],
-         log=False
-     )
-
-     # Should have 1 after_llm from plugin
-     assert len(agent.events['after_llm']) == 1
-     # Should have 1 after_tool from on_events
-     assert len(agent.events['after_tool']) == 1
-     # Total events should be 2
-     total_events = sum(len(handlers) for handlers in agent.events.values())
-     assert total_events == 2
-
-
- def test_reusable_plugin():
-     """Test that plugins can be reused across multiple agents."""
-     # Create plugin once
-     counter = make_counter()
-
-     # Use it in multiple agents
-     agent1 = Agent("test1", plugins=[counter], log=False)
-     agent2 = Agent("test2", plugins=[counter], log=False)
-
-     # Both should have the same events
-     assert len(agent1.events['after_llm']) == 1
-     assert len(agent2.events['after_llm']) == 1
tests/utils/__init__.py DELETED
@@ -1 +0,0 @@
- """Test utilities package."""
tests/utils/config_helpers.py DELETED
@@ -1,188 +0,0 @@
- """Test configuration for ConnectOnion tests."""
-
- import os
- from pathlib import Path
-
- # Fixed test account details
- TEST_ACCOUNT = {
-     "public_key": "04e1c4ae3c57d716383153479dae869e51e86d43d88db8dfa22fba7533f3968d",
-     "private_key": "test_private_key_do_not_use_in_production",
-     "address": "0x04e1c4ae3c57d716383153479dae869e51e86d43d88db8dfa22fba7533f3968d",
-     "short_address": "0x04e1c4ae",
-     "email": "0x04e1c4ae@mail.openonion.ai",
-     "email_active": True
- }
-
- # Test JWT token (for testing only, not valid for production)
- TEST_JWT_TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJwdWJsaWNfa2V5IjoiMDRlMWM0YWUzYzU3ZDcxNjM4MzE1MzQ3OWRhZTg2OWU1MWU4NmQ0M2Q4OGRiOGRmYTIyZmJhNzUzM2YzOTY4ZCIsImV4cCI6OTk5OTk5OTk5OX0.test_signature"
-
- # Test backend URL - defaults to production for real API tests
- TEST_BACKEND_URL = os.getenv("TEST_BACKEND_URL", "https://oo.openonion.ai")
-
- # Test configuration for .co directory
- TEST_CONFIG_TOML = {
-     "project": {
-         "name": "test-project",
-         "created": "2024-01-01T00:00:00",
-         "framework_version": "0.0.5",
-     },
-     "cli": {
-         "version": "1.0.0",
-         "command": "co init",
-         "template": "test",
-     },
-     "agent": {
-         "address": TEST_ACCOUNT["address"],
-         "short_address": TEST_ACCOUNT["short_address"],
-         "email": TEST_ACCOUNT["email"],
-         "email_active": TEST_ACCOUNT["email_active"],
-         "created_at": "2024-01-01T00:00:00",
-         "algorithm": "ed25519",
-         "default_model": "gpt-4o-mini",
-         "max_iterations": 10,
-     },
-     "auth": {
-         "token": TEST_JWT_TOKEN,
-         "public_key": TEST_ACCOUNT["public_key"],
-         "authenticated_at": "2024-01-01T00:00:00"
-     }
- }
-
- # Sample test emails
- SAMPLE_EMAILS = [
-     {
-         "id": "msg_test_001",
-         "from": "alice@example.com",
-         "subject": "Test Email 1",
-         "message": "This is test email number 1",
-         "timestamp": "2024-01-15T10:00:00Z",
-         "read": False
-     },
-     {
-         "id": "msg_test_002",
-         "from": "bob@example.com",
-         "subject": "Test Email 2",
-         "message": "This is test email number 2",
-         "timestamp": "2024-01-15T11:00:00Z",
-         "read": True
-     },
-     {
-         "id": "msg_test_003",
-         "from": "charlie@example.com",
-         "subject": "Urgent: Test Email 3",
-         "message": "This is an urgent test email",
-         "timestamp": "2024-01-15T12:00:00Z",
-         "read": False
-     }
- ]
-
-
- def create_test_project(base_dir: Path = None) -> Path:
-     """Create a test ConnectOnion project with fixed test account.
-
-     Args:
-         base_dir: Base directory to create project in. Uses temp dir if None.
-
-     Returns:
-         Path to the created project directory
-     """
-     import tempfile
-     import toml
-
-     if base_dir is None:
-         base_dir = Path(tempfile.mkdtemp(prefix="co_test_"))
-     else:
-         base_dir = Path(base_dir)
-         base_dir.mkdir(parents=True, exist_ok=True)
-
-     # Create .co directory structure
-     co_dir = base_dir / ".co"
-     co_dir.mkdir(exist_ok=True)
-
-     keys_dir = co_dir / "keys"
-     keys_dir.mkdir(exist_ok=True)
-
-     # Write test config
-     config_path = co_dir / "config.toml"
-     with open(config_path, "w") as f:
-         toml.dump(TEST_CONFIG_TOML, f)
-
-     # Write test keys (for testing only)
-     public_key_path = keys_dir / "public_key.txt"
-     public_key_path.write_text(TEST_ACCOUNT["public_key"])
-
-     private_key_path = keys_dir / "private_key.txt"
-     private_key_path.write_text(TEST_ACCOUNT["private_key"])
-
-     # Create a sample agent.py
-     agent_file = base_dir / "agent.py"
-     agent_file.write_text("""#!/usr/bin/env python3
- \"\"\"Test agent for ConnectOnion.\"\"\"
-
- from connectonion import Agent, send_email, get_emails, mark_read
-
- def main():
-     agent = Agent(
-         "test-agent",
-         tools=[send_email, get_emails, mark_read],
-         model="gpt-4o-mini"
-     )
-
-     # Test email functionality
-     emails = get_emails()
-     print(f"Found {len(emails)} emails")
-
-     for email in emails[:3]:
-         print(f"- {email['from']}: {email['subject']}")
-
- if __name__ == "__main__":
-     main()
- """)
-
-     # Don't create a fake .env - use environment variables from tests/.env
-     # The tests/.env file is loaded by the module-level load_dotenv in __init__.py
-     # and re-loaded by ProjectHelper.__enter__() to ensure test env vars are available
-
-     return base_dir
-
-
- def cleanup_test_project(project_dir: Path):
-     """Clean up a test project directory.
-
-     Args:
-         project_dir: Path to the project directory to clean up
-     """
-     import shutil
-
-     if project_dir.exists() and ".co" in os.listdir(project_dir):
-         shutil.rmtree(project_dir)
-
-
- # Context manager for test projects
- class ProjectHelper:
-     """Context manager for creating and cleaning up test projects."""
-
-     def __init__(self, base_dir: Path = None):
-         self.base_dir = base_dir
-         self.project_dir = None
-         self.original_cwd = None
-
-     def __enter__(self):
-         import os
-         from dotenv import load_dotenv
-         from pathlib import Path
-         self.original_cwd = os.getcwd()
-         self.project_dir = create_test_project(self.base_dir)
-         os.chdir(self.project_dir)
-         # Load environment variables from tests/.env
-         # This ensures real API keys are available for integration tests
-         tests_env = Path(__file__).parent.parent / ".env"
-         if tests_env.exists():
-             load_dotenv(tests_env, override=True)
-         return self.project_dir
-
-     def __exit__(self, exc_type, exc_val, exc_tb):
-         import os
-         os.chdir(self.original_cwd)
-         if self.project_dir and self.project_dir.exists():
-             cleanup_test_project(self.project_dir)
tests/utils/mock_helpers.py DELETED
@@ -1,237 +0,0 @@
- """Mock helpers for ConnectOnion testing."""
-
- import json
- from unittest.mock import Mock, MagicMock
- from typing import Dict, List, Any, Optional
- from connectonion.llm import LLMResponse, ToolCall
- from connectonion.usage import TokenUsage
-
-
- class OpenAIMockBuilder:
-     """Builder for creating OpenAI API mocks."""
-
-     @staticmethod
-     def simple_response(content: str, model: str = "gpt-3.5-turbo") -> Mock:
-         """Create mock for text-only responses."""
-         mock_response = MagicMock()
-         mock_response.id = "chatcmpl-test123"
-         mock_response.object = "chat.completion"
-         mock_response.model = model
-         mock_response.choices = [MagicMock()]
-         mock_response.choices[0].message.content = content
-         mock_response.choices[0].message.tool_calls = None
-         mock_response.choices[0].finish_reason = "stop"
-         return mock_response
-
-     @staticmethod
-     def tool_call_response(
-         tool_name: str,
-         arguments: Dict[str, Any],
-         call_id: str = "call_test123"
-     ) -> Mock:
-         """Create mock for tool calling responses."""
-         mock_response = MagicMock()
-         mock_response.id = "chatcmpl-test456"
-         mock_response.object = "chat.completion"
-         mock_response.choices = [MagicMock()]
-         mock_response.choices[0].message.content = None
-
-         # Create tool call mock
-         tool_call = MagicMock()
-         tool_call.id = call_id
-         tool_call.type = "function"
-         tool_call.function.name = tool_name
-         tool_call.function.arguments = json.dumps(arguments)
-
-         mock_response.choices[0].message.tool_calls = [tool_call]
-         mock_response.choices[0].finish_reason = "tool_calls"
-         return mock_response
-
-     @staticmethod
-     def error_response(error_type: str, message: str) -> Exception:
-         """Create mock for API errors."""
-         from openai import APIError, RateLimitError, AuthenticationError
-
-         error_map = {
-             "rate_limit": RateLimitError,
-             "auth": AuthenticationError,
-             "api": APIError
-         }
-
-         error_class = error_map.get(error_type, APIError)
-         return error_class(
-             message=message,
-             response=MagicMock(),
-             body={"error": {"message": message}}
-         )
-
-     @staticmethod
-     def multi_response_sequence(responses: List[Dict[str, Any]]) -> List[Mock]:
-         """Create sequence of mock responses for side_effect."""
-         mock_responses = []
-
-         for response_data in responses:
-             if response_data.get("type") == "text":
-                 mock_responses.append(
-                     OpenAIMockBuilder.simple_response(response_data["content"])
-                 )
-             elif response_data.get("type") == "tool_call":
-                 mock_responses.append(
-                     OpenAIMockBuilder.tool_call_response(
-                         response_data["tool_name"],
-                         response_data["arguments"],
-                         response_data.get("call_id", "call_test")
-                     )
-                 )
-             elif response_data.get("type") == "error":
-                 mock_responses.append(
-                     OpenAIMockBuilder.error_response(
-                         response_data["error_type"],
-                         response_data["message"]
-                     )
-                 )
-
-         return mock_responses
-
-
- class LLMResponseBuilder:
-     """Builder for creating LLMResponse objects."""
-
-     @staticmethod
-     def text_response(content: str) -> LLMResponse:
-         """Create text-only LLMResponse."""
-         return LLMResponse(
-             content=content,
-             tool_calls=[],
-             raw_response=None,
-             usage=TokenUsage(),
-         )
-
-     @staticmethod
-     def tool_call_response(
-         tool_name: str,
-         arguments: Dict[str, Any],
-         call_id: str = "call_test"
-     ) -> LLMResponse:
-         """Create tool calling LLMResponse."""
-         tool_call = ToolCall(
-             name=tool_name,
-             arguments=arguments,
-             id=call_id
-         )
-
-         return LLMResponse(
-             content=None,
-             tool_calls=[tool_call],
-             raw_response=None,
-             usage=TokenUsage(),
-         )
-
-     @staticmethod
-     def multi_tool_response(tool_calls: List[Dict[str, Any]]) -> LLMResponse:
-         """Create multi-tool calling LLMResponse."""
-         calls = []
-         for i, call_data in enumerate(tool_calls):
-             calls.append(ToolCall(
-                 name=call_data["name"],
-                 arguments=call_data["arguments"],
-                 id=call_data.get("id", f"call_test_{i}")
-             ))
-
-         return LLMResponse(
-             content=None,
-             tool_calls=calls,
-             raw_response=None,
-             usage=TokenUsage(),
-         )
-
-
- class FileSystemMocker:
-     """Mock file system operations."""
-
-     @staticmethod
-     def create_mock_file_error(error_type: str, message: str = None):
-         """Create file system error mocks."""
-         error_map = {
-             "not_found": FileNotFoundError,
-             "permission": PermissionError,
-             "disk_full": OSError
-         }
-
-         error_class = error_map.get(error_type, OSError)
-         default_messages = {
-             "not_found": "File not found",
-             "permission": "Permission denied",
-             "disk_full": "No space left on device"
-         }
-
-         error_message = message or default_messages.get(error_type, "File system error")
-         return error_class(error_message)
-
-
- class AgentWorkflowMocker:
-     """Mock complex agent workflows."""
-
-     @staticmethod
-     def calculator_workflow():
-         """Mock a calculator workflow sequence."""
-         return [
-             LLMResponseBuilder.tool_call_response(
-                 "calculator",
-                 {"expression": "2 + 2"}
-             ),
-             LLMResponseBuilder.text_response("The result is 4.")
-         ]
-
-     @staticmethod
-     def multi_tool_workflow():
-         """Mock a multi-tool workflow sequence."""
-         return [
-             LLMResponseBuilder.tool_call_response(
-                 "calculator",
-                 {"expression": "100 / 4"}
-             ),
-             LLMResponseBuilder.tool_call_response(
-                 "current_time",
-                 {}
-             ),
-             LLMResponseBuilder.text_response(
-                 "The result is 25.0, calculated at the current time."
-             )
-         ]
-
-     @staticmethod
-     def error_recovery_workflow():
-         """Mock a workflow with error recovery."""
-         return [
-             LLMResponseBuilder.tool_call_response(
-                 "calculator",
-                 {"expression": "invalid"}  # This will cause an error
-             ),
-             LLMResponseBuilder.text_response(
-                 "I apologize for the error. Let me try a valid calculation."
-             ),
-             LLMResponseBuilder.tool_call_response(
-                 "calculator",
-                 {"expression": "2 + 2"}
-             ),
-             LLMResponseBuilder.text_response("The result is 4.")
-         ]
-
-
- # Convenience functions for common scenarios
- def create_successful_agent_mock(responses: List[str]) -> Mock:
-     """Create a mock agent that returns successful text responses."""
-     mock_agent = Mock()
-     mock_agent.run.side_effect = responses
-     mock_agent.name = "test_agent"
-     mock_agent.list_tools.return_value = ["calculator", "current_time"]
-     return mock_agent
-
-
- def create_failing_agent_mock(error_message: str = "Agent error") -> Mock:
-     """Create a mock agent that fails."""
-     mock_agent = Mock()
-     mock_agent.run.side_effect = Exception(error_message)
-     mock_agent.name = "failing_agent"
-     return mock_agent