empathy-framework 5.1.1__py3-none-any.whl → 5.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/METADATA +79 -6
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/RECORD +83 -64
- empathy_os/__init__.py +1 -1
- empathy_os/cache/hybrid.py +5 -1
- empathy_os/cli/commands/batch.py +8 -0
- empathy_os/cli/commands/profiling.py +4 -0
- empathy_os/cli/commands/workflow.py +8 -4
- empathy_os/cli_router.py +9 -0
- empathy_os/config.py +15 -2
- empathy_os/core_modules/__init__.py +15 -0
- empathy_os/dashboard/simple_server.py +62 -30
- empathy_os/mcp/__init__.py +10 -0
- empathy_os/mcp/server.py +506 -0
- empathy_os/memory/control_panel.py +1 -131
- empathy_os/memory/control_panel_support.py +145 -0
- empathy_os/memory/encryption.py +159 -0
- empathy_os/memory/long_term.py +46 -631
- empathy_os/memory/long_term_types.py +99 -0
- empathy_os/memory/mixins/__init__.py +25 -0
- empathy_os/memory/mixins/backend_init_mixin.py +249 -0
- empathy_os/memory/mixins/capabilities_mixin.py +208 -0
- empathy_os/memory/mixins/handoff_mixin.py +208 -0
- empathy_os/memory/mixins/lifecycle_mixin.py +49 -0
- empathy_os/memory/mixins/long_term_mixin.py +352 -0
- empathy_os/memory/mixins/promotion_mixin.py +109 -0
- empathy_os/memory/mixins/short_term_mixin.py +182 -0
- empathy_os/memory/short_term.py +61 -12
- empathy_os/memory/simple_storage.py +302 -0
- empathy_os/memory/storage_backend.py +167 -0
- empathy_os/memory/types.py +8 -3
- empathy_os/memory/unified.py +21 -1120
- empathy_os/meta_workflows/cli_commands/__init__.py +56 -0
- empathy_os/meta_workflows/cli_commands/agent_commands.py +321 -0
- empathy_os/meta_workflows/cli_commands/analytics_commands.py +442 -0
- empathy_os/meta_workflows/cli_commands/config_commands.py +232 -0
- empathy_os/meta_workflows/cli_commands/memory_commands.py +182 -0
- empathy_os/meta_workflows/cli_commands/template_commands.py +354 -0
- empathy_os/meta_workflows/cli_commands/workflow_commands.py +382 -0
- empathy_os/meta_workflows/cli_meta_workflows.py +52 -1802
- empathy_os/models/telemetry/__init__.py +71 -0
- empathy_os/models/telemetry/analytics.py +594 -0
- empathy_os/models/telemetry/backend.py +196 -0
- empathy_os/models/telemetry/data_models.py +431 -0
- empathy_os/models/telemetry/storage.py +489 -0
- empathy_os/orchestration/__init__.py +35 -0
- empathy_os/orchestration/execution_strategies.py +481 -0
- empathy_os/orchestration/meta_orchestrator.py +488 -1
- empathy_os/routing/workflow_registry.py +36 -0
- empathy_os/telemetry/agent_coordination.py +2 -3
- empathy_os/telemetry/agent_tracking.py +26 -7
- empathy_os/telemetry/approval_gates.py +18 -24
- empathy_os/telemetry/cli.py +19 -724
- empathy_os/telemetry/commands/__init__.py +14 -0
- empathy_os/telemetry/commands/dashboard_commands.py +696 -0
- empathy_os/telemetry/event_streaming.py +7 -3
- empathy_os/telemetry/feedback_loop.py +28 -15
- empathy_os/tools.py +183 -0
- empathy_os/workflows/__init__.py +5 -0
- empathy_os/workflows/autonomous_test_gen.py +860 -161
- empathy_os/workflows/base.py +6 -2
- empathy_os/workflows/code_review.py +4 -1
- empathy_os/workflows/document_gen/__init__.py +25 -0
- empathy_os/workflows/document_gen/config.py +30 -0
- empathy_os/workflows/document_gen/report_formatter.py +162 -0
- empathy_os/workflows/{document_gen.py → document_gen/workflow.py} +5 -184
- empathy_os/workflows/output.py +4 -1
- empathy_os/workflows/progress.py +8 -2
- empathy_os/workflows/security_audit.py +2 -2
- empathy_os/workflows/security_audit_phase3.py +7 -4
- empathy_os/workflows/seo_optimization.py +633 -0
- empathy_os/workflows/test_gen/__init__.py +52 -0
- empathy_os/workflows/test_gen/ast_analyzer.py +249 -0
- empathy_os/workflows/test_gen/config.py +88 -0
- empathy_os/workflows/test_gen/data_models.py +38 -0
- empathy_os/workflows/test_gen/report_formatter.py +289 -0
- empathy_os/workflows/test_gen/test_templates.py +381 -0
- empathy_os/workflows/test_gen/workflow.py +655 -0
- empathy_os/workflows/test_gen.py +42 -1905
- empathy_os/cli/parsers/cache 2.py +0 -65
- empathy_os/cli_router 2.py +0 -416
- empathy_os/dashboard/app 2.py +0 -512
- empathy_os/dashboard/simple_server 2.py +0 -403
- empathy_os/dashboard/standalone_server 2.py +0 -536
- empathy_os/memory/types 2.py +0 -441
- empathy_os/models/adaptive_routing 2.py +0 -437
- empathy_os/models/telemetry.py +0 -1660
- empathy_os/project_index/scanner_parallel 2.py +0 -291
- empathy_os/telemetry/agent_coordination 2.py +0 -478
- empathy_os/telemetry/agent_tracking 2.py +0 -350
- empathy_os/telemetry/approval_gates 2.py +0 -563
- empathy_os/telemetry/event_streaming 2.py +0 -405
- empathy_os/telemetry/feedback_loop 2.py +0 -557
- empathy_os/vscode_bridge 2.py +0 -173
- empathy_os/workflows/progressive/__init__ 2.py +0 -92
- empathy_os/workflows/progressive/cli 2.py +0 -242
- empathy_os/workflows/progressive/core 2.py +0 -488
- empathy_os/workflows/progressive/orchestrator 2.py +0 -701
- empathy_os/workflows/progressive/reports 2.py +0 -528
- empathy_os/workflows/progressive/telemetry 2.py +0 -280
- empathy_os/workflows/progressive/test_gen 2.py +0 -514
- empathy_os/workflows/progressive/workflow 2.py +0 -628
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/WHEEL +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,381 @@
|
|
|
1
|
+
"""Test Template Generation.
|
|
2
|
+
|
|
3
|
+
Functions to generate pytest test code for functions and classes.
|
|
4
|
+
|
|
5
|
+
Copyright 2025 Smart-AI-Memory
|
|
6
|
+
Licensed under Fair Source License 0.9
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def generate_test_for_function(module: str, func: dict) -> str:
    """Generate executable pytest source for a function based on AST analysis.

    Args:
        module: Dotted import path of the module that defines the function.
        func: AST-analysis dict with keys such as ``name``, ``params``
            (list of ``(name, type, default)`` tuples), ``param_names``,
            ``is_async``, ``return_type``, ``raises`` and ``has_side_effects``.

    Returns:
        Python source containing the import lines plus one or more generated
        test functions, joined with newlines.
    """
    name = func["name"]
    params = func.get("params") or []  # List of (name, type, default) tuples, handle None
    param_names = func.get("param_names") or [p[0] if isinstance(p, tuple) else p for p in params]
    is_async = func.get("is_async", False)
    return_type = func.get("return_type")
    raises = func.get("raises") or []
    has_side_effects = func.get("has_side_effects", False)

    # Async functions need the asyncio marker, ``async def`` and ``await``;
    # computing the three fragments once avoids duplicating every template.
    deco = "@pytest.mark.asyncio\n" if is_async else ""
    defkw = "async def" if is_async else "def"
    awaitkw = "await " if is_async else ""

    # Generate test values based on parameter types
    test_cases = generate_test_cases_for_params(params)
    param_str = ", ".join(test_cases.get("valid_args", [""] * len(params)))

    parametrize_cases = test_cases.get("parametrize_cases", [])

    tests = []
    tests.append(f"import pytest\nfrom {module} import {name}\n")

    # Parametrize only single-parameter functions: ``parametrize_cases`` is a
    # flat list of single values, so for a multi-parameter function each case
    # would fail to unpack into all argnames and pytest would error at
    # collection time (same guard as generate_test_for_class uses).
    if len(param_names) == 1 and len(parametrize_cases) > 1:
        param_name = param_names[0]
        cases_str = ",\n    ".join(parametrize_cases)
        tests.append(
            f'''
@pytest.mark.parametrize("{param_name}", [
    {cases_str},
])
{deco}{defkw} test_{name}_with_various_inputs({param_name}):
    """Test {name} with various input combinations."""
    result = {awaitkw}{name}({param_name})
    assert result is not None
''',
        )
    else:
        # Simple valid input test
        tests.append(
            f'''
{deco}{defkw} test_{name}_returns_value():
    """Test that {name} returns a value with valid inputs."""
    result = {awaitkw}{name}({param_str})
    assert result is not None
''',
        )

    # Generate edge case tests based on parameter types.  TypeError is among
    # the tolerated exceptions, so a single-argument call against a
    # multi-parameter function records an expected failure instead of
    # breaking the generated suite.
    edge_cases = test_cases.get("edge_cases", [])
    if edge_cases:
        edge_cases_str = ",\n    ".join(edge_cases)
        tests.append(
            f'''
@pytest.mark.parametrize("edge_input", [
    {edge_cases_str},
])
{deco}{defkw} test_{name}_edge_cases(edge_input):
    """Test {name} with edge case inputs."""
    try:
        result = {awaitkw}{name}(edge_input)
        # Function should either return a value or raise an expected error
        assert result is not None or result == 0 or result == "" or result == []
    except (ValueError, TypeError, KeyError) as e:
        # Expected error for edge cases
        assert str(e)  # Error message should not be empty
''',
        )

    # Generate one exception test per raised exception (limit to 3 types)
    for exc_type in raises[:3]:
        tests.append(
            f'''
{deco}{defkw} test_{name}_raises_{exc_type.lower()}():
    """Test that {name} raises {exc_type} for invalid inputs."""
    with pytest.raises({exc_type}):
        {awaitkw}{name}(None)  # Adjust input to trigger {exc_type}
''',
        )

    # Add a return type assertion when the annotation is informative and the
    # function is safe to call again (no side effects).
    if return_type and return_type not in ("None", "Any"):
        type_check = get_type_assertion(return_type)
        if type_check and not has_side_effects:
            tests.append(
                f'''
{deco}{defkw} test_{name}_returns_correct_type():
    """Test that {name} returns the expected type."""
    result = {awaitkw}{name}({param_str})
    {type_check}
''',
            )

    return "\n".join(tests)
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def generate_test_cases_for_params(params: list) -> dict:
    """Build literal test inputs for a parameter list.

    Args:
        params: Parameter descriptions; each entry is ideally a
            ``(name, type_hint, default)`` tuple, but bare strings and
            short tuples are tolerated.

    Returns:
        Dict with three keys:
        - ``valid_args``: one source literal per parameter, for a plain call;
        - ``parametrize_cases``: up to 5 literals for parametrized tests;
        - ``edge_cases``: up to 5 deduplicated boundary literals.
    """
    valid_args: list[str] = []
    parametrize_cases: list[str] = []
    edge_cases: list[str] = []

    for param in params:
        if isinstance(param, tuple) and len(param) >= 2:
            _name = param[0]
            # Guard against a None type hint: the substring checks below
            # would otherwise raise AttributeError on .lower().
            type_hint = param[1] or "Any"
            default = param[2] if len(param) > 2 else None
        else:
            _name = param if isinstance(param, str) else str(param)
            type_hint = "Any"
            default = None

        hint = type_hint.lower()

        # Generate valid value based on type
        if "str" in hint:
            valid_args.append('"test_value"')
            parametrize_cases.extend(['"hello"', '"world"', '"test_string"'])
            edge_cases.extend(['""', '" "', '"a" * 1000'])
        elif "int" in hint:
            valid_args.append("42")
            parametrize_cases.extend(["0", "1", "100", "-1"])
            edge_cases.extend(["0", "-1", "2**31 - 1"])
        elif "float" in hint:
            valid_args.append("3.14")
            parametrize_cases.extend(["0.0", "1.0", "-1.5", "100.5"])
            edge_cases.extend(["0.0", "-0.0", "float('inf')"])
        elif "bool" in hint:
            valid_args.append("True")
            parametrize_cases.extend(["True", "False"])
        elif "list" in hint:
            valid_args.append("[1, 2, 3]")
            parametrize_cases.extend(["[]", "[1]", "[1, 2, 3]"])
            edge_cases.extend(["[]", "[None]"])
        elif "dict" in hint:
            valid_args.append('{"key": "value"}')
            parametrize_cases.extend(["{}", '{"a": 1}', '{"key": "value"}'])
            edge_cases.extend(["{}"])
        elif default is not None:
            # NOTE(review): assumes defaults arrive as source-text strings
            # from the AST analyzer; if raw values (e.g. a str object) can
            # appear here, repr() would be needed instead — TODO confirm.
            valid_args.append(str(default))
        else:
            valid_args.append("None")
            edge_cases.append("None")

    return {
        "valid_args": valid_args,
        "parametrize_cases": parametrize_cases[:5],  # Limit cases
        "edge_cases": list(dict.fromkeys(edge_cases))[:5],  # Unique, order-preserving
    }
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
def get_type_assertion(return_type: str) -> str | None:
|
|
225
|
+
"""Generate assertion for return type checking."""
|
|
226
|
+
type_map = {
|
|
227
|
+
"str": "assert isinstance(result, str)",
|
|
228
|
+
"int": "assert isinstance(result, int)",
|
|
229
|
+
"float": "assert isinstance(result, (int, float))",
|
|
230
|
+
"bool": "assert isinstance(result, bool)",
|
|
231
|
+
"list": "assert isinstance(result, list)",
|
|
232
|
+
"dict": "assert isinstance(result, dict)",
|
|
233
|
+
"tuple": "assert isinstance(result, tuple)",
|
|
234
|
+
}
|
|
235
|
+
for type_name, assertion in type_map.items():
|
|
236
|
+
if type_name in return_type.lower():
|
|
237
|
+
return assertion
|
|
238
|
+
return None
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
def get_param_test_values(type_hint: str) -> list[str]:
    """Return literal test values for one parameter, chosen by its type hint.

    The first matching type fragment wins; unknown hints fall back to a
    generic string literal.
    """
    hint = type_hint.lower()
    # Ordered lookup table; order matches the type checks used elsewhere
    # in this module (scalars before containers).
    candidates = (
        ("str", ['"hello"', '"world"', '"test_string"']),
        ("int", ["0", "1", "42", "-1"]),
        ("float", ["0.0", "1.0", "3.14"]),
        ("bool", ["True", "False"]),
        ("list", ["[]", "[1, 2, 3]"]),
        ("dict", ["{}", '{"key": "value"}']),
    )
    for fragment, values in candidates:
        if fragment in hint:
            return list(values)
    return ['"test_value"']
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
def generate_test_for_class(module: str, cls: dict) -> str:
    """Generate an executable pytest test class based on AST analysis.

    Args:
        module: Dotted import path of the module that defines the class.
        cls: AST-analysis dict with keys such as ``name``, ``init_params``,
            ``methods``, ``required_init_params`` and ``docstring``.

    Returns:
        Python source containing an instance fixture plus a ``Test<Name>``
        class with initialization, method and exception tests.
    """
    name = cls["name"]
    init_params = cls.get("init_params", [])
    methods = cls.get("methods", [])
    required_params = cls.get("required_init_params", 0)
    _docstring = cls.get("docstring", "")  # Reserved for future use

    # Generate constructor arguments - ensure we have values for ALL required params
    init_args = generate_test_cases_for_params(init_params)
    valid_args = init_args.get("valid_args", [])
    while len(valid_args) < required_params:
        valid_args.append('"test_value"')
    init_arg_str = ", ".join(valid_args)

    fixture_name = f"{name.lower()}_instance"

    tests = []
    tests.append(f"import pytest\nfrom {module} import {name}\n")

    # Module-level fixture for a ready-made class instance
    tests.append(
        f'''
@pytest.fixture
def {fixture_name}():
    """Create a {name} instance for testing."""
    return {name}({init_arg_str})
''',
    )

    # Test initialization
    tests.append(
        f'''
class Test{name}:
    """Tests for {name} class."""

    def test_initialization(self):
        """Test that {name} can be instantiated."""
        instance = {name}({init_arg_str})
        assert instance is not None
''',
    )

    # Only parametrize construction for a single required parameter (avoids
    # tuple mismatches), and only when the entry really is a full
    # (name, type, default) tuple — generate_test_cases_for_params tolerates
    # bare strings and short tuples, so indexing [2] blindly would raise.
    if (
        len(init_params) == 1
        and isinstance(init_params[0], tuple)
        and len(init_params[0]) >= 3
        and init_params[0][2] is None
    ):
        param_name = init_params[0][0]
        param_type = init_params[0][1]
        cases = get_param_test_values(param_type)
        if len(cases) > 1:
            cases_str = ",\n        ".join(cases)
            tests.append(
                f'''
    @pytest.mark.parametrize("{param_name}", [
        {cases_str},
    ])
    def test_initialization_with_various_args(self, {param_name}):
        """Test {name} initialization with various arguments."""
        instance = {name}({param_name})
        assert instance is not None
''',
            )

    # Generate tests for each public method (limit to 5)
    for method in methods[:5]:
        method_name = method.get("name", "")
        # Skip private helpers and dunders (this covers __init__ as well).
        if method_name.startswith("_"):
            continue

        method_params = method.get("params", [])[1:]  # Skip self
        is_async = method.get("is_async", False)
        raises = method.get("raises", [])

        # Async methods need the marker, ``async def`` and ``await``; the
        # trailing indent on the decorator keeps the def aligned in-class.
        deco = "@pytest.mark.asyncio\n    " if is_async else ""
        defkw = "async def" if is_async else "def"
        awaitkw = "await " if is_async else ""

        # Generate method call args
        method_args = generate_test_cases_for_params(method_params)
        method_arg_str = ", ".join(method_args.get("valid_args", []))

        tests.append(
            f'''
    {deco}{defkw} test_{method_name}_returns_value(self, {fixture_name}):
        """Test that {method_name} returns a value."""
        result = {awaitkw}{fixture_name}.{method_name}({method_arg_str})
        assert result is not None or result == 0 or result == "" or result == []
''',
        )

        # Add exception tests for methods that raise (limit to 2 types)
        for exc_type in raises[:2]:
            tests.append(
                f'''
    {deco}{defkw} test_{method_name}_raises_{exc_type.lower()}(self, {fixture_name}):
        """Test that {method_name} raises {exc_type} for invalid inputs."""
        with pytest.raises({exc_type}):
            {awaitkw}{fixture_name}.{method_name}(None)
''',
            )

    return "\n".join(tests)
|