botrun-flow-lang 5.12.263-py3-none-any.whl → 5.12.264-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- botrun_flow_lang/api/auth_api.py +39 -39
- botrun_flow_lang/api/auth_utils.py +183 -183
- botrun_flow_lang/api/botrun_back_api.py +65 -65
- botrun_flow_lang/api/flow_api.py +3 -3
- botrun_flow_lang/api/hatch_api.py +508 -508
- botrun_flow_lang/api/langgraph_api.py +811 -811
- botrun_flow_lang/api/line_bot_api.py +1484 -1484
- botrun_flow_lang/api/model_api.py +300 -300
- botrun_flow_lang/api/rate_limit_api.py +32 -32
- botrun_flow_lang/api/routes.py +79 -79
- botrun_flow_lang/api/search_api.py +53 -53
- botrun_flow_lang/api/storage_api.py +395 -395
- botrun_flow_lang/api/subsidy_api.py +290 -290
- botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
- botrun_flow_lang/api/user_setting_api.py +70 -70
- botrun_flow_lang/api/version_api.py +31 -31
- botrun_flow_lang/api/youtube_api.py +26 -26
- botrun_flow_lang/constants.py +13 -13
- botrun_flow_lang/langgraph_agents/agents/agent_runner.py +178 -178
- botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
- botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gemini_subsidy_graph.py +460 -460
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
- botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +723 -723
- botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
- botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
- botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
- botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
- botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
- botrun_flow_lang/langgraph_agents/agents/util/img_util.py +294 -294
- botrun_flow_lang/langgraph_agents/agents/util/local_files.py +419 -419
- botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
- botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
- botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +486 -486
- botrun_flow_lang/langgraph_agents/agents/util/pdf_cache.py +250 -250
- botrun_flow_lang/langgraph_agents/agents/util/pdf_processor.py +204 -204
- botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
- botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
- botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
- botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
- botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
- botrun_flow_lang/llm_agent/llm_agent.py +19 -19
- botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
- botrun_flow_lang/log/.gitignore +2 -2
- botrun_flow_lang/main.py +61 -61
- botrun_flow_lang/main_fast.py +51 -51
- botrun_flow_lang/mcp_server/__init__.py +10 -10
- botrun_flow_lang/mcp_server/default_mcp.py +744 -744
- botrun_flow_lang/models/nodes/utils.py +205 -205
- botrun_flow_lang/models/token_usage.py +34 -34
- botrun_flow_lang/requirements.txt +21 -21
- botrun_flow_lang/services/base/firestore_base.py +30 -30
- botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
- botrun_flow_lang/services/hatch/hatch_fs_store.py +419 -419
- botrun_flow_lang/services/storage/storage_cs_store.py +206 -206
- botrun_flow_lang/services/storage/storage_factory.py +12 -12
- botrun_flow_lang/services/storage/storage_store.py +65 -65
- botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
- botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
- botrun_flow_lang/static/docs/tools/index.html +926 -926
- botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
- botrun_flow_lang/tests/api_stress_test.py +357 -357
- botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
- botrun_flow_lang/tests/test_botrun_app.py +46 -46
- botrun_flow_lang/tests/test_html_util.py +31 -31
- botrun_flow_lang/tests/test_img_analyzer.py +190 -190
- botrun_flow_lang/tests/test_img_util.py +39 -39
- botrun_flow_lang/tests/test_local_files.py +114 -114
- botrun_flow_lang/tests/test_mermaid_util.py +103 -103
- botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
- botrun_flow_lang/tests/test_plotly_util.py +151 -151
- botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
- botrun_flow_lang/tools/generate_docs.py +133 -133
- botrun_flow_lang/tools/templates/tools.html +153 -153
- botrun_flow_lang/utils/__init__.py +7 -7
- botrun_flow_lang/utils/botrun_logger.py +344 -344
- botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
- botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
- botrun_flow_lang/utils/google_drive_utils.py +654 -654
- botrun_flow_lang/utils/langchain_utils.py +324 -324
- botrun_flow_lang/utils/yaml_utils.py +9 -9
- {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-5.12.264.dist-info}/METADATA +1 -1
- botrun_flow_lang-5.12.264.dist-info/RECORD +102 -0
- botrun_flow_lang-5.12.263.dist-info/RECORD +0 -102
- {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-5.12.264.dist-info}/WHEEL +0 -0
@@ -1,357 +1,357 @@
botrun_flow_lang/tests/api_stress_test.py: all 357 lines are removed and re-added with identical content; the file is shown once below.

#!/usr/bin/env python
import sys
import os
import random
import time
import concurrent.futures
import traceback
import inspect
from typing import List, Callable, Dict, Any, Tuple
import unittest
from unittest.mock import patch
import json
from datetime import datetime
import asyncio
import aiohttp

# Add the parent directory to the sys.path to allow imports
sys.path.append(
    os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)

# Import the test class that contains all the test methods
from botrun_flow_lang.tests.api_functional_tests import TestAPIFunctionality


class StressTest:
    def __init__(
        self,
        num_users: int = 50,
        num_rounds: int = 2,
        base_url: str = "https://botrun-flow-lang-fastapi-dev-36186877499.asia-east1.run.app",
        include_tests: List[str] = None,
    ):
        """
        Initialize the stress test with configuration parameters.

        Args:
            num_users: Number of concurrent users to simulate
            num_rounds: Number of testing rounds to perform
            base_url: The base URL for the API
            include_tests: List of test function names to include
        """
        self.num_users = num_users
        self.num_rounds = num_rounds
        self.base_url = base_url
        self.include_tests = include_tests or [
            "test_langgraph_react_agent_social_housing"
        ]

        # Get all test methods from TestAPIFunctionality class
        self.test_methods = self._get_test_methods()

        # Stats for tracking results
        self.total_tests = 0
        self.successful_tests = 0
        self.failed_tests = 0
        self.test_durations = {}
        self.start_time = None
        self.end_time = None

    def _get_test_methods(self) -> List[str]:
        """Get all test methods from TestAPIFunctionality class, keeping only the ones listed in include_tests."""
        all_methods = [
            method_name
            for method_name, method in inspect.getmembers(
                TestAPIFunctionality, predicate=inspect.isfunction
            )
            if method_name.startswith("test_") and method_name in self.include_tests
        ]
        return all_methods

    def run_single_test(self, user_id: int, test_name: str) -> Tuple[bool, float, str]:
        """
        Run a single test function.

        Args:
            user_id: ID of the simulated user
            test_name: Name of the test function to run

        Returns:
            Tuple of (success, duration, error_message)
        """
        start_time = time.time()
        print(f"User {user_id}: Running {test_name}")

        test_instance = TestAPIFunctionality(methodName="setUp")
        test_instance.base_url = self.base_url

        # Run the setUp method to initialize the test instance
        test_instance.setUp()

        # Get the actual test method
        test_method = getattr(test_instance, test_name)

        success = True
        error_message = ""

        try:
            # Execute the test method
            test_method()
        except Exception as e:
            success = False
            error_message = f"{type(e).__name__}: {str(e)}\n{traceback.format_exc()}"
            print(
                f"User {user_id}: Error in {test_name} - {type(e).__name__}: {str(e)}"
            )
        finally:
            # Always call tearDown to clean up resources
            if hasattr(test_instance, "tearDown"):
                test_instance.tearDown()

        duration = time.time() - start_time
        print(
            f"User {user_id}: Completed {test_name} in {duration:.2f}s - {'Success' if success else 'Failed'}"
        )

        return success, duration, error_message

    def user_workflow(self, user_id: int) -> List[Dict[str, Any]]:
        """
        Simulates a user workflow by randomly selecting and running tests.

        Args:
            user_id: ID of the simulated user

        Returns:
            List of test results
        """
        results = []

        for round_num in range(self.num_rounds):
            # Select a random test method
            test_name = random.choice(self.test_methods)

            # Run the test
            success, duration, error_message = self.run_single_test(user_id, test_name)

            # Record the result
            result = {
                "user_id": user_id,
                "round": round_num + 1,
                "test_name": test_name,
                "success": success,
                "duration": duration,
                "timestamp": datetime.now().isoformat(),
                "error_message": error_message,
            }
            results.append(result)

            # Sleep for a short period to simulate some user think time (optional)
            time.sleep(random.uniform(0.1, 0.5))

        return results

    def run_stress_test(self) -> Dict[str, Any]:
        """
        Run the stress test with concurrent users.

        Returns:
            Dictionary with test results and statistics
        """
        self.start_time = time.time()
        all_results = []
        self.test_durations = {}

        print(
            f"Starting stress test with {self.num_users} concurrent users for {self.num_rounds} rounds"
        )
        print(f"Using test methods: {', '.join(self.test_methods)}")
        print("-" * 70)

        # Use concurrent.futures to run tests in parallel
        with concurrent.futures.ThreadPoolExecutor(
            max_workers=self.num_users
        ) as executor:
            future_to_user = {
                executor.submit(self.user_workflow, user_id): user_id
                for user_id in range(1, self.num_users + 1)
            }

            for future in concurrent.futures.as_completed(future_to_user):
                user_id = future_to_user[future]
                try:
                    user_results = future.result()
                    all_results.extend(user_results)
                except Exception as e:
                    print(f"User {user_id} generated an exception: {e}")

        # Calculate statistics
        self.end_time = time.time()
        self.total_tests = len(all_results)
        self.successful_tests = sum(1 for r in all_results if r["success"])
        self.failed_tests = self.total_tests - self.successful_tests

        # Calculate average duration per test type
        test_type_durations = {}
        for result in all_results:
            test_name = result["test_name"]
            if test_name not in test_type_durations:
                test_type_durations[test_name] = {"count": 0, "total_duration": 0}

            test_type_durations[test_name]["count"] += 1
            test_type_durations[test_name]["total_duration"] += result["duration"]

        for test_name, data in test_type_durations.items():
            avg_duration = data["total_duration"] / data["count"]
            test_type_durations[test_name]["avg_duration"] = avg_duration

        # Prepare summary report
        summary = {
            "config": {
                "num_users": self.num_users,
                "num_rounds": self.num_rounds,
                "base_url": self.base_url,
                "included_tests": self.include_tests,
            },
            "statistics": {
                "total_tests": self.total_tests,
                "successful_tests": self.successful_tests,
                "failed_tests": self.failed_tests,
                "success_rate": (
                    (self.successful_tests / self.total_tests) * 100
                    if self.total_tests > 0
                    else 0
                ),
                "total_duration_seconds": self.end_time - self.start_time,
                "avg_test_duration": (
                    sum(r["duration"] for r in all_results) / len(all_results)
                    if all_results
                    else 0
                ),
                "test_type_statistics": {
                    test_name: {
                        "count": data["count"],
                        "avg_duration": data["avg_duration"],
                        "success_rate": (
                            sum(
                                1
                                for r in all_results
                                if r["test_name"] == test_name and r["success"]
                            )
                            / data["count"]
                            * 100
                        ),
                    }
                    for test_name, data in test_type_durations.items()
                },
            },
            "detailed_results": all_results,
        }

        self._print_summary(summary)
        return summary

    def _print_summary(self, summary: Dict[str, Any]) -> None:
        """Print a human-readable summary of the stress test results."""
        print("\n" + "=" * 70)
        print(f"STRESS TEST SUMMARY")
        print("=" * 70)

        stats = summary["statistics"]
        config = summary["config"]

        print(f"Configuration:")
        print(f"  Users: {config['num_users']}")
        print(f"  Rounds per user: {config['num_rounds']}")
        print(f"  API Base URL: {config['base_url']}")
        print(f"  Included tests: {', '.join(config['included_tests'])}")

        print("\nOverall Statistics:")
        print(f"  Total tests run: {stats['total_tests']}")
        print(f"  Successful tests: {stats['successful_tests']}")
        print(f"  Failed tests: {stats['failed_tests']}")
        print(f"  Success rate: {stats['success_rate']:.2f}%")
        print(f"  Total duration: {stats['total_duration_seconds']:.2f} seconds")
        print(f"  Average test duration: {stats['avg_test_duration']:.2f} seconds")

        print("\nTest Type Statistics:")
        for test_name, test_stats in stats["test_type_statistics"].items():
            print(f"  {test_name}:")
            print(f"    Count: {test_stats['count']}")
            print(f"    Average duration: {test_stats['avg_duration']:.2f} seconds")
            print(f"    Success rate: {test_stats['success_rate']:.2f}%")

        print("\nFailed Tests:")
        failed_tests = [r for r in summary["detailed_results"] if not r["success"]]
        if failed_tests:
            for i, test in enumerate(
                failed_tests[:10], 1
            ):  # Show only first 10 failures
                print(
                    f"  {i}. User {test['user_id']}, Round {test['round']}: {test['test_name']}"
                )
                error_first_line = test["error_message"].split("\n")[0]
                print(f"     Error: {error_first_line}")

            if len(failed_tests) > 10:
                print(f"  ... and {len(failed_tests) - 10} more failures")
        else:
            print("  None")

        print("=" * 70)

        # Add a clear final summary line
        print(
            f"\n🏆 FINAL RESULT: {stats['successful_tests']} PASSED ✅ | {stats['failed_tests']} FAILED ❌ | {stats['success_rate']:.2f}% SUCCESS RATE"
        )
        print("=" * 70)


def main():
    """Main entry point for the stress test."""
    # Configuration parameters - adjust these as needed
    num_users = 50
    num_rounds = 2
    base_url = "https://botrun-flow-lang-fastapi-dev-36186877499.asia-east1.run.app"
    include_tests = [
        "test_langgraph_react_agent_social_housing"
    ]  # Run only the social-housing occupancy eligibility review test

    # Create and run the stress test
    stress_tester = StressTest(
        num_users=num_users,
        num_rounds=num_rounds,
        base_url=base_url,
        include_tests=include_tests,  # Use include_tests to specify which tests to run
    )

    results = stress_tester.run_stress_test()

    # Get success and failure counts for final output
    successful_tests = results["statistics"]["successful_tests"]
    failed_tests = results["statistics"]["failed_tests"]
    success_rate = results["statistics"]["success_rate"]

    # Optionally save the results to a JSON file
    # timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    # results_file = f"stress_test_results_{timestamp}.json"

    # with open(results_file, "w") as f:
    #     json.dump(results, f, indent=2)

    # print(f"\nResults saved to {results_file}")

    # Display final status for quick reference
    print("\n" + "=" * 70)
    print(
        f"STRESS TEST COMPLETED: {successful_tests} PASSED, {failed_tests} FAILED, {success_rate:.2f}% SUCCESS RATE"
    )

    # Return non-zero exit code if any tests failed (useful for CI/CD pipelines)
    if failed_tests > 0:
        sys.exit(1)


if __name__ == "__main__":
    main()
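
For reference, a minimal sketch of driving this stress test programmatically, assuming the wheel is installed and the dev Cloud Run endpoint above is reachable; the user and round counts below are illustrative values, not defaults taken from the package:

from botrun_flow_lang.tests.api_stress_test import StressTest

# Illustrative configuration: 10 simulated users, 1 round each, running only
# the social-housing react-agent test against the dev endpoint.
tester = StressTest(
    num_users=10,
    num_rounds=1,
    base_url="https://botrun-flow-lang-fastapi-dev-36186877499.asia-east1.run.app",
    include_tests=["test_langgraph_react_agent_social_housing"],
)
summary = tester.run_stress_test()
print(f"Success rate: {summary['statistics']['success_rate']:.2f}%")

Running the module directly (python -m botrun_flow_lang.tests.api_stress_test) uses the 50-user, 2-round configuration hard-coded in main() and exits with a non-zero status if any test fails.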