scalebox-sdk 0.1.25__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scalebox/__init__.py +2 -2
- scalebox/api/__init__.py +3 -1
- scalebox/api/client/api/sandboxes/get_sandboxes.py +1 -1
- scalebox/api/client/api/sandboxes/post_sandboxes_sandbox_id_connect.py +193 -0
- scalebox/api/client/models/connect_sandbox.py +59 -0
- scalebox/api/client/models/error.py +2 -2
- scalebox/api/client/models/listed_sandbox.py +24 -3
- scalebox/api/client/models/new_sandbox.py +10 -0
- scalebox/api/client/models/sandbox.py +13 -0
- scalebox/api/client/models/sandbox_detail.py +24 -0
- scalebox/cli.py +125 -125
- scalebox/client/aclient.py +57 -57
- scalebox/client/client.py +102 -102
- scalebox/code_interpreter/__init__.py +12 -12
- scalebox/code_interpreter/charts.py +230 -230
- scalebox/code_interpreter/code_interpreter_async.py +3 -1
- scalebox/code_interpreter/code_interpreter_sync.py +3 -1
- scalebox/code_interpreter/constants.py +3 -3
- scalebox/code_interpreter/exceptions.py +13 -13
- scalebox/code_interpreter/models.py +485 -485
- scalebox/connection_config.py +36 -1
- scalebox/csx_connect/__init__.py +1 -1
- scalebox/csx_connect/client.py +485 -485
- scalebox/csx_desktop/main.py +651 -651
- scalebox/exceptions.py +83 -83
- scalebox/generated/api.py +61 -61
- scalebox/generated/api_pb2.py +203 -203
- scalebox/generated/api_pb2.pyi +956 -956
- scalebox/generated/api_pb2_connect.py +1407 -1407
- scalebox/generated/rpc.py +50 -50
- scalebox/sandbox/main.py +146 -139
- scalebox/sandbox/sandbox_api.py +105 -91
- scalebox/sandbox/signature.py +40 -40
- scalebox/sandbox/utils.py +34 -34
- scalebox/sandbox_async/main.py +226 -44
- scalebox/sandbox_async/sandbox_api.py +124 -3
- scalebox/sandbox_sync/main.py +205 -130
- scalebox/sandbox_sync/sandbox_api.py +119 -3
- scalebox/test/CODE_INTERPRETER_TESTS_READY.md +323 -323
- scalebox/test/README.md +329 -329
- scalebox/test/bedrock_openai_adapter.py +73 -0
- scalebox/test/code_interpreter_test.py +34 -34
- scalebox/test/code_interpreter_test_sync.py +34 -34
- scalebox/test/run_stress_code_interpreter_sync.py +178 -0
- scalebox/test/simple_upload_example.py +131 -0
- scalebox/test/stabitiy_test.py +323 -0
- scalebox/test/test_browser_use.py +27 -0
- scalebox/test/test_browser_use_scalebox.py +62 -0
- scalebox/test/test_code_interpreter_execcode.py +289 -211
- scalebox/test/test_code_interpreter_sync_comprehensive.py +116 -69
- scalebox/test/test_connect_pause_async.py +300 -0
- scalebox/test/test_connect_pause_sync.py +300 -0
- scalebox/test/test_csx_desktop_examples.py +3 -3
- scalebox/test/test_desktop_sandbox_sf.py +112 -0
- scalebox/test/test_download_url.py +41 -0
- scalebox/test/test_existing_sandbox.py +1037 -0
- scalebox/test/test_sandbox_async_comprehensive.py +5 -3
- scalebox/test/test_sandbox_object_storage_example.py +151 -0
- scalebox/test/test_sandbox_object_storage_example_async.py +159 -0
- scalebox/test/test_sandbox_sync_comprehensive.py +1 -1
- scalebox/test/test_sf.py +141 -0
- scalebox/test/test_watch_dir_async.py +58 -0
- scalebox/test/testacreate.py +1 -1
- scalebox/test/testagetinfo.py +1 -3
- scalebox/test/testcomputeuse.py +243 -243
- scalebox/test/testsandbox_api.py +5 -5
- scalebox/test/testsandbox_async.py +17 -47
- scalebox/test/testsandbox_sync.py +19 -15
- scalebox/test/upload_100mb_example.py +377 -0
- scalebox/utils/httpcoreclient.py +297 -297
- scalebox/utils/httpxclient.py +403 -403
- scalebox/version.py +2 -2
- {scalebox_sdk-0.1.25.dist-info → scalebox_sdk-1.0.2.dist-info}/METADATA +1 -1
- {scalebox_sdk-0.1.25.dist-info → scalebox_sdk-1.0.2.dist-info}/RECORD +78 -60
- {scalebox_sdk-0.1.25.dist-info → scalebox_sdk-1.0.2.dist-info}/WHEEL +1 -1
- {scalebox_sdk-0.1.25.dist-info → scalebox_sdk-1.0.2.dist-info}/entry_points.txt +0 -0
- {scalebox_sdk-0.1.25.dist-info → scalebox_sdk-1.0.2.dist-info}/licenses/LICENSE +0 -0
- {scalebox_sdk-0.1.25.dist-info → scalebox_sdk-1.0.2.dist-info}/top_level.txt +0 -0
scalebox/test/stabitiy_test.py
@@ -0,0 +1,323 @@
+#!/usr/bin/env python3
+"""
+Stability test script - runs the CodeInterpreter validation tests concurrently
+"""
+
+import concurrent.futures
+import time
+import json
+import logging
+import threading
+from typing import List, Dict, Any
+import sys
+import argparse
+
+# Import the original test code
+from code_interpreter_validator import CodeInterpreterValidator
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s - %(levelname)s - [%(threadName)s] - %(message)s",
+)
+logger = logging.getLogger(__name__)
+
+
+class StabilityTester:
+    """Stability tester"""
+
+    def __init__(self, concurrency: int = 10):
+        self.concurrency = concurrency
+        self.results = []
+        self.lock = threading.Lock()
+        self.test_counter = 0
+        self.total_tests = 0
+
+    def get_test_methods(self) -> List[str]:
+        """Collect all test methods"""
+        validator = CodeInterpreterValidator()
+        test_methods = []
+
+        # Collect every callable whose name starts with test_
+        for method_name in dir(validator):
+            if method_name.startswith("test_") and callable(
+                getattr(validator, method_name)
+            ):
+                test_methods.append(method_name)
+
+        self.total_tests = len(test_methods)
+        logger.info(f"Discovered {self.total_tests} test methods")
+        return test_methods
+
+    def run_single_test(self, test_name: str) -> Dict[str, Any]:
+        """Run a single test"""
+        thread_name = threading.current_thread().name
+        test_id = 0
+
+        with self.lock:
+            self.test_counter += 1
+            test_id = self.test_counter
+
+        logger.info(
+            f"[Thread {thread_name}] Starting test {test_id}/{self.total_tests}: {test_name}"
+        )
+
+        start_time = time.time()
+        success = False
+        error_message = ""
+        duration = 0
+
+        try:
+            # Create an independent validator instance for each test
+            validator = CodeInterpreterValidator()
+
+            # Run the sandbox creation test
+            validator.test_code_interpreter_creation()
+
+            # Run the target test
+            test_method = getattr(validator, test_name)
+            test_method()
+
+            duration = time.time() - start_time
+            success = True
+            logger.info(
+                f"[Thread {thread_name}] ✅ Test passed: {test_name} ({duration:.3f}s)"
+            )
+
+        except Exception as e:
+            duration = time.time() - start_time
+            error_message = str(e)
+            logger.error(
+                f"[Thread {thread_name}] ❌ Test failed: {test_name} - {error_message} ({duration:.3f}s)"
+            )
+
+        finally:
+            # Clean up resources
+            try:
+                if "validator" in locals():
+                    validator.cleanup()
+            except Exception as cleanup_error:
+                logger.warning(f"[Thread {thread_name}] Error during cleanup: {cleanup_error}")
+
+        result = {
+            "test_id": test_id,
+            "test_name": test_name,
+            "thread_name": thread_name,
+            "success": success,
+            "error_message": error_message,
+            "duration": duration,
+            "timestamp": time.time(),
+        }
+
+        with self.lock:
+            self.results.append(result)
+
+        return result
+
+    def run_concurrent_tests(self) -> Dict[str, Any]:
+        """Run the tests concurrently"""
+        test_methods = self.get_test_methods()
+
+        if not test_methods:
+            logger.error("No test methods found")
+            return {}
+
+        logger.info(f"Starting stability test, concurrency: {self.concurrency}")
+        logger.info(f"Total tests: {len(test_methods)}")
+
+        start_time = time.time()
+
+        # Run the tests concurrently in a thread pool
+        with concurrent.futures.ThreadPoolExecutor(
+            max_workers=self.concurrency, thread_name_prefix="TestWorker"
+        ) as executor:
+
+            # Submit all test tasks
+            future_to_test = {
+                executor.submit(self.run_single_test, test_name): test_name
+                for test_name in test_methods
+            }
+
+            # Wait for all tests to complete
+            completed = 0
+            for future in concurrent.futures.as_completed(future_to_test):
+                test_name = future_to_test[future]
+                try:
+                    future.result()
+                except Exception as exc:
+                    logger.error(f"Test {test_name} raised an exception: {exc}")
+                completed += 1
+                logger.info(f"Progress: {completed}/{len(test_methods)}")
+
+        total_duration = time.time() - start_time
+
+        # Generate the test report
+        report = self.generate_report(total_duration)
+
+        return report
+
+    def generate_report(self, total_duration: float) -> Dict[str, Any]:
+        """Generate the test report"""
+        successful_tests = [r for r in self.results if r["success"]]
+        failed_tests = [r for r in self.results if not r["success"]]
+
+        total_tests = len(self.results)
+        success_count = len(successful_tests)
+        failure_count = len(failed_tests)
+        success_rate = (success_count / total_tests * 100) if total_tests > 0 else 0
+
+        # Compute duration statistics
+        durations = [r["duration"] for r in self.results]
+        avg_duration = sum(durations) / len(durations) if durations else 0
+        max_duration = max(durations) if durations else 0
+        min_duration = min(durations) if durations else 0
+
+        report = {
+            "summary": {
+                "total_tests": total_tests,
+                "successful_tests": success_count,
+                "failed_tests": failure_count,
+                "success_rate": round(success_rate, 2),
+                "total_duration": round(total_duration, 3),
+                "concurrency": self.concurrency,
+                "avg_duration_per_test": round(avg_duration, 3),
+                "max_duration": round(max_duration, 3),
+                "min_duration": round(min_duration, 3),
+            },
+            "successful_tests": [
+                {
+                    "test_name": r["test_name"],
+                    "duration": round(r["duration"], 3),
+                    "thread": r["thread_name"],
+                }
+                for r in successful_tests
+            ],
+            "failed_tests": [
+                {
+                    "test_name": r["test_name"],
+                    "error": r["error_message"],
+                    "duration": round(r["duration"], 3),
+                    "thread": r["thread_name"],
+                }
+                for r in failed_tests
+            ],
+            "execution_timeline": [
+                {
+                    "test_id": r["test_id"],
+                    "test_name": r["test_name"],
+                    "thread": r["thread_name"],
+                    "success": r["success"],
+                    "duration": round(r["duration"], 3),
+                    "timestamp": r["timestamp"],
+                }
+                for r in self.results
+            ],
+        }
+
+        return report
+
+    def print_detailed_report(self, report: Dict[str, Any]):
+        """Print the detailed report"""
+        summary = report["summary"]
+
+        print("\n" + "=" * 80)
+        print("🚀 CODEINTERPRETER STABILITY TEST REPORT")
+        print("=" * 80)
+
+        print(f"\n📊 Test summary:")
+        print(f"  Total tests: {summary['total_tests']}")
+        print(f"  Passed: {summary['successful_tests']} ✅")
+        print(f"  Failed: {summary['failed_tests']} ❌")
+        print(f"  Success rate: {summary['success_rate']}%")
+        print(f"  Total duration: {summary['total_duration']}s")
+        print(f"  Concurrency: {summary['concurrency']}")
+        print(f"  Average test duration: {summary['avg_duration_per_test']}s")
+        print(f"  Longest test duration: {summary['max_duration']}s")
+        print(f"  Shortest test duration: {summary['min_duration']}s")
+
+        # Print passed tests
+        if report["successful_tests"]:
+            print(f"\n✅ Passed tests ({len(report['successful_tests'])}):")
+            for test in report["successful_tests"]:
+                print(
+                    f"  - {test['test_name']} ({test['duration']}s) [{test['thread']}]"
+                )
+
+        # Print failed tests
+        if report["failed_tests"]:
+            print(f"\n❌ Failed tests ({len(report['failed_tests'])}):")
+            for test in report["failed_tests"]:
+                print(f"  - {test['test_name']}")
+                print(f"    Error: {test['error']}")
+                print(f"    Duration: {test['duration']}s")
+                print(f"    Thread: {test['thread']}")
+
+        # Print the execution timeline
+        print(f"\n⏰ Execution timeline:")
+        for execution in sorted(
+            report["execution_timeline"], key=lambda x: x["timestamp"]
+        ):
+            status = "✅" if execution["success"] else "❌"
+            print(
+                f"  {status} [{execution['thread']}] {execution['test_name']} ({execution['duration']}s)"
+            )
+
+        print("\n" + "=" * 80)
+
+        # Save the detailed report to a file
+        timestamp = time.strftime("%Y%m%d_%H%M%S")
+        filename = f"stability_test_report_{timestamp}.json"
+        with open(filename, "w", encoding="utf-8") as f:
+            json.dump(report, f, ensure_ascii=False, indent=2)
+        print(f"📄 Detailed report saved to: {filename}")
+        print("=" * 80)
+
+
+def main():
+    """Main entry point"""
+    parser = argparse.ArgumentParser(description="CodeInterpreter stability test")
+    parser.add_argument(
+        "--concurrency", type=int, default=10, help="Number of concurrent threads (default: 10)"
+    )
+    parser.add_argument(
+        "--log-level",
+        choices=["DEBUG", "INFO", "WARNING", "ERROR"],
+        default="INFO",
+        help="Log level (default: INFO)",
+    )
+
+    args = parser.parse_args()
+
+    # Set the log level
+    logging.getLogger().setLevel(getattr(logging, args.log_level))
+
+    logger.info(f"Starting stability test, concurrency: {args.concurrency}")
+
+    tester = StabilityTester(concurrency=args.concurrency)
+
+    try:
+        report = tester.run_concurrent_tests()
+        tester.print_detailed_report(report)
+
+        # Choose the exit code based on the success rate
+        success_rate = report["summary"]["success_rate"]
+        if success_rate >= 95:
+            logger.info(f"🎉 Tests passed! Success rate: {success_rate}%")
+            sys.exit(0)
+        elif success_rate >= 80:
+            logger.warning(f"⚠️ Tests mostly passed, with room for improvement. Success rate: {success_rate}%")
+            sys.exit(0)
+        else:
+            logger.error(f"💥 Tests failed! Success rate: {success_rate}%")
+            sys.exit(1)
+
+    except KeyboardInterrupt:
+        logger.info("Test interrupted by user")
+        sys.exit(1)
+    except Exception as e:
+        logger.error(f"Error while running tests: {e}")
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
scalebox/test/test_browser_use.py
@@ -0,0 +1,27 @@
+import asyncio, openai
+from browser_use import Agent, BrowserProfile
+
+# Point at the local adapter
+openai.base_url = "http://localhost:8000/v1/"
+openai.api_key = "dummy"
+
+TASK = (
+    "Open https://www.baidu.com, "
+    "search for 'today's weather', "
+    "and copy out the title of the first result."
+)
+
+
+async def main():
+    agent = Agent(
+        task=TASK,
+        browser_profile=BrowserProfile(headless=False),
+        model="gpt-4",  # trick browser-use
+    )
+    result = await agent.run()
+    print("---------- Final result ----------")
+    print(result)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
scalebox/test/test_browser_use_scalebox.py
@@ -0,0 +1,62 @@
+import asyncio
+import os
+
+# from browser_use import Agent, BrowserProfile
+from scalebox.sandbox_async.main import AsyncSandbox
+
+# Set environment variables to avoid browser startup problems
+os.environ["BROWSER_USE_DISABLE_TELEMETRY"] = "1"
+
+
+async def main():
+    sandbox = await AsyncSandbox.create(
+        timeout=3600,
+        template="browser-use-headless",
+    )
+    proc = await sandbox.commands.run("echo hello from async")
+    print("exit_code =", proc.exit_code)
+    print("stdout =", proc.stdout)
+
+    # try:
+    #     # Configure browser parameters - use a more stable setup
+    #     browser_profile = BrowserProfile(
+    #         # Set the browser window size
+    #         viewport_width=1200,
+    #         viewport_height=800,
+    #         # Headless mode is required in a server environment
+    #         headless=True,
+    #         # Disable the sandbox for better container compatibility
+    #         sandbox=False,
+    #         # Increase the timeout
+    #         browser_launch_timeout=60000,  # 60 seconds
+    #         # Disable GPU acceleration (recommended on servers)
+    #         gpu_acceleration=False,
+    #     )
+    #
+    #     # Define the task for the agent to perform
+    #     task = """
+    #     Open the Baidu homepage (https://www.baidu.com), type "today's weather" into the search box, and search.
+    #     Wait for the results page to finish loading.
+    #     """
+    #
+    #     # Create the Agent instance
+    #     agent = Agent(
+    #         task=task,
+    #         browser_profile=browser_profile,
+    #     )
+    #
+    #     print("Starting the browser automation task...")
+    #
+    #     # Run the Agent
+    #     result = await agent.run()
+    #     print(f"Task finished: {result}")
+    #
+    # except Exception as e:
+    #     print(f"Error during execution: {e}")
+    #     # Print more detailed error information
+    #     import traceback
+    #     traceback.print_exc()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())