scalebox-sdk 1.0.1-py3-none-any.whl → 1.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. scalebox/__init__.py +1 -1
  2. scalebox/api/client/api/sandboxes/get_sandboxes.py +1 -1
  3. scalebox/api/client/models/error.py +1 -1
  4. scalebox/api/client/models/listed_sandbox.py +6 -3
  5. scalebox/api/client/models/sandbox.py +2 -2
  6. scalebox/code_interpreter/code_interpreter_async.py +3 -1
  7. scalebox/code_interpreter/code_interpreter_sync.py +3 -1
  8. scalebox/connection_config.py +2 -0
  9. scalebox/generated/api_pb2_connect.py +3 -3
  10. scalebox/sandbox/main.py +4 -4
  11. scalebox/test/bedrock_openai_adapter.py +8 -2
  12. scalebox/test/run_stress_code_interpreter_sync.py +18 -6
  13. scalebox/test/simple_upload_example.py +39 -31
  14. scalebox/test/stabitiy_test.py +82 -69
  15. scalebox/test/test_browser_use.py +4 -2
  16. scalebox/test/test_browser_use_scalebox.py +5 -4
  17. scalebox/test/test_code_interpreter_execcode.py +289 -211
  18. scalebox/test/test_code_interpreter_sync_comprehensive.py +2 -5
  19. scalebox/test/test_connect_pause_async.py +34 -11
  20. scalebox/test/test_connect_pause_sync.py +49 -16
  21. scalebox/test/test_csx_desktop_examples.py +3 -3
  22. scalebox/test/test_desktop_sandbox_sf.py +18 -23
  23. scalebox/test/test_download_url.py +6 -14
  24. scalebox/test/test_existing_sandbox.py +1037 -0
  25. scalebox/test/test_sandbox_async_comprehensive.py +4 -2
  26. scalebox/test/test_sandbox_object_storage_example.py +14 -9
  27. scalebox/test/test_sandbox_object_storage_example_async.py +6 -3
  28. scalebox/test/test_sandbox_sync_comprehensive.py +1 -1
  29. scalebox/test/test_sf.py +12 -8
  30. scalebox/test/test_watch_dir_async.py +6 -4
  31. scalebox/test/testagetinfo.py +1 -3
  32. scalebox/test/testsandbox_api.py +5 -3
  33. scalebox/test/testsandbox_async.py +17 -47
  34. scalebox/test/testsandbox_sync.py +18 -14
  35. scalebox/test/upload_100mb_example.py +77 -55
  36. scalebox/version.py +2 -2
  37. {scalebox_sdk-1.0.1.dist-info → scalebox_sdk-1.0.2.dist-info}/METADATA +1 -1
  38. {scalebox_sdk-1.0.1.dist-info → scalebox_sdk-1.0.2.dist-info}/RECORD +42 -41
  39. {scalebox_sdk-1.0.1.dist-info → scalebox_sdk-1.0.2.dist-info}/WHEEL +0 -0
  40. {scalebox_sdk-1.0.1.dist-info → scalebox_sdk-1.0.2.dist-info}/entry_points.txt +0 -0
  41. {scalebox_sdk-1.0.1.dist-info → scalebox_sdk-1.0.2.dist-info}/licenses/LICENSE +0 -0
  42. {scalebox_sdk-1.0.1.dist-info → scalebox_sdk-1.0.2.dist-info}/top_level.txt +0 -0
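
The changes are shown below per file as unified-diff hunks. For readers who want to reproduce this kind of comparison locally, the following is a minimal sketch using only the Python standard library; the local wheel filenames passed at the bottom are assumptions, not files shipped with this diff.

# Editor's sketch (not part of the package): diff two locally downloaded wheels.
import difflib
import zipfile


def diff_wheels(old_wheel: str, new_wheel: str) -> None:
    """Print a unified diff of every file present in both wheels."""
    with zipfile.ZipFile(old_wheel) as old, zipfile.ZipFile(new_wheel) as new:
        for name in sorted(set(old.namelist()) & set(new.namelist())):
            old_lines = old.read(name).decode("utf-8", "replace").splitlines(keepends=True)
            new_lines = new.read(name).decode("utf-8", "replace").splitlines(keepends=True)
            for line in difflib.unified_diff(old_lines, new_lines, fromfile=name, tofile=name):
                print(line, end="")


if __name__ == "__main__":
    # Assumed local filenames for the two releases compared above.
    diff_wheels("scalebox_sdk-1.0.1-py3-none-any.whl", "scalebox_sdk-1.0.2-py3-none-any.whl")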
scalebox/test/stabitiy_test.py

@@ -18,7 +18,7 @@ from code_interpreter_validator import CodeInterpreterValidator
 # 配置日志
 logging.basicConfig(
     level=logging.INFO,
-    format='%(asctime)s - %(levelname)s - [%(threadName)s] - %(message)s'
+    format="%(asctime)s - %(levelname)s - [%(threadName)s] - %(message)s",
 )
 logger = logging.getLogger(__name__)
 
@@ -40,7 +40,9 @@ class StabilityTester:
 
         # 获取所有以test_开头的方法
         for method_name in dir(validator):
-            if method_name.startswith('test_') and callable(getattr(validator, method_name)):
+            if method_name.startswith("test_") and callable(
+                getattr(validator, method_name)
+            ):
                 test_methods.append(method_name)
 
         self.total_tests = len(test_methods)
@@ -56,7 +58,9 @@ class StabilityTester:
         self.test_counter += 1
         test_id = self.test_counter
 
-        logger.info(f"[线程 {thread_name}] 开始执行测试 {test_id}/{self.total_tests}: {test_name}")
+        logger.info(
+            f"[线程 {thread_name}] 开始执行测试 {test_id}/{self.total_tests}: {test_name}"
+        )
 
         start_time = time.time()
         success = False
@@ -76,29 +80,33 @@ class StabilityTester:
 
             duration = time.time() - start_time
             success = True
-            logger.info(f"[线程 {thread_name}] ✅ 测试通过: {test_name} ({duration:.3f}s)")
+            logger.info(
+                f"[线程 {thread_name}] ✅ 测试通过: {test_name} ({duration:.3f}s)"
+            )
 
         except Exception as e:
             duration = time.time() - start_time
             error_message = str(e)
-            logger.error(f"[线程 {thread_name}] ❌ 测试失败: {test_name} - {error_message} ({duration:.3f}s)")
+            logger.error(
+                f"[线程 {thread_name}] ❌ 测试失败: {test_name} - {error_message} ({duration:.3f}s)"
+            )
 
         finally:
             # 清理资源
             try:
-                if 'validator' in locals():
+                if "validator" in locals():
                     validator.cleanup()
             except Exception as cleanup_error:
                 logger.warning(f"[线程 {thread_name}] 清理资源时出错: {cleanup_error}")
 
         result = {
-            'test_id': test_id,
-            'test_name': test_name,
-            'thread_name': thread_name,
-            'success': success,
-            'error_message': error_message,
-            'duration': duration,
-            'timestamp': time.time()
+            "test_id": test_id,
+            "test_name": test_name,
+            "thread_name": thread_name,
+            "success": success,
+            "error_message": error_message,
+            "duration": duration,
+            "timestamp": time.time(),
         }
 
         with self.lock:
@@ -121,8 +129,7 @@ class StabilityTester:
 
         # 使用线程池执行并发测试
         with concurrent.futures.ThreadPoolExecutor(
-            max_workers=self.concurrency,
-            thread_name_prefix='TestWorker'
+            max_workers=self.concurrency, thread_name_prefix="TestWorker"
         ) as executor:
 
             # 提交所有测试任务
@@ -151,8 +158,8 @@ class StabilityTester:
 
     def generate_report(self, total_duration: float) -> Dict[str, Any]:
         """生成测试报告"""
-        successful_tests = [r for r in self.results if r['success']]
-        failed_tests = [r for r in self.results if not r['success']]
+        successful_tests = [r for r in self.results if r["success"]]
+        failed_tests = [r for r in self.results if not r["success"]]
 
         total_tests = len(self.results)
         success_count = len(successful_tests)
@@ -160,55 +167,58 @@ class StabilityTester:
         success_rate = (success_count / total_tests * 100) if total_tests > 0 else 0
 
         # 计算统计信息
-        durations = [r['duration'] for r in self.results]
+        durations = [r["duration"] for r in self.results]
         avg_duration = sum(durations) / len(durations) if durations else 0
         max_duration = max(durations) if durations else 0
         min_duration = min(durations) if durations else 0
 
         report = {
-            'summary': {
-                'total_tests': total_tests,
-                'successful_tests': success_count,
-                'failed_tests': failure_count,
-                'success_rate': round(success_rate, 2),
-                'total_duration': round(total_duration, 3),
-                'concurrency': self.concurrency,
-                'avg_duration_per_test': round(avg_duration, 3),
-                'max_duration': round(max_duration, 3),
-                'min_duration': round(min_duration, 3)
+            "summary": {
+                "total_tests": total_tests,
+                "successful_tests": success_count,
+                "failed_tests": failure_count,
+                "success_rate": round(success_rate, 2),
+                "total_duration": round(total_duration, 3),
+                "concurrency": self.concurrency,
+                "avg_duration_per_test": round(avg_duration, 3),
+                "max_duration": round(max_duration, 3),
+                "min_duration": round(min_duration, 3),
             },
-            'successful_tests': [
+            "successful_tests": [
                 {
-                    'test_name': r['test_name'],
-                    'duration': round(r['duration'], 3),
-                    'thread': r['thread_name']
-                } for r in successful_tests
+                    "test_name": r["test_name"],
+                    "duration": round(r["duration"], 3),
+                    "thread": r["thread_name"],
+                }
+                for r in successful_tests
             ],
-            'failed_tests': [
+            "failed_tests": [
                 {
-                    'test_name': r['test_name'],
-                    'error': r['error_message'],
-                    'duration': round(r['duration'], 3),
-                    'thread': r['thread_name']
-                } for r in failed_tests
+                    "test_name": r["test_name"],
+                    "error": r["error_message"],
+                    "duration": round(r["duration"], 3),
+                    "thread": r["thread_name"],
+                }
+                for r in failed_tests
             ],
-            'execution_timeline': [
+            "execution_timeline": [
                 {
-                    'test_id': r['test_id'],
-                    'test_name': r['test_name'],
-                    'thread': r['thread_name'],
-                    'success': r['success'],
-                    'duration': round(r['duration'], 3),
-                    'timestamp': r['timestamp']
-                } for r in self.results
-            ]
+                    "test_id": r["test_id"],
+                    "test_name": r["test_name"],
+                    "thread": r["thread_name"],
+                    "success": r["success"],
+                    "duration": round(r["duration"], 3),
+                    "timestamp": r["timestamp"],
+                }
+                for r in self.results
+            ],
         }
 
         return report
 
     def print_detailed_report(self, report: Dict[str, Any]):
         """打印详细报告"""
-        summary = report['summary']
+        summary = report["summary"]
 
         print("\n" + "=" * 80)
         print("🚀 CODEINTERPRETER 稳定性测试报告")
@@ -226,15 +236,17 @@ class StabilityTester:
         print(f" 最短测试时间: {summary['min_duration']}s")
 
         # 打印成功测试
-        if report['successful_tests']:
+        if report["successful_tests"]:
             print(f"\n✅ 通过的测试 ({len(report['successful_tests'])}):")
-            for test in report['successful_tests']:
-                print(f" - {test['test_name']} ({test['duration']}s) [{test['thread']}]")
+            for test in report["successful_tests"]:
+                print(
+                    f" - {test['test_name']} ({test['duration']}s) [{test['thread']}]"
+                )
 
         # 打印失败测试
-        if report['failed_tests']:
+        if report["failed_tests"]:
             print(f"\n❌ 失败的测试 ({len(report['failed_tests'])}):")
-            for test in report['failed_tests']:
+            for test in report["failed_tests"]:
                 print(f" - {test['test_name']}")
                 print(f" 错误: {test['error']}")
                 print(f" 时间: {test['duration']}s")
@@ -242,16 +254,20 @@ class StabilityTester:
 
         # 打印执行时间线
         print(f"\n⏰ 执行时间线:")
-        for execution in sorted(report['execution_timeline'], key=lambda x: x['timestamp']):
-            status = "✅" if execution['success'] else ""
-            print(f" {status} [{execution['thread']}] {execution['test_name']} ({execution['duration']}s)")
+        for execution in sorted(
+            report["execution_timeline"], key=lambda x: x["timestamp"]
+        ):
+            status = "✅" if execution["success"] else "❌"
+            print(
+                f" {status} [{execution['thread']}] {execution['test_name']} ({execution['duration']}s)"
+            )
 
         print("\n" + "=" * 80)
 
         # 保存详细报告到文件
         timestamp = time.strftime("%Y%m%d_%H%M%S")
         filename = f"stability_test_report_{timestamp}.json"
-        with open(filename, 'w', encoding='utf-8') as f:
+        with open(filename, "w", encoding="utf-8") as f:
             json.dump(report, f, ensure_ascii=False, indent=2)
         print(f"📄 详细报告已保存至: {filename}")
         print("=" * 80)
@@ -259,18 +275,15 @@ class StabilityTester:
 
 def main():
     """主函数"""
-    parser = argparse.ArgumentParser(description='CodeInterpreter稳定性测试')
+    parser = argparse.ArgumentParser(description="CodeInterpreter稳定性测试")
     parser.add_argument(
-        '--concurrency',
-        type=int,
-        default=10,
-        help='并发线程数 (默认: 10)'
+        "--concurrency", type=int, default=10, help="并发线程数 (默认: 10)"
     )
     parser.add_argument(
-        '--log-level',
-        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
-        default='INFO',
-        help='日志级别 (默认: INFO)'
+        "--log-level",
+        choices=["DEBUG", "INFO", "WARNING", "ERROR"],
+        default="INFO",
+        help="日志级别 (默认: INFO)",
     )
 
     args = parser.parse_args()
@@ -287,7 +300,7 @@ def main():
     tester.print_detailed_report(report)
 
     # 根据成功率返回适当的退出码
-    success_rate = report['summary']['success_rate']
+    success_rate = report["summary"]["success_rate"]
    if success_rate >= 95:
         logger.info(f"🎉 测试成功! 成功率: {success_rate}%")
         sys.exit(0)
@@ -307,4 +320,4 @@ def main():
 
 
 if __name__ == "__main__":
-    main()
+    main()
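
Aside from the "❌" status marker and the trailing newline, the stabitiy_test.py hunks above are formatter normalization: single quotes become double quotes and long calls are wrapped. For reference, the concurrency pattern those hunks show (a ThreadPoolExecutor with a "TestWorker" thread-name prefix feeding a lock-guarded results list) boils down to the minimal sketch below; run_one and the generated test names are placeholders, not the SDK's real test methods.

# Editor's sketch of the pattern used by StabilityTester above; run_one() and
# the placeholder test names are illustrative, not part of the SDK.
import concurrent.futures
import threading
import time

results = []
lock = threading.Lock()


def run_one(test_name: str) -> None:
    start = time.time()
    success, error_message = True, None
    try:
        time.sleep(0.01)  # stand-in for the real test body
    except Exception as e:  # record the failure instead of crashing the worker
        success, error_message = False, str(e)
    with lock:  # the shared results list is guarded, as in the hunks above
        results.append(
            {
                "test_name": test_name,
                "success": success,
                "error_message": error_message,
                "duration": time.time() - start,
            }
        )


with concurrent.futures.ThreadPoolExecutor(
    max_workers=10, thread_name_prefix="TestWorker"
) as executor:
    for name in (f"test_case_{i}" for i in range(5)):
        executor.submit(run_one, name)

print(f"{sum(r['success'] for r in results)}/{len(results)} tests passed")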
scalebox/test/test_browser_use.py

@@ -11,15 +11,17 @@ TASK = (
     "把第一条结果的标题复制出来。"
 )
 
+
 async def main():
     agent = Agent(
         task=TASK,
         browser_profile=BrowserProfile(headless=False),
-        model="gpt-4" # 欺骗 browser-use
+        model="gpt-4",  # 欺骗 browser-use
     )
     result = await agent.run()
     print("---------- 最终结果 ----------")
     print(result)
 
+
 if __name__ == "__main__":
-    asyncio.run(main())
+    asyncio.run(main())
scalebox/test/test_browser_use_scalebox.py

@@ -1,17 +1,18 @@
 import asyncio
 import os
+
 # from browser_use import Agent, BrowserProfile
 from scalebox.sandbox_async.main import AsyncSandbox
 
 # 设置环境变量,避免浏览器启动问题
-os.environ['BROWSER_USE_DISABLE_TELEMETRY'] = '1'
+os.environ["BROWSER_USE_DISABLE_TELEMETRY"] = "1"
 
 
 async def main():
-    sandbox = await (AsyncSandbox.create(
+    sandbox = await AsyncSandbox.create(
         timeout=3600,
         template="browser-use-headless",
-    ))
+    )
     proc = await sandbox.commands.run("echo hello from async")
     print("exit_code =", proc.exit_code)
     print("stdout =", proc.stdout)
@@ -58,4 +59,4 @@ async def main():
 
 
 if __name__ == "__main__":
-    asyncio.run(main())
+    asyncio.run(main())
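
The test_browser_use_scalebox.py hunks drop the redundant parentheses around the awaited AsyncSandbox.create(...) call and add a trailing newline. Put together, the async usage pattern the file ends up with looks like the sketch below; it only uses calls that already appear in the diff (AsyncSandbox.create, sandbox.commands.run), with the timeout and template values copied from it.

# Editor's sketch assembled from the hunks above; no SDK calls beyond those
# shown in the diff are assumed.
import asyncio

from scalebox.sandbox_async.main import AsyncSandbox


async def main() -> None:
    # `await (X(...))` and `await X(...)` are equivalent; 1.0.2 just removes
    # the redundant parentheses.
    sandbox = await AsyncSandbox.create(
        timeout=3600,
        template="browser-use-headless",
    )
    proc = await sandbox.commands.run("echo hello from async")
    print("exit_code =", proc.exit_code)
    print("stdout =", proc.stdout)


if __name__ == "__main__":
    asyncio.run(main())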