tamar-model-client 0.1.18__py3-none-any.whl → 0.1.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,448 @@
1
#!/usr/bin/env python3
"""
Simplified Google/Azure scenario test script.

Keeps only the basic invocation and print-out behaviour.
"""

import asyncio
import logging
import os
import sys

# Dedicated logger for this test script so the client library's own
# logging configuration is left untouched.
test_logger = logging.getLogger('test_google_azure_final')
test_logger.setLevel(logging.INFO)
test_logger.propagate = False  # do not bubble records up to the root logger

# Stream handler used only by this script's logger.
test_handler = logging.StreamHandler()
test_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
test_logger.addHandler(test_handler)

logger = test_logger

# Point the client at a local, non-TLS gRPC test server.
os.environ['MODEL_MANAGER_SERVER_GRPC_USE_TLS'] = "false"
os.environ['MODEL_MANAGER_SERVER_ADDRESS'] = "localhost:50051"
os.environ['MODEL_MANAGER_SERVER_JWT_SECRET_KEY'] = "model-manager-server-jwt-key"

# Import the client modules; abort early with a logged error if unavailable.
try:
    from tamar_model_client import TamarModelClient, AsyncTamarModelClient
    from tamar_model_client.schemas import ModelRequest, UserContext
    from tamar_model_client.enums import ProviderType, InvokeType, Channel
except ImportError as e:
    logger.error(f"导入模块失败: {e}")
    sys.exit(1)
def test_google_ai_studio():
    """Run one basic Google AI Studio generation call and print the outcome."""
    print("\n🔍 测试 Google AI Studio...")

    try:
        model_client = TamarModelClient()

        # Build the request; prompt and config are kept deliberately small.
        user_ctx = UserContext(
            user_id="test_user",
            org_id="test_org",
            client_type="test_client",
        )
        req = ModelRequest(
            provider=ProviderType.GOOGLE,
            channel=Channel.AI_STUDIO,
            invoke_type=InvokeType.GENERATION,
            model="tamar-google-gemini-flash-lite",
            contents=[{"role": "user", "parts": [{"text": "Hello, how are you?"}]}],
            user_context=user_ctx,
            config={"temperature": 0.7, "maxOutputTokens": 100},
        )

        result = model_client.invoke(req)
        print("✅ Google AI Studio 成功")
        print(f" 响应类型: {type(result)}")
        print(f" 响应内容: {str(result)[:200]}...")

    except Exception as e:
        # Best-effort test script: report and continue with the next scenario.
        print(f"❌ Google AI Studio 失败: {str(e)}")
def test_google_vertex_ai():
    """Run one basic Google Vertex AI generation call and print the outcome."""
    print("\n🔍 测试 Google Vertex AI...")

    try:
        model_client = TamarModelClient()

        user_ctx = UserContext(
            user_id="test_user",
            org_id="test_org",
            client_type="test_client",
        )
        req = ModelRequest(
            provider=ProviderType.GOOGLE,
            channel=Channel.VERTEXAI,
            invoke_type=InvokeType.GENERATION,
            model="tamar-google-gemini-flash-lite",
            contents=[{"role": "user", "parts": [{"text": "What is AI?"}]}],
            user_context=user_ctx,
            config={"temperature": 0.5},
        )

        result = model_client.invoke(req)
        print("✅ Google Vertex AI 成功")
        print(f" 响应类型: {type(result)}")
        print(f" 响应内容: {str(result)[:200]}...")

    except Exception as e:
        # Best-effort test script: report and continue with the next scenario.
        print(f"❌ Google Vertex AI 失败: {str(e)}")
def test_azure_openai():
    """Run one basic Azure OpenAI chat-completion call and print the outcome."""
    print("\n☁️ 测试 Azure OpenAI...")

    try:
        model_client = TamarModelClient()

        # No explicit channel here — the client's default is relied upon.
        req = ModelRequest(
            provider=ProviderType.AZURE,
            invoke_type=InvokeType.CHAT_COMPLETIONS,
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "Hello, how are you?"}],
            user_context=UserContext(
                user_id="test_user",
                org_id="test_org",
                client_type="test_client",
            ),
        )

        result = model_client.invoke(req)
        print("✅ Azure OpenAI 成功")
        print(f" 响应内容: {result.model_dump_json()}...")

    except Exception as e:
        # Best-effort test script: report and continue with the next scenario.
        print(f"❌ Azure OpenAI 失败: {str(e)}")
async def test_google_streaming():
    """Exercise a streaming Google AI Studio call and print the first chunks."""
    print("\n📡 测试 Google 流式响应...")

    try:
        # async-with guarantees the client is closed even on failure.
        async with AsyncTamarModelClient() as model_client:
            req = ModelRequest(
                provider=ProviderType.GOOGLE,
                channel=Channel.AI_STUDIO,
                invoke_type=InvokeType.GENERATION,
                model="tamar-google-gemini-flash-lite",
                contents=[{"role": "user", "parts": [{"text": "Count 1 to 5"}]}],
                user_context=UserContext(
                    user_id="test_user",
                    org_id="test_org",
                    client_type="test_client",
                ),
                stream=True,
                config={"temperature": 0.1, "maxOutputTokens": 50},
            )

            stream = await model_client.invoke(req)
            print("✅ Google 流式调用成功")
            print(f" 响应类型: {type(stream)}")

            shown = 0
            async for piece in stream:
                shown += 1
                print(f" 数据块 {shown}: {type(piece)} - {piece.model_dump_json()}...")
                if shown >= 3:  # only display the first three chunks
                    break

    except Exception as e:
        # Best-effort test script: report and continue with the next scenario.
        print(f"❌ Google 流式响应失败: {str(e)}")
async def test_azure_streaming():
    """Exercise a streaming Azure OpenAI call and print the first chunks."""
    print("\n📡 测试 Azure 流式响应...")

    try:
        # async-with guarantees the client is closed even on failure.
        async with AsyncTamarModelClient() as model_client:
            req = ModelRequest(
                provider=ProviderType.AZURE,
                channel=Channel.OPENAI,
                invoke_type=InvokeType.CHAT_COMPLETIONS,
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": "Count 1 to 5"}],
                user_context=UserContext(
                    user_id="test_user",
                    org_id="test_org",
                    client_type="test_client",
                ),
                stream=True,  # request a streamed response
            )

            stream = await model_client.invoke(req)
            print("✅ Azure 流式调用成功")
            print(f" 响应类型: {type(stream)}")

            shown = 0
            async for piece in stream:
                shown += 1
                print(f" 数据块 {shown}: {type(piece)} - {piece.model_dump_json()}...")
                if shown >= 3:  # only display the first three chunks
                    break

    except Exception as e:
        # Best-effort test script: report and continue with the next scenario.
        print(f"❌ Azure 流式响应失败: {str(e)}")
def test_sync_batch_requests():
    """Send one synchronous batch mixing Google and Azure items, then print each result."""
    print("\n📦 测试同步批量请求...")

    try:
        from tamar_model_client.schemas import BatchModelRequest, BatchModelRequestItem

        # A single user context shared by the whole batch.
        shared_ctx = UserContext(
            user_id="test_user",
            org_id="test_org",
            client_type="test_client",
        )
        batch_items = [
            # Google AI Studio item
            BatchModelRequestItem(
                provider=ProviderType.GOOGLE,
                invoke_type=InvokeType.GENERATION,
                model="tamar-google-gemini-flash-lite",
                contents=[{"role": "user", "parts": [{"text": "Hello from sync batch - Google AI Studio"}]}],
                custom_id="sync-google-ai-studio-1",
            ),
            # Azure OpenAI item
            BatchModelRequestItem(
                provider=ProviderType.AZURE,
                invoke_type=InvokeType.CHAT_COMPLETIONS,
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": "Hello from sync batch - Azure OpenAI"}],
                custom_id="sync-azure-openai-1",
            ),
            # A second Azure item
            BatchModelRequestItem(
                provider=ProviderType.AZURE,
                invoke_type=InvokeType.CHAT_COMPLETIONS,
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": "What is 2+2?"}],
                custom_id="sync-azure-openai-2",
            ),
        ]

        with TamarModelClient() as model_client:
            batch_request = BatchModelRequest(user_context=shared_ctx, items=batch_items)

            # Fire the whole batch in one call.
            batch_response = model_client.invoke_batch(batch_request)

            print("✅ 同步批量请求成功")
            print(f" 请求数量: {len(batch_request.items)}")
            print(f" 响应数量: {len(batch_response.responses)}")
            print(f" 批量请求ID: {batch_response.request_id}")

            # Summarise every per-item response.
            for idx, item_resp in enumerate(batch_response.responses):
                print(f"\n 响应 {idx+1}:")
                print(f" - custom_id: {item_resp.custom_id}")
                print(f" - 内容长度: {len(item_resp.content) if item_resp.content else 0}")
                print(f" - 有错误: {'是' if item_resp.error else '否'}")
                if item_resp.content:
                    print(f" - 内容预览: {item_resp.content[:100]}...")
                if item_resp.error:
                    print(f" - 错误信息: {item_resp.error}")

    except Exception as e:
        # Best-effort test script: report and continue with the next scenario.
        print(f"❌ 同步批量请求失败: {str(e)}")
async def test_batch_requests():
    """Send one asynchronous batch mixing Google and Azure items, then print each result."""
    print("\n📦 测试异步批量请求...")

    try:
        from tamar_model_client.schemas import BatchModelRequest, BatchModelRequestItem

        # A single user context shared by the whole batch.
        shared_ctx = UserContext(
            user_id="test_user",
            org_id="test_org",
            client_type="test_client",
        )
        batch_items = [
            # Google AI Studio item
            BatchModelRequestItem(
                provider=ProviderType.GOOGLE,
                invoke_type=InvokeType.GENERATION,
                model="tamar-google-gemini-flash-lite",
                contents=[{"role": "user", "parts": [{"text": "Hello from Google AI Studio"}]}],
                custom_id="google-ai-studio-1",
            ),
            # Google Vertex AI item
            BatchModelRequestItem(
                provider=ProviderType.GOOGLE,
                channel=Channel.VERTEXAI,
                invoke_type=InvokeType.GENERATION,
                model="tamar-google-gemini-flash-lite",
                contents=[{"role": "user", "parts": [{"text": "Hello from Google Vertex AI"}]}],
                custom_id="google-vertex-ai-1",
            ),
            # Azure OpenAI item
            BatchModelRequestItem(
                provider=ProviderType.AZURE,
                invoke_type=InvokeType.CHAT_COMPLETIONS,
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": "Hello from Azure OpenAI"}],
                custom_id="azure-openai-1",
            ),
            # A second Azure item
            BatchModelRequestItem(
                provider=ProviderType.AZURE,
                invoke_type=InvokeType.CHAT_COMPLETIONS,
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": "What is the capital of France?"}],
                custom_id="azure-openai-2",
            ),
        ]

        async with AsyncTamarModelClient() as model_client:
            batch_request = BatchModelRequest(user_context=shared_ctx, items=batch_items)

            # Fire the whole batch in one call.
            batch_response = await model_client.invoke_batch(batch_request)

            print("✅ 批量请求成功")
            print(f" 请求数量: {len(batch_request.items)}")
            print(f" 响应数量: {len(batch_response.responses)}")
            print(f" 批量请求ID: {batch_response.request_id}")

            # Summarise every per-item response.
            for idx, item_resp in enumerate(batch_response.responses):
                print(f"\n 响应 {idx+1}:")
                print(f" - custom_id: {item_resp.custom_id}")
                print(f" - 内容长度: {len(item_resp.content) if item_resp.content else 0}")
                print(f" - 有错误: {'是' if item_resp.error else '否'}")
                if item_resp.content:
                    print(f" - 内容预览: {item_resp.content[:100]}...")
                if item_resp.error:
                    print(f" - 错误信息: {item_resp.error}")

    except Exception as e:
        # Best-effort test script: report and continue with the next scenario.
        print(f"❌ 批量请求失败: {str(e)}")
async def main():
    """Run every sync/async test scenario, then gracefully clean up leftover tasks.

    Each scenario swallows its own errors, so the outer handlers here only
    catch timeouts, user interrupts, and unexpected driver failures.
    """
    print("🚀 简化版 Google/Azure 测试")
    print("=" * 50)

    try:
        # Synchronous single-call tests
        test_google_ai_studio()
        test_google_vertex_ai()
        test_azure_openai()

        # Synchronous batch test
        test_sync_batch_requests()

        # Async streaming tests, bounded so a hung stream cannot stall the run
        await asyncio.wait_for(test_google_streaming(), timeout=60.0)
        await asyncio.wait_for(test_azure_streaming(), timeout=60.0)

        # Async batch test
        await asyncio.wait_for(test_batch_requests(), timeout=120.0)

        print("\n✅ 测试完成")

    except asyncio.TimeoutError:
        print("\n⏰ 测试超时")
    except KeyboardInterrupt:
        print("\n⚠️ 用户中断测试")
    except Exception as e:
        print(f"\n❌ 测试执行出错: {e}")
    finally:
        # Simple, graceful cleanup of any still-pending asyncio tasks.
        print("📝 清理异步任务...")
        try:
            # Give in-flight tasks a brief moment to finish on their own.
            await asyncio.sleep(0.5)

            # Collect tasks that are still pending (excluding this one).
            current_task = asyncio.current_task()
            tasks = [task for task in asyncio.all_tasks()
                     if not task.done() and task != current_task]

            if tasks:
                print(f" 发现 {len(tasks)} 个未完成任务,等待自然完成...")
                # FIX: the previous code wrapped asyncio.sleep(2.0) in
                # wait_for(timeout=2.0), which never actually waited on the
                # pending tasks and raced its own timeout. Wait on the tasks
                # themselves, capped at 2 seconds, without cancelling them.
                await asyncio.wait(tasks, timeout=2.0)

            print(" 任务清理完成")

        except Exception as e:
            print(f" ⚠️ 任务清理时出现异常: {e}")

        print("🔚 程序即将退出")
if __name__ == "__main__":
    try:
        # Temporarily quiet asyncio's logger to cut task-cancellation noise.
        asyncio_logger = logging.getLogger('asyncio')
        saved_level = asyncio_logger.level
        asyncio_logger.setLevel(logging.ERROR)

        try:
            asyncio.run(main())
        finally:
            # Restore whatever level the asyncio logger had before.
            asyncio_logger.setLevel(saved_level)

    except KeyboardInterrupt:
        print("\n⚠️ 程序被用户中断")
    except Exception as e:
        print(f"\n❌ 程序执行出错: {e}")
    finally:
        print("🏁 程序已退出")
tests/test_simple.py ADDED
@@ -0,0 +1,235 @@
1
#!/usr/bin/env python3
"""
Simplified Google/Azure scenario test script.

Keeps only the basic invocation and print-out behaviour.
"""

import asyncio
import logging
import os
import sys

# Basic logging configuration for the whole script.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
)
logger = logging.getLogger(__name__)

# Point the client at a local, non-TLS gRPC test server.
os.environ['MODEL_MANAGER_SERVER_GRPC_USE_TLS'] = "false"
os.environ['MODEL_MANAGER_SERVER_ADDRESS'] = "localhost:50051"
os.environ['MODEL_MANAGER_SERVER_JWT_SECRET_KEY'] = "model-manager-server-jwt-key"

# Import the client modules; abort early with a logged error if unavailable.
try:
    from tamar_model_client import TamarModelClient, AsyncTamarModelClient
    from tamar_model_client.schemas import ModelRequest, UserContext
    from tamar_model_client.enums import ProviderType, InvokeType, Channel
except ImportError as e:
    logger.error(f"导入模块失败: {e}")
    sys.exit(1)
def test_google_ai_studio():
    """Run one basic Google AI Studio generation call and print the outcome."""
    print("\n🔍 测试 Google AI Studio...")

    try:
        model_client = TamarModelClient()

        req = ModelRequest(
            provider=ProviderType.GOOGLE,
            channel=Channel.AI_STUDIO,
            invoke_type=InvokeType.GENERATION,
            model="gemini-pro",
            contents=[{"role": "user", "parts": [{"text": "Hello, how are you?"}]}],
            user_context=UserContext(
                user_id="test_user",
                org_id="test_org",
                client_type="test_client",
            ),
            config={"temperature": 0.7, "maxOutputTokens": 100},
        )

        result = model_client.invoke(req)
        print("✅ Google AI Studio 成功")
        print(f" 响应类型: {type(result)}")
        print(f" 响应内容: {str(result)[:200]}...")

    except Exception as e:
        # Best-effort test script: report and continue with the next scenario.
        print(f"❌ Google AI Studio 失败: {str(e)}")
def test_google_vertex_ai():
    """Run one basic Google Vertex AI generation call and print the outcome."""
    print("\n🔍 测试 Google Vertex AI...")

    try:
        model_client = TamarModelClient()

        req = ModelRequest(
            provider=ProviderType.GOOGLE,
            channel=Channel.VERTEXAI,
            invoke_type=InvokeType.GENERATION,
            model="gemini-1.5-flash",
            contents=[{"role": "user", "parts": [{"text": "What is AI?"}]}],
            user_context=UserContext(
                user_id="test_user",
                org_id="test_org",
                client_type="test_client",
            ),
            config={"temperature": 0.5},
        )

        result = model_client.invoke(req)
        print("✅ Google Vertex AI 成功")
        print(f" 响应类型: {type(result)}")
        print(f" 响应内容: {str(result)[:200]}...")

    except Exception as e:
        # Best-effort test script: report and continue with the next scenario.
        print(f"❌ Google Vertex AI 失败: {str(e)}")
def test_azure_openai():
    """Run one basic Azure OpenAI chat-completion call and print the outcome."""
    print("\n☁️ 测试 Azure OpenAI...")

    try:
        model_client = TamarModelClient()

        req = ModelRequest(
            provider=ProviderType.AZURE,
            channel=Channel.OPENAI,
            invoke_type=InvokeType.CHAT_COMPLETIONS,
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "Hello, how are you?"}],
            user_context=UserContext(
                user_id="test_user",
                org_id="test_org",
                client_type="test_client",
            ),
            temperature=0.7,
            max_tokens=100,
        )

        result = model_client.invoke(req)
        print("✅ Azure OpenAI 成功")
        print(f" 响应类型: {type(result)}")
        print(f" 响应内容: {str(result)[:200]}...")

    except Exception as e:
        # Best-effort test script: report and continue with the next scenario.
        print(f"❌ Azure OpenAI 失败: {str(e)}")
async def test_google_streaming():
    """Exercise a streaming Google AI Studio call and print the first chunks.

    Fix: the original created ``AsyncTamarModelClient()`` without ever
    closing it, leaking the underlying connection. ``async with`` now
    guarantees cleanup even on error, matching the sibling test script.
    """
    print("\n📡 测试 Google 流式响应...")

    try:
        async with AsyncTamarModelClient() as client:
            request = ModelRequest(
                provider=ProviderType.GOOGLE,
                channel=Channel.AI_STUDIO,
                invoke_type=InvokeType.GENERATION,
                model="gemini-pro",
                contents=[
                    {"role": "user", "parts": [{"text": "Count 1 to 5"}]}
                ],
                user_context=UserContext(
                    user_id="test_user",
                    org_id="test_org",
                    client_type="test_client"
                ),
                stream=True,
                config={
                    "temperature": 0.1,
                    "maxOutputTokens": 50
                }
            )

            response_gen = await client.invoke(request)
            print("✅ Google 流式调用成功")
            print(f" 响应类型: {type(response_gen)}")

            chunk_count = 0
            async for chunk in response_gen:
                chunk_count += 1
                print(f" 数据块 {chunk_count}: {type(chunk)} - {str(chunk)[:100]}...")
                if chunk_count >= 3:  # only display the first three chunks
                    break

    except Exception as e:
        # Best-effort test script: report and continue with the next scenario.
        print(f"❌ Google 流式响应失败: {str(e)}")
async def test_azure_streaming():
    """Exercise a streaming Azure OpenAI call and print the first chunks.

    Fix: the original created ``AsyncTamarModelClient()`` without ever
    closing it, leaking the underlying connection. ``async with`` now
    guarantees cleanup even on error, matching the sibling test script.
    """
    print("\n📡 测试 Azure 流式响应...")

    try:
        async with AsyncTamarModelClient() as client:
            request = ModelRequest(
                provider=ProviderType.AZURE,
                channel=Channel.OPENAI,
                invoke_type=InvokeType.CHAT_COMPLETIONS,
                model="gpt-4o-mini",
                messages=[
                    {"role": "user", "content": "Count 1 to 5"}
                ],
                user_context=UserContext(
                    user_id="test_user",
                    org_id="test_org",
                    client_type="test_client"
                ),
                stream=True,
                temperature=0.1,
                max_tokens=50
            )

            response_gen = await client.invoke(request)
            print("✅ Azure 流式调用成功")
            print(f" 响应类型: {type(response_gen)}")

            chunk_count = 0
            async for chunk in response_gen:
                chunk_count += 1
                print(f" 数据块 {chunk_count}: {type(chunk)} - {str(chunk)[:100]}...")
                if chunk_count >= 3:  # only display the first three chunks
                    break

    except Exception as e:
        # Best-effort test script: report and continue with the next scenario.
        print(f"❌ Azure 流式响应失败: {str(e)}")
async def main():
    """Drive the synchronous tests first, then the async streaming tests."""
    print("🚀 简化版 Google/Azure 测试")
    print("=" * 50)

    # Synchronous single-call tests
    test_google_ai_studio()
    test_google_vertex_ai()
    test_azure_openai()

    # Asynchronous streaming tests
    await test_google_streaming()
    await test_azure_streaming()

    print("\n✅ 测试完成")
# Script entry point: run the async test driver on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())