tamar-model-client 0.1.20__py3-none-any.whl → 0.1.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -8,6 +8,10 @@ import asyncio
  import logging
  import os
  import sys
+ import time
+ import threading
+ from concurrent.futures import ThreadPoolExecutor
+ from typing import List, Dict, Tuple

  # Logging dedicated to this test script
  # Use a distinct logger name so the client's own logging is unaffected
@@ -22,8 +26,8 @@ test_logger.addHandler(test_handler)

  logger = test_logger

- os.environ['MODEL_MANAGER_SERVER_GRPC_USE_TLS'] = "false"
- os.environ['MODEL_MANAGER_SERVER_ADDRESS'] = "localhost:50051"
+ os.environ['MODEL_MANAGER_SERVER_GRPC_USE_TLS'] = "true"
+ os.environ['MODEL_MANAGER_SERVER_ADDRESS'] = "model-manager-server-grpc-131786869360.asia-northeast1.run.app"
  os.environ['MODEL_MANAGER_SERVER_JWT_SECRET_KEY'] = "model-manager-server-jwt-key"

  # Import the client modules
@@ -31,6 +35,12 @@ try:
      from tamar_model_client import TamarModelClient, AsyncTamarModelClient
      from tamar_model_client.schemas import ModelRequest, UserContext
      from tamar_model_client.enums import ProviderType, InvokeType, Channel
+
+     # Temporarily enable the SDK's log output for debugging
+     # Note: this emits JSON-formatted logs
+     import os
+     os.environ['TAMAR_MODEL_CLIENT_LOG_LEVEL'] = 'INFO'
+
  except ImportError as e:
      logger.error(f"Failed to import modules: {e}")
      sys.exit(1)
@@ -39,10 +49,10 @@ except ImportError as e:
  def test_google_ai_studio():
      """Test Google AI Studio"""
      print("\n🔍 Testing Google AI Studio...")
-
+
      try:
          client = TamarModelClient()
-
+
          request = ModelRequest(
              provider=ProviderType.GOOGLE,
              channel=Channel.AI_STUDIO,
@@ -61,12 +71,12 @@ def test_google_ai_studio():
                  "maxOutputTokens": 100
              }
          )
-
+
          response = client.invoke(request)
          print(f"✅ Google AI Studio succeeded")
          print(f"   Response type: {type(response)}")
          print(f"   Response content: {str(response)[:200]}...")
-
+
      except Exception as e:
          print(f"❌ Google AI Studio failed: {str(e)}")
@@ -74,10 +84,10 @@ def test_google_ai_studio():
  def test_google_vertex_ai():
      """Test Google Vertex AI"""
      print("\n🔍 Testing Google Vertex AI...")
-
+
      try:
          client = TamarModelClient()
-
+
          request = ModelRequest(
              provider=ProviderType.GOOGLE,
              channel=Channel.VERTEXAI,
@@ -95,12 +105,12 @@ def test_google_vertex_ai():
                  "temperature": 0.5
              }
          )
-
+
          response = client.invoke(request)
          print(f"✅ Google Vertex AI succeeded")
          print(f"   Response type: {type(response)}")
          print(f"   Response content: {str(response)[:200]}...")
-
+
      except Exception as e:
          print(f"❌ Google Vertex AI failed: {str(e)}")
@@ -108,10 +118,10 @@ def test_google_vertex_ai():
  def test_azure_openai():
      """Test Azure OpenAI"""
      print("\n☁️ Testing Azure OpenAI...")
-
+
      try:
          client = TamarModelClient()
-
+
          request = ModelRequest(
              provider=ProviderType.AZURE,
              invoke_type=InvokeType.CHAT_COMPLETIONS,
@@ -125,11 +135,11 @@ def test_azure_openai():
                  client_type="test_client"
              ),
          )
-
+
          response = client.invoke(request)
          print(f"✅ Azure OpenAI succeeded")
          print(f"   Response content: {response.model_dump_json()}...")
-
+
      except Exception as e:
          print(f"❌ Azure OpenAI failed: {str(e)}")
@@ -137,7 +147,7 @@ def test_azure_openai():
  async def test_google_streaming():
      """Test Google streaming responses"""
      print("\n📡 Testing Google streaming responses...")
-
+
      try:
          async with AsyncTamarModelClient() as client:
              request = ModelRequest(
@@ -159,18 +169,18 @@ async def test_google_streaming():
                      "maxOutputTokens": 50
                  }
              )
-
+
              response_gen = await client.invoke(request)
              print(f"✅ Google streaming call succeeded")
              print(f"   Response type: {type(response_gen)}")
-
+
              chunk_count = 0
              async for chunk in response_gen:
                  chunk_count += 1
                  print(f"   Chunk {chunk_count}: {type(chunk)} - {chunk.model_dump_json()}...")
                  if chunk_count >= 3:  # only show the first 3 chunks
                      break
-
+
      except Exception as e:
          print(f"❌ Google streaming failed: {str(e)}")
@@ -178,7 +188,7 @@ async def test_google_streaming():
  async def test_azure_streaming():
      """Test Azure streaming responses"""
      print("\n📡 Testing Azure streaming responses...")
-
+
      try:
          async with AsyncTamarModelClient() as client:
              request = ModelRequest(
@@ -196,18 +206,18 @@ async def test_azure_streaming():
                  ),
                  stream=True  # enable streaming
              )
-
+
              response_gen = await client.invoke(request)
              print(f"✅ Azure streaming call succeeded")
              print(f"   Response type: {type(response_gen)}")
-
+
              chunk_count = 0
              async for chunk in response_gen:
                  chunk_count += 1
                  print(f"   Chunk {chunk_count}: {type(chunk)} - {chunk.model_dump_json()}...")
                  if chunk_count >= 3:  # only show the first 3 chunks
                      break
-
+
      except Exception as e:
          print(f"❌ Azure streaming failed: {str(e)}")
@@ -215,10 +225,10 @@ async def test_azure_streaming():
  def test_sync_batch_requests():
      """Test synchronous batch requests"""
      print("\n📦 Testing synchronous batch requests...")
-
+
      try:
          from tamar_model_client.schemas import BatchModelRequest, BatchModelRequestItem
-
+
          with TamarModelClient() as client:
              # Build a batch request with multiple Google and Azure requests
              batch_request = BatchModelRequest(
@@ -260,18 +270,18 @@ def test_sync_batch_requests():
                      )
                  ]
              )
-
+
              # Execute the batch request
              batch_response = client.invoke_batch(batch_request)
-
+
              print(f"✅ Synchronous batch request succeeded")
              print(f"   Number of requests: {len(batch_request.items)}")
              print(f"   Number of responses: {len(batch_response.responses)}")
              print(f"   Batch request ID: {batch_response.request_id}")
-
+
              # Show each response's result
              for i, response in enumerate(batch_response.responses):
-                 print(f"\n   Response {i+1}:")
+                 print(f"\n   Response {i + 1}:")
                  print(f"   - custom_id: {response.custom_id}")
                  print(f"   - content length: {len(response.content) if response.content else 0}")
                  print(f"   - has error: {'yes' if response.error else 'no'}")
@@ -279,7 +289,7 @@ def test_sync_batch_requests():
                      print(f"   - content preview: {response.content[:100]}...")
                  if response.error:
                      print(f"   - error message: {response.error}")
-
+
      except Exception as e:
          print(f"❌ Synchronous batch request failed: {str(e)}")

@@ -287,10 +297,10 @@ def test_sync_batch_requests():
  async def test_batch_requests():
      """Test asynchronous batch requests"""
      print("\n📦 Testing asynchronous batch requests...")
-
+
      try:
          from tamar_model_client.schemas import BatchModelRequest, BatchModelRequestItem
-
+
          async with AsyncTamarModelClient() as client:
              # Build a batch request with multiple Google and Azure requests
              batch_request = BatchModelRequest(
@@ -343,18 +353,18 @@ async def test_batch_requests():
                      )
                  ]
              )
-
+
              # Execute the batch request
              batch_response = await client.invoke_batch(batch_request)
-
+
              print(f"✅ Batch request succeeded")
              print(f"   Number of requests: {len(batch_request.items)}")
              print(f"   Number of responses: {len(batch_response.responses)}")
              print(f"   Batch request ID: {batch_response.request_id}")
-
+
              # Show each response's result
              for i, response in enumerate(batch_response.responses):
-                 print(f"\n   Response {i+1}:")
+                 print(f"\n   Response {i + 1}:")
                  print(f"   - custom_id: {response.custom_id}")
                  print(f"   - content length: {len(response.content) if response.content else 0}")
                  print(f"   - has error: {'yes' if response.error else 'no'}")
@@ -362,34 +372,286 @@ async def test_batch_requests():
                      print(f"   - content preview: {response.content[:100]}...")
                  if response.error:
                      print(f"   - error message: {response.error}")
-
+
      except Exception as e:
          print(f"❌ Batch request failed: {str(e)}")


+ def test_concurrent_requests(num_requests: int = 150):
+     """Test concurrent requests.
+
+     Args:
+         num_requests: total number of requests to send (default 150)
+     """
+     print(f"\n🚀 Testing concurrent requests ({num_requests} requests)...")
+
+     # Shared counters
+     total_requests = 0
+     successful_requests = 0
+     failed_requests = 0
+     request_times: List[float] = []
+     errors: Dict[str, int] = {}
+
+     # Lock protecting the shared counters across threads
+     stats_lock = threading.Lock()
+
+     def make_single_request(request_id: int) -> Tuple[bool, float, str]:
+         """Execute a single request and return the result
+
+         Returns:
+             (success, duration, error_msg)
+         """
+         start_time = time.time()
+         try:
+             # Each thread creates its own client instance
+             client = TamarModelClient()
+
+             # Google Vertex AI
+             request = ModelRequest(
+                 provider=ProviderType.GOOGLE,
+                 channel=Channel.VERTEXAI,
+                 invoke_type=InvokeType.GENERATION,
+                 model="tamar-google-gemini-flash-lite",
+                 contents="What is 1+1?",
+                 user_context=UserContext(
+                     user_id=f"concurrent_user_{request_id:03d}",
+                     org_id="test_org",
+                     client_type="concurrent_test"
+                 ),
+                 config={"temperature": 0.1}
+             )
+
+             response = client.invoke(request, timeout=300000.0)
+             duration = time.time() - start_time
+             return (True, duration, "")
+
+         except Exception as e:
+             duration = time.time() - start_time
+             error_msg = str(e)
+             return (False, duration, error_msg)
+
+     def worker(request_id: int):
+         """Worker thread function"""
+         nonlocal total_requests, successful_requests, failed_requests
+
+         success, duration, error_msg = make_single_request(request_id)
+
+         with stats_lock:
+             total_requests += 1
+             request_times.append(duration)
+
+             if success:
+                 successful_requests += 1
+             else:
+                 failed_requests += 1
+                 # Tally error types
+                 error_type = error_msg.split(':')[0] if ':' in error_msg else error_msg[:50]
+                 errors[error_type] = errors.get(error_type, 0) + 1
+
+             # Print progress every 20 requests
+             if total_requests % 20 == 0:
+                 print(
+                     f"   Progress: {total_requests}/{num_requests} (succeeded: {successful_requests}, failed: {failed_requests})")
+
+     # Run the requests concurrently with a thread pool
+     start_time = time.time()
+
+     # Thread pool capped at 50 concurrent threads
+     with ThreadPoolExecutor(max_workers=50) as executor:
+         # Submit all tasks
+         futures = [executor.submit(worker, i) for i in range(num_requests)]
+
+         # Wait for all tasks to finish
+         for future in futures:
+             future.result()
+
+     total_duration = time.time() - start_time
+
+     # Compute statistics
+     avg_request_time = sum(request_times) / len(request_times) if request_times else 0
+     min_request_time = min(request_times) if request_times else 0
+     max_request_time = max(request_times) if request_times else 0
+
+     # Print the results
+     print(f"\n📊 Concurrency test results:")
+     print(f"   Total requests: {total_requests}")
+     print(f"   Successful: {successful_requests} ({successful_requests / total_requests * 100:.1f}%)")
+     print(f"   Failed: {failed_requests} ({failed_requests / total_requests * 100:.1f}%)")
+     print(f"   Total time: {total_duration:.2f} s")
+     print(f"   Average QPS: {total_requests / total_duration:.2f}")
+     print(f"\n   Request latency:")
+     print(f"   - mean: {avg_request_time:.3f} s")
+     print(f"   - min: {min_request_time:.3f} s")
+     print(f"   - max: {max_request_time:.3f} s")
+
+     if errors:
+         print(f"\n   Error breakdown:")
+         for error_type, count in sorted(errors.items(), key=lambda x: x[1], reverse=True):
+             print(f"   - {error_type}: {count} occurrences")
+
+     return {
+         "total": total_requests,
+         "successful": successful_requests,
+         "failed": failed_requests,
+         "duration": total_duration,
+         "qps": total_requests / total_duration
+     }
+
+
+ async def test_async_concurrent_requests(num_requests: int = 150):
+     """Test asynchronous concurrent requests.
+
+     Args:
+         num_requests: total number of requests to send (default 150)
+     """
+     print(f"\n🚀 Testing async concurrent requests ({num_requests} requests)...")
+
+     # Shared counters
+     total_requests = 0
+     successful_requests = 0
+     failed_requests = 0
+     request_times: List[float] = []
+     errors: Dict[str, int] = {}
+
+     # Async lock protecting the shared counters
+     stats_lock = asyncio.Lock()
+
+     async def make_single_async_request(client: AsyncTamarModelClient, request_id: int) -> Tuple[bool, float, str]:
+         """Execute a single async request and return the result
+
+         Returns:
+             (success, duration, error_msg)
+         """
+         start_time = time.time()
+         try:
+             # Vary the provider by request ID to diversify the test
+             # Google Vertex AI
+             request = ModelRequest(
+                 provider=ProviderType.GOOGLE,
+                 channel=Channel.VERTEXAI,
+                 invoke_type=InvokeType.GENERATION,
+                 model="tamar-google-gemini-flash-lite",
+                 contents="What is 1+1?",
+                 user_context=UserContext(
+                     user_id=f"async_concurrent_user_{request_id:03d}",
+                     org_id="test_org",
+                     client_type="async_concurrent_test"
+                 ),
+                 config={"temperature": 0.1}
+             )
+
+             response = await client.invoke(request, timeout=300000.0)
+             duration = time.time() - start_time
+             return (True, duration, "")
+
+         except Exception as e:
+             duration = time.time() - start_time
+             error_msg = str(e)
+             return (False, duration, error_msg)
+
+     async def async_worker(client: AsyncTamarModelClient, request_id: int):
+         """Async worker coroutine"""
+         nonlocal total_requests, successful_requests, failed_requests
+
+         success, duration, error_msg = await make_single_async_request(client, request_id)
+
+         async with stats_lock:
+             total_requests += 1
+             request_times.append(duration)
+
+             if success:
+                 successful_requests += 1
+             else:
+                 failed_requests += 1
+                 # Tally error types
+                 error_type = error_msg.split(':')[0] if ':' in error_msg else error_msg[:50]
+                 errors[error_type] = errors.get(error_type, 0) + 1
+
+             # Print progress every 20 requests
+             if total_requests % 20 == 0:
+                 print(
+                     f"   Progress: {total_requests}/{num_requests} (succeeded: {successful_requests}, failed: {failed_requests})")
+
+     # Run the requests concurrently through the async client
+     start_time = time.time()
+
+     # Create one shared async client
+     async with AsyncTamarModelClient() as client:
+         # Create all tasks, but cap the concurrency
+         semaphore = asyncio.Semaphore(50)  # at most 50 requests in flight
+
+         async def limited_worker(request_id: int):
+             async with semaphore:
+                 await async_worker(client, request_id)
+
+         # Create all tasks
+         tasks = [limited_worker(i) for i in range(num_requests)]
+
+         # Wait for all tasks to finish
+         await asyncio.gather(*tasks)
+
+     total_duration = time.time() - start_time
+
+     # Compute statistics
+     avg_request_time = sum(request_times) / len(request_times) if request_times else 0
+     min_request_time = min(request_times) if request_times else 0
+     max_request_time = max(request_times) if request_times else 0
+
+     # Print the results
+     print(f"\n📊 Async concurrency test results:")
+     print(f"   Total requests: {total_requests}")
+     print(f"   Successful: {successful_requests} ({successful_requests / total_requests * 100:.1f}%)")
+     print(f"   Failed: {failed_requests} ({failed_requests / total_requests * 100:.1f}%)")
+     print(f"   Total time: {total_duration:.2f} s")
+     print(f"   Average QPS: {total_requests / total_duration:.2f}")
+     print(f"\n   Request latency:")
+     print(f"   - mean: {avg_request_time:.3f} s")
+     print(f"   - min: {min_request_time:.3f} s")
+     print(f"   - max: {max_request_time:.3f} s")
+
+     if errors:
+         print(f"\n   Error breakdown:")
+         for error_type, count in sorted(errors.items(), key=lambda x: x[1], reverse=True):
+             print(f"   - {error_type}: {count} occurrences")
+
+     return {
+         "total": total_requests,
+         "successful": successful_requests,
+         "failed": failed_requests,
+         "duration": total_duration,
+         "qps": total_requests / total_duration
+     }
+
+
  async def main():
      """Main entry point"""
      print("🚀 Simplified Google/Azure test")
      print("=" * 50)
-
+
      try:
-         # Sync tests
-         test_google_ai_studio()
-         test_google_vertex_ai()
-         test_azure_openai()
-
-         # Sync batch test
-         test_sync_batch_requests()
-
-         # Async streaming tests
-         await asyncio.wait_for(test_google_streaming(), timeout=60.0)
-         await asyncio.wait_for(test_azure_streaming(), timeout=60.0)
-
-         # Async batch test
-         await asyncio.wait_for(test_batch_requests(), timeout=120.0)
-
+         # # Sync tests
+         # test_google_ai_studio()
+         # test_google_vertex_ai()
+         # test_azure_openai()
+         #
+         # # Sync batch test
+         # test_sync_batch_requests()
+         #
+         # # Async streaming tests
+         # await asyncio.wait_for(test_google_streaming(), timeout=60.0)
+         # await asyncio.wait_for(test_azure_streaming(), timeout=60.0)
+         #
+         # # Async batch test
+         # await asyncio.wait_for(test_batch_requests(), timeout=120.0)
+
+         # Sync concurrency test
+         # test_concurrent_requests(150)  # run 150 concurrent requests
+
+         # Async concurrency test
+         await test_async_concurrent_requests(1000)  # run 1000 async concurrent requests
+
          print("\n✅ Tests finished")
-
+
      except asyncio.TimeoutError:
          print("\n⏰ Tests timed out")
      except KeyboardInterrupt:
@@ -402,12 +664,12 @@ async def main():
      try:
          # Wait briefly so tasks that are finishing can complete naturally
          await asyncio.sleep(0.5)
-
+
          # Check whether any tasks are still unfinished
          current_task = asyncio.current_task()
-         tasks = [task for task in asyncio.all_tasks()
-                  if not task.done() and task != current_task]
-
+         tasks = [task for task in asyncio.all_tasks()
+                  if not task.done() and task != current_task]
+
          if tasks:
              print(f"   Found {len(tasks)} unfinished tasks, waiting for them to complete...")
              # Just wait; do not force-cancel
@@ -418,12 +680,12 @@ async def main():
              )
          except asyncio.TimeoutError:
              pass
-
+
          print("   Task cleanup finished")
-
+
      except Exception as e:
          print(f"   ⚠️ Exception during task cleanup: {e}")
-
+
      print("🔚 Exiting")

@@ -433,16 +695,16 @@ if __name__ == "__main__":
          asyncio_logger = logging.getLogger('asyncio')
          original_level = asyncio_logger.level
          asyncio_logger.setLevel(logging.ERROR)
-
+
          try:
              asyncio.run(main())
          finally:
              # Restore the original log level
              asyncio_logger.setLevel(original_level)
-
+
      except KeyboardInterrupt:
          print("\n⚠️ Program interrupted by user")
      except Exception as e:
          print(f"\n❌ Program error: {e}")
      finally:
-         print("🏁 Program exited")
+         print("🏁 Program exited")
@@ -0,0 +1,75 @@
+ #!/usr/bin/env python3
+ """
+ Reproduce the log formatting issue
+ """
+
+ import asyncio
+ import logging
+ import os
+ import sys
+
+ # Set environment variables
+ os.environ['MODEL_MANAGER_SERVER_GRPC_USE_TLS'] = "false"
+ os.environ['MODEL_MANAGER_SERVER_ADDRESS'] = "localhost:50051"
+ os.environ['MODEL_MANAGER_SERVER_JWT_SECRET_KEY'] = "model-manager-server-jwt-key"
+
+ # Import the SDK first
+ from tamar_model_client import AsyncTamarModelClient
+ from tamar_model_client.schemas import ModelRequest, UserContext
+ from tamar_model_client.enums import ProviderType, InvokeType, Channel
+
+ # Inspect the SDK's logger configuration
+ print("=== SDK Logger Configuration ===")
+ sdk_loggers = [
+     'tamar_model_client',
+     'tamar_model_client.async_client',
+     'tamar_model_client.error_handler',
+     'tamar_model_client.core.base_client'
+ ]
+
+ for logger_name in sdk_loggers:
+     logger = logging.getLogger(logger_name)
+     print(f"\nLogger: {logger_name}")
+     print(f"  Level: {logging.getLevelName(logger.level)}")
+     print(f"  Handlers: {len(logger.handlers)}")
+     for i, handler in enumerate(logger.handlers):
+         print(f"    Handler {i}: {type(handler).__name__}")
+         if hasattr(handler, 'formatter'):
+             print(f"      Formatter: {type(handler.formatter).__name__ if handler.formatter else 'None'}")
+     print(f"  Propagate: {logger.propagate}")
+
+
+ async def test_error_logging():
+     """Test the error log format"""
+     print("\n=== Testing Error Logging ===")
+
+     try:
+         async with AsyncTamarModelClient() as client:
+             # Deliberately build a request that will fail
+             request = ModelRequest(
+                 provider=ProviderType.GOOGLE,
+                 channel=Channel.VERTEXAI,
+                 invoke_type=InvokeType.GENERATION,
+                 model="invalid-model",
+                 contents="test",
+                 user_context=UserContext(
+                     user_id="test_user",
+                     org_id="test_org",
+                     client_type="test_client"
+                 )
+             )
+
+             response = await client.invoke(request, timeout=5.0)
+             print(f"Response: {response}")
+
+     except Exception as e:
+         print(f"Exception caught: {type(e).__name__}: {str(e)}")
+
+
+ async def main():
+     await test_error_logging()
+
+
+ if __name__ == "__main__":
+     print("Starting logging test...")
+     asyncio.run(main())
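The new debug script only inspects the SDK's logger tree; it does not change it. If the goal is to replace the JSON logs mentioned in the main test script with plain text, one possible approach is sketched below. It assumes the SDK's handlers can simply be swapped out; `use_plain_logging` is a hypothetical helper, not part of the tamar_model_client API, and the logger names are the ones the debug script itself lists:

    import logging


    def use_plain_logging(level: int = logging.INFO) -> None:
        """Hypothetical helper: route the SDK loggers to a plain-text handler."""
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            "%(asctime)s %(name)s %(levelname)s %(message)s"))
        for name in (
            "tamar_model_client",
            "tamar_model_client.async_client",
            "tamar_model_client.error_handler",
            "tamar_model_client.core.base_client",
        ):
            logger = logging.getLogger(name)
            logger.handlers.clear()   # drop the SDK's own (JSON) handlers, if any
            logger.addHandler(handler)
            logger.setLevel(level)
            logger.propagate = False  # avoid double-formatting via the root logger


    use_plain_logging()

Disabling propagation is a general `logging` precaution: if the root logger also has a handler, a record would otherwise be emitted twice, once per formatter.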