tamar-model-client 0.1.21__py3-none-any.whl → 0.1.22__py3-none-any.whl

This diff shows the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -244,7 +244,16 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
         Raises:
             TamarModelException: when all retries have failed
         """
-        return await self.retry_handler.execute_with_retry(func, *args, **kwargs)
+        # Extract request_id from kwargs (if present) and remove it
+        request_id = kwargs.pop('request_id', None) or get_request_id()
+
+        # Build a context that carries the request_id
+        context = {
+            'method': func.__name__ if hasattr(func, '__name__') else 'unknown',
+            'client_version': 'async',
+            'request_id': request_id,
+        }
+        return await self.retry_handler.execute_with_retry(func, *args, context=context, **kwargs)
 
     async def _retry_request_stream(self, func, *args, **kwargs):
         """
@@ -260,10 +269,18 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
         Returns:
             AsyncIterator: iterator over the streaming response
         """
+        # Record the method start time
+        import time
+        method_start_time = time.time()
+
+        # Extract request_id from kwargs (if present) and remove it
+        request_id = kwargs.pop('request_id', None) or get_request_id()
+
         last_exception = None
         context = {
             'method': 'stream',
             'client_version': 'async',
+            'request_id': request_id,
         }
 
         for attempt in range(self.max_retries + 1):
@@ -283,10 +300,16 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
                 error_context = ErrorContext(e, context)
                 error_code = e.code()
                 policy = get_retry_policy(error_code)
-                retryable = policy.get('retryable', False)
 
-                should_retry = False
-                if attempt < self.max_retries:
+                # Check the error-level max_attempts configuration first.
+                # max_attempts is the maximum number of retries (the initial request is not counted).
+                error_max_attempts = policy.get('max_attempts', self.max_retries)
+                if attempt >= error_max_attempts:
+                    should_retry = False
+                elif attempt >= self.max_retries:
+                    should_retry = False
+                else:
+                    retryable = policy.get('retryable', False)
                     if retryable == True:
                         should_retry = True
                     elif retryable == 'conditional':
@@ -295,8 +318,11 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
                             should_retry = error_context.is_network_cancelled()
                         else:
                             should_retry = self._check_error_details_for_retry(e)
+                    else:
+                        should_retry = False
 
                 if should_retry:
+                    current_duration = time.time() - method_start_time
                     log_data = {
                         "log_type": "info",
                         "request_id": context.get('request_id'),
@@ -305,7 +331,8 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
                         "retry_count": attempt,
                         "max_retries": self.max_retries,
                         "method": "stream"
-                    }
+                    },
+                    "duration": current_duration
                 }
                 logger.warning(
                     f"Stream attempt {attempt + 1}/{self.max_retries + 1} failed: {e.code()} (will retry)",
@@ -317,6 +344,7 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
                     await asyncio.sleep(delay)
                 else:
                     # No retry, or the maximum number of retries has been reached
+                    current_duration = time.time() - method_start_time
                     log_data = {
                         "log_type": "info",
                         "request_id": context.get('request_id'),
@@ -326,12 +354,14 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
                         "max_retries": self.max_retries,
                         "method": "stream",
                         "will_retry": False
-                    }
+                    },
+                    "duration": current_duration
                 }
                 logger.error(
                     f"Stream failed: {e.code()} (no retry)",
                     extra=log_data
                 )
+                context['duration'] = current_duration
                 last_exception = self.error_handler.handle_error(e, context)
                 break
 
@@ -454,7 +484,7 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
         chunk_count = 0
 
         # Obtain the stream generator through the retry logic
-        stream_generator = self._retry_request_stream(self._stream, request, metadata, invoke_timeout)
+        stream_generator = self._retry_request_stream(self._stream, request, metadata, invoke_timeout, request_id=get_request_id())
 
         try:
             async for response in stream_generator:
@@ -609,7 +639,7 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
                 # For streaming responses, return the logging wrapper directly
                 return self._stream_with_logging(request, metadata, invoke_timeout, start_time, model_request)
             else:
-                result = await self._retry_request(self._invoke_request, request, metadata, invoke_timeout)
+                result = await self._retry_request(self._invoke_request, request, metadata, invoke_timeout, request_id=request_id)
 
                 # Log the success of the non-streaming response
                 duration = time.time() - start_time
@@ -739,7 +769,8 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
                 self.stub.BatchInvoke,
                 batch_request,
                 metadata=metadata,
-                timeout=invoke_timeout
+                timeout=invoke_timeout,
+                request_id=request_id
             )
 
             # Build the response object
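Appending request_id as a keyword to these stub invocations is safe only because the retry wrappers pop it before forwarding the remaining kwargs to gRPC; the stub itself never sees the extra argument. The calling pattern, restated:

    # metadata/timeout go to BatchInvoke; request_id is consumed by the wrapper.
    result = await self._retry_request(
        self.stub.BatchInvoke,
        batch_request,
        metadata=metadata,
        timeout=invoke_timeout,
        request_id=request_id,
    )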
@@ -62,6 +62,10 @@ class GrpcErrorHandler:
             }
         }
 
+        # If the context carries a duration, add it to the log entry
+        if 'duration' in context:
+            log_data['duration'] = context['duration']
+
         self.logger.error(
             f"gRPC Error occurred: {error_context.error_code.name if error_context.error_code else 'UNKNOWN'}",
             extra=log_data
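Since duration rides inside the shared context dict, any exit path that stamps context['duration'] before calling handle_error gets the elapsed seconds into the final error log. A small illustration of how such a field surfaces through stdlib logging; whether the package's json_formatter.py actually serializes these record attributes is an assumption:

    import logging
    import time

    logger = logging.getLogger("tamar_model_client.error_handler")

    start = time.time()
    log_data = {
        "log_type": "info",
        "request_id": "req-123",          # hypothetical id, for illustration only
        "duration": time.time() - start,  # seconds since the method started
    }
    # extra= copies these keys onto the LogRecord, where a JSON formatter can emit them.
    logger.error("gRPC Error occurred: UNAVAILABLE", extra=log_data)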
@@ -211,6 +215,10 @@ class EnhancedRetryHandler:
         Raises:
             TamarModelException: the wrapped exception
         """
+        # Record the start time
+        import time
+        method_start_time = time.time()
+
         context = context or {}
         last_exception = None
 
@@ -226,8 +234,13 @@ class EnhancedRetryHandler:
                 # Decide whether a retry is possible
                 if not self._should_retry(e, attempt):
                     # Not retryable, or the maximum number of retries has been reached
+                    current_duration = time.time() - method_start_time
+                    context['duration'] = current_duration
                     last_exception = self.error_handler.handle_error(e, context)
                     break
+
+                # Compute the elapsed time so far
+                current_duration = time.time() - method_start_time
 
                 # Log the retry
                 log_data = {
@@ -241,7 +254,8 @@ class EnhancedRetryHandler:
                         "category": error_context._get_error_category(),
                         "is_retryable": True,  # since we are retrying, the error is retryable
                         "method": error_context.method
-                    }
+                    },
+                    "duration": current_duration
                 }
                 logger.warning(
                     f"Attempt {attempt + 1}/{self.max_retries + 1} failed: {e.code()}",
@@ -253,6 +267,7 @@ class EnhancedRetryHandler:
                 delay = self._calculate_backoff(attempt)
                 await asyncio.sleep(delay)
 
+                context['duration'] = current_duration
                 last_exception = self.error_handler.handle_error(e, context)
 
             except Exception as e:
@@ -271,12 +286,19 @@ class EnhancedRetryHandler:
 
     def _should_retry(self, error: grpc.RpcError, attempt: int) -> bool:
         """Decide whether a retry should happen"""
-        if attempt >= self.max_retries:
-            return False
-
         error_code = error.code()
         policy = RETRY_POLICY.get(error_code, {})
 
+        # Check the error-level max_attempts configuration first.
+        # max_attempts is the maximum number of retries (the initial request is not counted).
+        error_max_attempts = policy.get('max_attempts', self.max_retries)
+        if attempt >= error_max_attempts:
+            return False
+
+        # Then check the global max_retries.
+        if attempt >= self.max_retries:
+            return False
+
         # Check the basic retry policy
         retryable = policy.get('retryable', False)
         if retryable == False:
@@ -77,7 +77,7 @@ RETRY_POLICY = {
     grpc.StatusCode.CANCELLED: {
         'retryable': True,
         'backoff': 'linear',  # linear backoff; network problems usually do not need exponential backoff
-        'max_attempts': 2,  # cap the retry count to avoid excessive retries
+        'max_attempts': 2,  # maximum retries (initial request not counted); 3 attempts in total
         'check_details': False  # do not inspect details; retry uniformly
     },
     grpc.StatusCode.ABORTED: {
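The reworded comment pins down the off-by-one that the old wording left open: 'max_attempts': 2 means two retries on top of the initial call, so at most three RPCs are issued even when the client-wide max_retries is larger. A worked trace under those numbers:

    policy_max_attempts = 2   # from the CANCELLED entry above
    max_retries = 3           # hypothetical client-wide setting

    calls = 0
    for attempt in range(max_retries + 1):
        calls += 1            # one RPC per iteration; pretend each fails with CANCELLED
        if attempt >= policy_max_attempts or attempt >= max_retries:
            break             # the per-error cap wins here
    print(calls)              # -> 3: the initial request plus two retries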
@@ -184,6 +184,37 @@ class ErrorContext:
             'DATA': 'Data corrupted or lost; please check the input data',
         }
         return suggestions.get(self._get_error_category(), 'Unknown error; please contact technical support')
+
+    def is_network_cancelled(self) -> bool:
+        """
+        Determine whether a CANCELLED error was caused by a network interruption
+
+        Returns:
+            bool: True if the CANCELLED error was caused by a network interruption
+        """
+        if self.error_code != grpc.StatusCode.CANCELLED:
+            return False
+
+        # Check whether the error message contains network-related keywords
+        error_msg = (self.error_message or '').lower()
+        debug_msg = (self.error_debug_string or '').lower()
+
+        network_patterns = [
+            'connection reset',
+            'connection refused',
+            'connection closed',
+            'network unreachable',
+            'broken pipe',
+            'socket closed',
+            'eof',
+            'transport'
+        ]
+
+        for pattern in network_patterns:
+            if pattern in error_msg or pattern in debug_msg:
+                return True
+
+        return False
 
 
 # ===== Exception class hierarchy =====
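is_network_cancelled is a plain substring scan over the lowercased gRPC message and debug string; anything that matches a connection-reset-style phrase is treated as a transport drop and becomes retryable. The same check in isolation, with invented sample messages:

    NETWORK_PATTERNS = [
        'connection reset', 'connection refused', 'connection closed',
        'network unreachable', 'broken pipe', 'socket closed', 'eof', 'transport',
    ]

    def looks_like_network_drop(message: str, debug: str = '') -> bool:
        text = f"{message} {debug}".lower()
        return any(p in text for p in NETWORK_PATTERNS)

    print(looks_like_network_drop("CANCELLED: Connection reset by peer"))  # True
    print(looks_like_network_drop("CANCELLED: cancelled by the caller"))   # False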
@@ -201,10 +201,17 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
         """
         Retry using the enhanced error handler (sync version)
         """
+        # Record the method start time
+        method_start_time = time.time()
+
+        # Extract request_id from kwargs (if present) and remove it
+        request_id = kwargs.pop('request_id', None) or get_request_id()
+
         # Build the request context
         context = {
             'method': func.__name__ if hasattr(func, '__name__') else 'unknown',
             'client_version': 'sync',
+            'request_id': request_id,
         }
 
         last_exception = None
@@ -222,9 +229,14 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
                 should_retry = self._should_retry(e, attempt)
                 if not should_retry or attempt >= self.max_retries:
                     # Not retryable, or the maximum number of retries has been reached
+                    current_duration = time.time() - method_start_time
+                    context['duration'] = current_duration
                     last_exception = self.error_handler.handle_error(e, context)
                     break
 
+                # Compute the elapsed time so far
+                current_duration = time.time() - method_start_time
+
                 # Log the retry
                 log_data = {
                     "log_type": "info",
@@ -234,7 +246,8 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
                         "retry_count": attempt,
                         "max_retries": self.max_retries,
                         "method": context.get('method', 'unknown')
-                    }
+                    },
+                    "duration": current_duration
                 }
                 logger.warning(
                     f"Attempt {attempt + 1}/{self.max_retries + 1} failed: {e.code()}",
@@ -246,6 +259,7 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
                 delay = self._calculate_backoff(attempt, e.code())
                 time.sleep(delay)
 
+                context['duration'] = current_duration
                 last_exception = self.error_handler.handle_error(e, context)
 
             except Exception as e:
@@ -260,6 +274,73 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
         else:
             raise TamarModelException("Unknown error occurred")
 
+    def _should_retry(self, error: grpc.RpcError, attempt: int) -> bool:
+        """
+        Decide whether a retry should happen
+
+        Args:
+            error: the gRPC error
+            attempt: the current retry count
+
+        Returns:
+            bool: whether a retry should happen
+        """
+        error_code = error.code()
+        from .exceptions import get_retry_policy, ErrorContext
+        policy = get_retry_policy(error_code)
+
+        # Check the error-level max_attempts configuration first.
+        # max_attempts is the maximum number of retries (the initial request is not counted).
+        error_max_attempts = policy.get('max_attempts', self.max_retries)
+        if attempt >= error_max_attempts:
+            return False
+
+        # Then check the global max_retries.
+        if attempt >= self.max_retries:
+            return False
+
+        retryable = policy.get('retryable', False)
+
+        if retryable == False:
+            return False
+        elif retryable == True:
+            return True
+        elif retryable == 'conditional':
+            # Conditional retry; handled specially
+            if error_code == grpc.StatusCode.CANCELLED:
+                # Check whether the cancellation was caused by a network interruption
+                context = {'method': 'unknown', 'client_version': 'sync'}
+                error_context = ErrorContext(error, context)
+                return error_context.is_network_cancelled()
+            else:
+                return self._check_error_details_for_retry(error)
+
+        return False
+
+    def _check_error_details_for_retry(self, error: grpc.RpcError) -> bool:
+        """
+        Inspect the error details to decide whether to retry
+
+        Args:
+            error: the gRPC error
+
+        Returns:
+            bool: whether a retry should happen
+        """
+        error_message = error.details().lower() if error.details() else ""
+
+        # Retryable error patterns
+        retryable_patterns = [
+            'temporary', 'timeout', 'unavailable',
+            'connection', 'network', 'try again'
+        ]
+
+        for pattern in retryable_patterns:
+            if pattern in error_message:
+                return True
+
+        return False
+
     def _calculate_backoff(self, attempt: int, error_code: grpc.StatusCode = None) -> float:
         """
         Compute the backoff delay; supports different backoff strategies
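The policies choose between 'linear' and exponential backoff per status code, while _calculate_backoff turns the attempt index into a sleep. A minimal sketch of such a calculation with full jitter; the base delay, cap, and jitter strategy are assumptions, since the diff shows only the signature and the policies' 'backoff' field:

    import random

    def calculate_backoff(attempt: int, strategy: str = "exponential",
                          base: float = 0.5, cap: float = 10.0) -> float:
        # Return the sleep (seconds) before retry number `attempt` (0-based).
        if strategy == "linear":
            delay = base * (attempt + 1)   # 0.5, 1.0, 1.5, ...
        else:
            delay = base * (2 ** attempt)  # 0.5, 1.0, 2.0, ...
        return random.uniform(0, min(delay, cap))  # full jitter spreads retry bursts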
@@ -316,10 +397,17 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
         Yields:
             each element of the streaming response
         """
+        # Record the method start time
+        method_start_time = time.time()
+
+        # Extract request_id from kwargs (if present) and remove it
+        request_id = kwargs.pop('request_id', None) or get_request_id()
+
         last_exception = None
         context = {
             'method': 'stream',
             'client_version': 'sync',
+            'request_id': request_id,
         }
 
         for attempt in range(self.max_retries + 1):
@@ -334,6 +422,9 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
                 # Use the smart retry decision
                 context['retry_count'] = attempt
 
+                # Compute the elapsed time so far
+                current_duration = time.time() - method_start_time
+
                 # Decide whether a retry should happen
                 should_retry = self._should_retry(e, attempt)
                 if not should_retry or attempt >= self.max_retries:
@@ -347,12 +438,14 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
                         "max_retries": self.max_retries,
                         "method": "stream",
                         "will_retry": False
-                    }
+                    },
+                    "duration": current_duration
                 }
                 logger.error(
                     f"Stream failed: {e.code()} (no retry)",
                     extra=log_data
                 )
+                context['duration'] = current_duration
                 last_exception = self.error_handler.handle_error(e, context)
                 break
 
@@ -365,7 +458,8 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
                         "retry_count": attempt,
                         "max_retries": self.max_retries,
                         "method": "stream"
-                    }
+                    },
+                    "duration": current_duration
                 }
                 logger.warning(
                     f"Stream attempt {attempt + 1}/{self.max_retries + 1} failed: {e.code()} (will retry)",
@@ -609,10 +703,11 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
                 # For streaming responses, use the retry wrapper
                 return self._retry_request_stream(
                     self._stream_with_logging,
-                    request, metadata, invoke_timeout, start_time, model_request
+                    request, metadata, invoke_timeout, start_time, model_request,
+                    request_id=request_id
                 )
             else:
-                result = self._retry_request(self._invoke_request, request, metadata, invoke_timeout)
+                result = self._retry_request(self._invoke_request, request, metadata, invoke_timeout, request_id=request_id)
 
                 # Log the success of the non-streaming response
                 duration = time.time() - start_time
@@ -742,7 +837,8 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
                 self.stub.BatchInvoke,
                 batch_request,
                 metadata=metadata,
-                timeout=invoke_timeout
+                timeout=invoke_timeout,
+                request_id=request_id
             )
 
             # Build the response object
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tamar-model-client
-Version: 0.1.21
+Version: 0.1.22
 Summary: A Python SDK for interacting with the Model Manager gRPC service
 Home-page: http://gitlab.tamaredge.top/project-tap/AgentOS/model-manager-client
 Author: Oscar Ou
@@ -1,12 +1,12 @@
 tamar_model_client/__init__.py,sha256=4DEIUGlLTeiaECjJQbGYik7C0JO6hHwwfbLYpYpMdzg,444
-tamar_model_client/async_client.py,sha256=cU3cUrwP75zacyFa3KibcYfacJBsxRNLIu1vtQZrrcU,32836
+tamar_model_client/async_client.py,sha256=8BbzeYX735xIaPJPF0oI3g-8oDKyl6GugxxCdBEGi5s,34400
 tamar_model_client/auth.py,sha256=gbwW5Aakeb49PMbmYvrYlVx1mfyn1LEDJ4qQVs-9DA4,438
 tamar_model_client/circuit_breaker.py,sha256=0XHJXBYA4O8vwsDGwqNrae9zxNJphY5Rfucc9ytVFGA,5419
-tamar_model_client/error_handler.py,sha256=kVfHL7DWvO3sIobjVuJbqjV4mtI4oqbS4Beax7Dmm9w,11788
-tamar_model_client/exceptions.py,sha256=FImLCBpYQ8DpsNbH-ZttxyClEZCL6ICmQGESIlbI--s,12038
+tamar_model_client/error_handler.py,sha256=eEuwMcecJvQbFkIieri60uLoQX5sBMawnxxedxzxj5I,12745
+tamar_model_client/exceptions.py,sha256=D6G8igA-YO4AroeCa-9CDDPt4hSqBFX5C_4w-NCIL1w,13063
 tamar_model_client/json_formatter.py,sha256=IyBv_pEEzjF-KaMF-7rxRpNc_fxRYK2A-pu_2n4Liow,1990
 tamar_model_client/logging_icons.py,sha256=MRTZ1Xvkep9ce_jdltj54_XZUXvIpQ95soRNmLdJ4qw,1837
-tamar_model_client/sync_client.py,sha256=AhNFlhk9aC7JhNrI2BEZJDLjXZwVT9pMy3u9jgjO1QU,32603
+tamar_model_client/sync_client.py,sha256=mpWc6T9RjKnNDiETE8UYtKu2Zu0U6AME1kQMnDwnfMI,36225
 tamar_model_client/utils.py,sha256=Kn6pFz9GEC96H4eejEax66AkzvsrXI3WCSDtgDjnVTI,5238
 tamar_model_client/core/__init__.py,sha256=bJRJllrp4Xc0g_qu1pW9G-lsXNB7c1r0NBIfb2Ypxe0,832
 tamar_model_client/core/base_client.py,sha256=sYvJZsDu_66akddAMowSnihFtgOoVKaQJxxnVruF9Ms,8995
@@ -27,9 +27,10 @@ tamar_model_client/schemas/inputs.py,sha256=dz1m8NbUIxA99JXZc8WlyzbKpDuz1lEzx3Vg
 tamar_model_client/schemas/outputs.py,sha256=M_fcqUtXPJnfiLabHlyA8BorlC5pYkf5KLjXO1ysKIQ,1031
 tests/__init__.py,sha256=kbmImddLDwdqlkkmkyKtl4bQy_ipe-R8eskpaBylU9w,38
 tests/stream_hanging_analysis.py,sha256=W3W48IhQbNAR6-xvMpoWZvnWOnr56CTaH4-aORNBuD4,14807
-tests/test_google_azure_final.py,sha256=wAnfodYCs8VIqYlgT6nm1YnLnufqSuYfXBaVqCXkmfU,17019
+tests/test_google_azure_final.py,sha256=YFhjx2mQlFijcuHqOVnnS7ZD8mQWCf2Uv1oiqOFxASs,26393
+tests/test_logging_issue.py,sha256=JTMbotfHpAEPMBj73pOwxPn-Zn4QVQJX6scMz48FRDQ,2427
 tests/test_simple.py,sha256=Xf0U-J9_xn_LzUsmYu06suK0_7DrPeko8OHoHldsNxE,7169
-tamar_model_client-0.1.21.dist-info/METADATA,sha256=gj8tUbP3goUZKi3pVVWMxEpmmK6W72IV23Ym2ohlcBs,23453
-tamar_model_client-0.1.21.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-tamar_model_client-0.1.21.dist-info/top_level.txt,sha256=f1I-S8iWN-cgv4gB8gxRg9jJOTJMumvm4oGKVPfGg6A,25
-tamar_model_client-0.1.21.dist-info/RECORD,,
+tamar_model_client-0.1.22.dist-info/METADATA,sha256=jEEq8UTzqVcutF26FefqaxZ08WhNW58xiyEWZgDO7WA,23453
+tamar_model_client-0.1.22.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+tamar_model_client-0.1.22.dist-info/top_level.txt,sha256=f1I-S8iWN-cgv4gB8gxRg9jJOTJMumvm4oGKVPfGg6A,25
+tamar_model_client-0.1.22.dist-info/RECORD,,
@@ -8,6 +8,10 @@ import asyncio
 import logging
 import os
 import sys
+import time
+import threading
+from concurrent.futures import ThreadPoolExecutor
+from typing import List, Dict, Tuple
 
 # Configure logging dedicated to this test script
 # Use a specific logger name so the client's own logging is unaffected
@@ -22,8 +26,8 @@ test_logger.addHandler(test_handler)
 
 logger = test_logger
 
-os.environ['MODEL_MANAGER_SERVER_GRPC_USE_TLS'] = "false"
-os.environ['MODEL_MANAGER_SERVER_ADDRESS'] = "localhost:50051"
+os.environ['MODEL_MANAGER_SERVER_GRPC_USE_TLS'] = "true"
+os.environ['MODEL_MANAGER_SERVER_ADDRESS'] = "model-manager-server-grpc-131786869360.asia-northeast1.run.app"
 os.environ['MODEL_MANAGER_SERVER_JWT_SECRET_KEY'] = "model-manager-server-jwt-key"
 
 # Import the client modules
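The test now points at a TLS-enabled Cloud Run endpoint instead of a local plaintext server. A hedged sketch of how a gRPC client might consume the same toggle; the diff only sets the variables, so the branching below is an assumption about the SDK's behavior:

    import os
    import grpc

    use_tls = os.environ.get('MODEL_MANAGER_SERVER_GRPC_USE_TLS', 'false').lower() == 'true'
    address = os.environ.get('MODEL_MANAGER_SERVER_ADDRESS', 'localhost:50051')

    channel = (grpc.secure_channel(address, grpc.ssl_channel_credentials())
               if use_tls else grpc.insecure_channel(address))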
@@ -31,6 +35,12 @@ try:
     from tamar_model_client import TamarModelClient, AsyncTamarModelClient
     from tamar_model_client.schemas import ModelRequest, UserContext
     from tamar_model_client.enums import ProviderType, InvokeType, Channel
+
+    # Temporarily enable the SDK's log output for debugging
+    # Note: this emits JSON-formatted logs
+    import os
+    os.environ['TAMAR_MODEL_CLIENT_LOG_LEVEL'] = 'INFO'
+
 except ImportError as e:
     logger.error(f"Failed to import modules: {e}")
     sys.exit(1)
@@ -39,10 +49,10 @@
 def test_google_ai_studio():
     """Test Google AI Studio"""
     print("\n🔍 Testing Google AI Studio...")
-
+
     try:
         client = TamarModelClient()
-
+
         request = ModelRequest(
             provider=ProviderType.GOOGLE,
             channel=Channel.AI_STUDIO,
@@ -61,12 +71,12 @@
                 "maxOutputTokens": 100
             }
         )
-
+
         response = client.invoke(request)
         print(f"✅ Google AI Studio succeeded")
         print(f"   Response type: {type(response)}")
         print(f"   Response content: {str(response)[:200]}...")
-
+
     except Exception as e:
         print(f"❌ Google AI Studio failed: {str(e)}")
 
@@ -74,10 +84,10 @@
 def test_google_vertex_ai():
     """Test Google Vertex AI"""
     print("\n🔍 Testing Google Vertex AI...")
-
+
     try:
         client = TamarModelClient()
-
+
         request = ModelRequest(
             provider=ProviderType.GOOGLE,
             channel=Channel.VERTEXAI,
@@ -95,12 +105,12 @@
                 "temperature": 0.5
             }
         )
-
+
         response = client.invoke(request)
         print(f"✅ Google Vertex AI succeeded")
         print(f"   Response type: {type(response)}")
         print(f"   Response content: {str(response)[:200]}...")
-
+
     except Exception as e:
         print(f"❌ Google Vertex AI failed: {str(e)}")
 
@@ -108,10 +118,10 @@
 def test_azure_openai():
     """Test Azure OpenAI"""
     print("\n☁️ Testing Azure OpenAI...")
-
+
     try:
         client = TamarModelClient()
-
+
         request = ModelRequest(
             provider=ProviderType.AZURE,
             invoke_type=InvokeType.CHAT_COMPLETIONS,
@@ -125,11 +135,11 @@
                 client_type="test_client"
             ),
         )
-
+
         response = client.invoke(request)
         print(f"✅ Azure OpenAI succeeded")
         print(f"   Response content: {response.model_dump_json()}...")
-
+
     except Exception as e:
         print(f"❌ Azure OpenAI failed: {str(e)}")
 
@@ -137,7 +147,7 @@
 async def test_google_streaming():
     """Test Google streaming responses"""
     print("\n📡 Testing Google streaming responses...")
-
+
     try:
         async with AsyncTamarModelClient() as client:
             request = ModelRequest(
@@ -159,18 +169,18 @@
                     "maxOutputTokens": 50
                 }
             )
-
+
             response_gen = await client.invoke(request)
             print(f"✅ Google streaming call succeeded")
             print(f"   Response type: {type(response_gen)}")
-
+
             chunk_count = 0
             async for chunk in response_gen:
                 chunk_count += 1
                 print(f"   Chunk {chunk_count}: {type(chunk)} - {chunk.model_dump_json()}...")
                 if chunk_count >= 3:  # only show the first 3 chunks
                     break
-
+
     except Exception as e:
         print(f"❌ Google streaming response failed: {str(e)}")
 
@@ -178,7 +188,7 @@
 async def test_azure_streaming():
     """Test Azure streaming responses"""
     print("\n📡 Testing Azure streaming responses...")
-
+
     try:
         async with AsyncTamarModelClient() as client:
             request = ModelRequest(
@@ -196,18 +206,18 @@
                 ),
                 stream=True  # add the streaming flag
             )
-
+
             response_gen = await client.invoke(request)
             print(f"✅ Azure streaming call succeeded")
             print(f"   Response type: {type(response_gen)}")
-
+
             chunk_count = 0
             async for chunk in response_gen:
                 chunk_count += 1
                 print(f"   Chunk {chunk_count}: {type(chunk)} - {chunk.model_dump_json()}...")
                 if chunk_count >= 3:  # only show the first 3 chunks
                     break
-
+
     except Exception as e:
         print(f"❌ Azure streaming response failed: {str(e)}")
 
@@ -215,10 +225,10 @@
 def test_sync_batch_requests():
     """Test synchronous batch requests"""
     print("\n📦 Testing synchronous batch requests...")
-
+
     try:
         from tamar_model_client.schemas import BatchModelRequest, BatchModelRequestItem
-
+
         with TamarModelClient() as client:
             # Build a batch request containing multiple Google and Azure requests
             batch_request = BatchModelRequest(
@@ -260,18 +270,18 @@
                     )
                 ]
             )
-
+
             # Execute the batch request
             batch_response = client.invoke_batch(batch_request)
-
+
             print(f"✅ Synchronous batch request succeeded")
             print(f"   Number of requests: {len(batch_request.items)}")
             print(f"   Number of responses: {len(batch_response.responses)}")
             print(f"   Batch request ID: {batch_response.request_id}")
-
+
             # Show the result of each response
             for i, response in enumerate(batch_response.responses):
-                print(f"\n   Response {i+1}:")
+                print(f"\n   Response {i + 1}:")
                 print(f"     - custom_id: {response.custom_id}")
                 print(f"     - Content length: {len(response.content) if response.content else 0}")
                 print(f"     - Has error: {'yes' if response.error else 'no'}")
@@ -279,7 +289,7 @@
                     print(f"     - Content preview: {response.content[:100]}...")
                 if response.error:
                     print(f"     - Error message: {response.error}")
-
+
     except Exception as e:
         print(f"❌ Synchronous batch request failed: {str(e)}")
 
@@ -287,10 +297,10 @@
 async def test_batch_requests():
     """Test asynchronous batch requests"""
     print("\n📦 Testing asynchronous batch requests...")
-
+
     try:
         from tamar_model_client.schemas import BatchModelRequest, BatchModelRequestItem
-
+
         async with AsyncTamarModelClient() as client:
             # Build a batch request containing multiple Google and Azure requests
             batch_request = BatchModelRequest(
@@ -343,18 +353,18 @@
                     )
                 ]
             )
-
+
            # Execute the batch request
            batch_response = await client.invoke_batch(batch_request)
-
+
            print(f"✅ Batch request succeeded")
            print(f"   Number of requests: {len(batch_request.items)}")
            print(f"   Number of responses: {len(batch_response.responses)}")
            print(f"   Batch request ID: {batch_response.request_id}")
-
+
            # Show the result of each response
            for i, response in enumerate(batch_response.responses):
-               print(f"\n   Response {i+1}:")
+               print(f"\n   Response {i + 1}:")
                print(f"     - custom_id: {response.custom_id}")
                print(f"     - Content length: {len(response.content) if response.content else 0}")
                print(f"     - Has error: {'yes' if response.error else 'no'}")
@@ -362,34 +372,286 @@
                    print(f"     - Content preview: {response.content[:100]}...")
                if response.error:
                    print(f"     - Error message: {response.error}")
-
+
     except Exception as e:
         print(f"❌ Batch request failed: {str(e)}")
 
 
+def test_concurrent_requests(num_requests: int = 150):
+    """Test concurrent requests
+
+    Args:
+        num_requests: total number of requests to send; defaults to 150
+    """
+    print(f"\n🚀 Testing concurrent requests ({num_requests} requests)...")
+
+    # Statistics
+    total_requests = 0
+    successful_requests = 0
+    failed_requests = 0
+    request_times: List[float] = []
+    errors: Dict[str, int] = {}
+
+    # Thread-safe lock
+    stats_lock = threading.Lock()
+
+    def make_single_request(request_id: int) -> Tuple[bool, float, str]:
+        """Execute a single request and return the result
+
+        Returns:
+            (success, duration, error_msg)
+        """
+        start_time = time.time()
+        try:
+            # Each thread creates its own client instance
+            client = TamarModelClient()
+
+            # Google Vertex AI
+            request = ModelRequest(
+                provider=ProviderType.GOOGLE,
+                channel=Channel.VERTEXAI,
+                invoke_type=InvokeType.GENERATION,
+                model="tamar-google-gemini-flash-lite",
+                contents="What is 1+1?",
+                user_context=UserContext(
+                    user_id=f"concurrent_user_{request_id:03d}",
+                    org_id="test_org",
+                    client_type="concurrent_test"
+                ),
+                config={"temperature": 0.1}
+            )
+
+            response = client.invoke(request, timeout=300000.0)
+            duration = time.time() - start_time
+            return (True, duration, "")
+
+        except Exception as e:
+            duration = time.time() - start_time
+            error_msg = str(e)
+            return (False, duration, error_msg)
+
+    def worker(request_id: int):
+        """Worker thread function"""
+        nonlocal total_requests, successful_requests, failed_requests
+
+        success, duration, error_msg = make_single_request(request_id)
+
+        with stats_lock:
+            total_requests += 1
+            request_times.append(duration)
+
+            if success:
+                successful_requests += 1
+            else:
+                failed_requests += 1
+                # Count the error types
+                error_type = error_msg.split(':')[0] if ':' in error_msg else error_msg[:50]
+                errors[error_type] = errors.get(error_type, 0) + 1
+
+            # Report progress every 20 requests
+            if total_requests % 20 == 0:
+                print(
+                    f"   Progress: {total_requests}/{num_requests} (success: {successful_requests}, failed: {failed_requests})")
+
+    # Run the concurrent requests through a thread pool
+    start_time = time.time()
+
+    # Use a thread pool with at most 50 concurrent threads
+    with ThreadPoolExecutor(max_workers=50) as executor:
+        # Submit all tasks
+        futures = [executor.submit(worker, i) for i in range(num_requests)]
+
+        # Wait for all tasks to finish
+        for future in futures:
+            future.result()
+
+    total_duration = time.time() - start_time
+
+    # Compute the statistics
+    avg_request_time = sum(request_times) / len(request_times) if request_times else 0
+    min_request_time = min(request_times) if request_times else 0
+    max_request_time = max(request_times) if request_times else 0
+
+    # Print the results
+    print(f"\n📊 Concurrency test results:")
+    print(f"   Total requests: {total_requests}")
+    print(f"   Successful requests: {successful_requests} ({successful_requests / total_requests * 100:.1f}%)")
+    print(f"   Failed requests: {failed_requests} ({failed_requests / total_requests * 100:.1f}%)")
+    print(f"   Total time: {total_duration:.2f} s")
+    print(f"   Average QPS: {total_requests / total_duration:.2f}")
+    print(f"\n   Request latency statistics:")
+    print(f"     - average: {avg_request_time:.3f} s")
+    print(f"     - min: {min_request_time:.3f} s")
+    print(f"     - max: {max_request_time:.3f} s")
+
+    if errors:
+        print(f"\n   Error statistics:")
+        for error_type, count in sorted(errors.items(), key=lambda x: x[1], reverse=True):
+            print(f"     - {error_type}: {count} times")
+
+    return {
+        "total": total_requests,
+        "successful": successful_requests,
+        "failed": failed_requests,
+        "duration": total_duration,
+        "qps": total_requests / total_duration
+    }
+
+
+async def test_async_concurrent_requests(num_requests: int = 150):
+    """Test asynchronous concurrent requests
+
+    Args:
+        num_requests: total number of requests to send; defaults to 150
+    """
+    print(f"\n🚀 Testing asynchronous concurrent requests ({num_requests} requests)...")
+
+    # Statistics
+    total_requests = 0
+    successful_requests = 0
+    failed_requests = 0
+    request_times: List[float] = []
+    errors: Dict[str, int] = {}
+
+    # Async lock
+    stats_lock = asyncio.Lock()
+
+    async def make_single_async_request(client: AsyncTamarModelClient, request_id: int) -> Tuple[bool, float, str]:
+        """Execute a single async request and return the result
+
+        Returns:
+            (success, duration, error_msg)
+        """
+        start_time = time.time()
+        try:
+            # Choosing different providers by request id would add test variety
+            # Google Vertex AI
+            request = ModelRequest(
+                provider=ProviderType.GOOGLE,
+                channel=Channel.VERTEXAI,
+                invoke_type=InvokeType.GENERATION,
+                model="tamar-google-gemini-flash-lite",
+                contents="What is 1+1?",
+                user_context=UserContext(
+                    user_id=f"async_concurrent_user_{request_id:03d}",
+                    org_id="test_org",
+                    client_type="async_concurrent_test"
+                ),
+                config={"temperature": 0.1}
+            )
+
+            response = await client.invoke(request, timeout=300000.0)
+            duration = time.time() - start_time
+            return (True, duration, "")
+
+        except Exception as e:
+            duration = time.time() - start_time
+            error_msg = str(e)
+            return (False, duration, error_msg)
+
+    async def async_worker(client: AsyncTamarModelClient, request_id: int):
+        """Async worker coroutine"""
+        nonlocal total_requests, successful_requests, failed_requests
+
+        success, duration, error_msg = await make_single_async_request(client, request_id)
+
+        async with stats_lock:
+            total_requests += 1
+            request_times.append(duration)
+
+            if success:
+                successful_requests += 1
+            else:
+                failed_requests += 1
+                # Count the error types
+                error_type = error_msg.split(':')[0] if ':' in error_msg else error_msg[:50]
+                errors[error_type] = errors.get(error_type, 0) + 1
+
+            # Report progress every 20 requests
+            if total_requests % 20 == 0:
+                print(
+                    f"   Progress: {total_requests}/{num_requests} (success: {successful_requests}, failed: {failed_requests})")
+
+    # Run the concurrent requests through the async client
+    start_time = time.time()
+
+    # Create one shared async client
+    async with AsyncTamarModelClient() as client:
+        # Create all tasks but cap the concurrency
+        semaphore = asyncio.Semaphore(50)  # at most 50 concurrent requests
+
+        async def limited_worker(request_id: int):
+            async with semaphore:
+                await async_worker(client, request_id)
+
+        # Create all tasks
+        tasks = [limited_worker(i) for i in range(num_requests)]
+
+        # Wait for all tasks to finish
+        await asyncio.gather(*tasks)
+
+    total_duration = time.time() - start_time
+
+    # Compute the statistics
+    avg_request_time = sum(request_times) / len(request_times) if request_times else 0
+    min_request_time = min(request_times) if request_times else 0
+    max_request_time = max(request_times) if request_times else 0
+
+    # Print the results
+    print(f"\n📊 Async concurrency test results:")
+    print(f"   Total requests: {total_requests}")
+    print(f"   Successful requests: {successful_requests} ({successful_requests / total_requests * 100:.1f}%)")
+    print(f"   Failed requests: {failed_requests} ({failed_requests / total_requests * 100:.1f}%)")
+    print(f"   Total time: {total_duration:.2f} s")
+    print(f"   Average QPS: {total_requests / total_duration:.2f}")
+    print(f"\n   Request latency statistics:")
+    print(f"     - average: {avg_request_time:.3f} s")
+    print(f"     - min: {min_request_time:.3f} s")
+    print(f"     - max: {max_request_time:.3f} s")
+
+    if errors:
+        print(f"\n   Error statistics:")
+        for error_type, count in sorted(errors.items(), key=lambda x: x[1], reverse=True):
+            print(f"     - {error_type}: {count} times")
+
+    return {
+        "total": total_requests,
+        "successful": successful_requests,
+        "failed": failed_requests,
+        "duration": total_duration,
+        "qps": total_requests / total_duration
+    }
+
+
 async def main():
     """Main entry point"""
     print("🚀 Simplified Google/Azure test")
     print("=" * 50)
-
+
     try:
-        # Sync tests
-        test_google_ai_studio()
-        test_google_vertex_ai()
-        test_azure_openai()
-
-        # Sync batch test
-        test_sync_batch_requests()
-
-        # Async streaming tests
-        await asyncio.wait_for(test_google_streaming(), timeout=60.0)
-        await asyncio.wait_for(test_azure_streaming(), timeout=60.0)
-
-        # Async batch test
-        await asyncio.wait_for(test_batch_requests(), timeout=120.0)
-
+        # # Sync tests
+        # test_google_ai_studio()
+        # test_google_vertex_ai()
+        # test_azure_openai()
+        #
+        # # Sync batch test
+        # test_sync_batch_requests()
+        #
+        # # Async streaming tests
+        # await asyncio.wait_for(test_google_streaming(), timeout=60.0)
+        # await asyncio.wait_for(test_azure_streaming(), timeout=60.0)
+        #
+        # # Async batch test
+        # await asyncio.wait_for(test_batch_requests(), timeout=120.0)
+
+        # Sync concurrency test
+        #test_concurrent_requests(150)  # run 150 concurrent requests
+
+        # Async concurrency test
+        await test_async_concurrent_requests(1000)  # run 1000 async concurrent requests
+
         print("\n✅ Tests finished")
-
+
     except asyncio.TimeoutError:
         print("\n⏰ Test timed out")
     except KeyboardInterrupt:
402
664
  try:
403
665
  # 短暂等待让正在完成的任务自然结束
404
666
  await asyncio.sleep(0.5)
405
-
667
+
406
668
  # 检查是否还有未完成的任务
407
669
  current_task = asyncio.current_task()
408
- tasks = [task for task in asyncio.all_tasks()
409
- if not task.done() and task != current_task]
410
-
670
+ tasks = [task for task in asyncio.all_tasks()
671
+ if not task.done() and task != current_task]
672
+
411
673
  if tasks:
412
674
  print(f" 发现 {len(tasks)} 个未完成任务,等待自然完成...")
413
675
  # 简单等待,不强制取消
@@ -418,12 +680,12 @@ async def main():
418
680
  )
419
681
  except asyncio.TimeoutError:
420
682
  pass
421
-
683
+
422
684
  print(" 任务清理完成")
423
-
685
+
424
686
  except Exception as e:
425
687
  print(f" ⚠️ 任务清理时出现异常: {e}")
426
-
688
+
427
689
  print("🔚 程序即将退出")
428
690
 
429
691
 
@@ -433,16 +695,16 @@ if __name__ == "__main__":
433
695
  asyncio_logger = logging.getLogger('asyncio')
434
696
  original_level = asyncio_logger.level
435
697
  asyncio_logger.setLevel(logging.ERROR)
436
-
698
+
437
699
  try:
438
700
  asyncio.run(main())
439
701
  finally:
440
702
  # 恢复原始日志级别
441
703
  asyncio_logger.setLevel(original_level)
442
-
704
+
443
705
  except KeyboardInterrupt:
444
706
  print("\n⚠️ 程序被用户中断")
445
707
  except Exception as e:
446
708
  print(f"\n❌ 程序执行出错: {e}")
447
709
  finally:
448
- print("🏁 程序已退出")
710
+ print("🏁 程序已退出")
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+"""
+Test the logging format issue
+"""
+
+import asyncio
+import logging
+import os
+import sys
+
+# Set the environment variables
+os.environ['MODEL_MANAGER_SERVER_GRPC_USE_TLS'] = "false"
+os.environ['MODEL_MANAGER_SERVER_ADDRESS'] = "localhost:50051"
+os.environ['MODEL_MANAGER_SERVER_JWT_SECRET_KEY'] = "model-manager-server-jwt-key"
+
+# Import the SDK first
+from tamar_model_client import AsyncTamarModelClient
+from tamar_model_client.schemas import ModelRequest, UserContext
+from tamar_model_client.enums import ProviderType, InvokeType, Channel
+
+# Inspect the SDK's logging configuration
+print("=== SDK Logger Configuration ===")
+sdk_loggers = [
+    'tamar_model_client',
+    'tamar_model_client.async_client',
+    'tamar_model_client.error_handler',
+    'tamar_model_client.core.base_client'
+]
+
+for logger_name in sdk_loggers:
+    logger = logging.getLogger(logger_name)
+    print(f"\nLogger: {logger_name}")
+    print(f"  Level: {logging.getLevelName(logger.level)}")
+    print(f"  Handlers: {len(logger.handlers)}")
+    for i, handler in enumerate(logger.handlers):
+        print(f"    Handler {i}: {type(handler).__name__}")
+        if hasattr(handler, 'formatter'):
+            print(f"      Formatter: {type(handler.formatter).__name__ if handler.formatter else 'None'}")
+    print(f"  Propagate: {logger.propagate}")
+
+
+async def test_error_logging():
+    """Test the error log format"""
+    print("\n=== Testing Error Logging ===")
+
+    try:
+        async with AsyncTamarModelClient() as client:
+            # Deliberately build a request that will fail
+            request = ModelRequest(
+                provider=ProviderType.GOOGLE,
+                channel=Channel.VERTEXAI,
+                invoke_type=InvokeType.GENERATION,
+                model="invalid-model",
+                contents="test",
+                user_context=UserContext(
+                    user_id="test_user",
+                    org_id="test_org",
+                    client_type="test_client"
+                )
+            )
+
+            response = await client.invoke(request, timeout=5.0)
+            print(f"Response: {response}")
+
+    except Exception as e:
+        print(f"Exception caught: {type(e).__name__}: {str(e)}")
+
+
+async def main():
+    await test_error_logging()
+
+
+if __name__ == "__main__":
+    print("Starting logging test...")
+    asyncio.run(main())