tamar-model-client 0.1.20__py3-none-any.whl → 0.1.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tamar_model_client/async_client.py +195 -15
- tamar_model_client/circuit_breaker.py +140 -0
- tamar_model_client/core/__init__.py +6 -0
- tamar_model_client/core/base_client.py +56 -3
- tamar_model_client/core/http_fallback.py +249 -0
- tamar_model_client/core/logging_setup.py +124 -14
- tamar_model_client/error_handler.py +60 -6
- tamar_model_client/exceptions.py +49 -1
- tamar_model_client/sync_client.py +239 -27
- {tamar_model_client-0.1.20.dist-info → tamar_model_client-0.1.22.dist-info}/METADATA +73 -1
- {tamar_model_client-0.1.20.dist-info → tamar_model_client-0.1.22.dist-info}/RECORD +15 -12
- tests/test_google_azure_final.py +325 -63
- tests/test_logging_issue.py +75 -0
- {tamar_model_client-0.1.20.dist-info → tamar_model_client-0.1.22.dist-info}/WHEEL +0 -0
- {tamar_model_client-0.1.20.dist-info → tamar_model_client-0.1.22.dist-info}/top_level.txt +0 -0
tamar_model_client/sync_client.py

```diff
@@ -30,22 +30,23 @@ import grpc
 from .core import (
     generate_request_id,
     set_request_id,
-    …
+    get_protected_logger,
     MAX_MESSAGE_LENGTH
 )
 from .core.base_client import BaseClient
 from .core.request_builder import RequestBuilder
 from .core.response_handler import ResponseHandler
-from .exceptions import ConnectionError, TamarModelException
+from .exceptions import ConnectionError, TamarModelException
 from .generated import model_service_pb2, model_service_pb2_grpc
 from .schemas import BatchModelResponse, ModelResponse
 from .schemas.inputs import BatchModelRequest, ModelRequest
+from .core.http_fallback import HttpFallbackMixin
 
-# …
-logger = …
+# Configure the logger (use the protected logger)
+logger = get_protected_logger(__name__)
 
 
-class TamarModelClient(BaseClient):
+class TamarModelClient(BaseClient, HttpFallbackMixin):
     """
     Tamar Model Client synchronous client
 
```
```diff
@@ -200,10 +201,17 @@ class TamarModelClient(BaseClient):
         """
         Retry using the enhanced error handler (synchronous version)
         """
+        # Record the method start time
+        method_start_time = time.time()
+
+        # Extract request_id from kwargs (if present), then remove it
+        request_id = kwargs.pop('request_id', None) or get_request_id()
+
         # Build the request context
         context = {
             'method': func.__name__ if hasattr(func, '__name__') else 'unknown',
             'client_version': 'sync',
+            'request_id': request_id,
         }
 
         last_exception = None
@@ -218,22 +226,40 @@ class TamarModelClient(BaseClient):
                 context['retry_count'] = attempt
 
                 # Decide whether the error can be retried
-                …
+                should_retry = self._should_retry(e, attempt)
+                if not should_retry or attempt >= self.max_retries:
                     # Not retryable, or the maximum number of retries has been reached
+                    current_duration = time.time() - method_start_time
+                    context['duration'] = current_duration
                     last_exception = self.error_handler.handle_error(e, context)
                     break
 
+                # Compute the elapsed time so far
+                current_duration = time.time() - method_start_time
+
                 # Log the retry
+                log_data = {
+                    "log_type": "info",
+                    "request_id": context.get('request_id'),
+                    "data": {
+                        "error_code": e.code().name if e.code() else 'UNKNOWN',
+                        "retry_count": attempt,
+                        "max_retries": self.max_retries,
+                        "method": context.get('method', 'unknown')
+                    },
+                    "duration": current_duration
+                }
                 logger.warning(
                     f"Attempt {attempt + 1}/{self.max_retries + 1} failed: {e.code()}",
-                    extra=…
+                    extra=log_data
                 )
 
                 # Perform the backoff wait
                 if attempt < self.max_retries:
-                    delay = self._calculate_backoff(attempt)
+                    delay = self._calculate_backoff(attempt, e.code())
                     time.sleep(delay)
 
+                context['duration'] = current_duration
                 last_exception = self.error_handler.handle_error(e, context)
 
             except Exception as e:
```
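For orientation: the hunk above only shows the `except grpc.RpcError` branch of `_retry_request`; the surrounding loop is not visible in this diff. The snippet below is a minimal sketch of the overall shape such a retry wrapper typically has, assuming `func` is the wrapped gRPC call. It is illustrative only, not the published implementation.

```python
import time


class RetryLoopSketch:
    """Illustrative shape of a retry wrapper like _retry_request (not the real code)."""

    max_retries = 3
    retry_delay = 1.0

    def _retry_request(self, func, *args, **kwargs):
        last_exception = None
        for attempt in range(self.max_retries + 1):
            try:
                return func(*args, **kwargs)  # success: return the result immediately
            except Exception as e:  # the real wrapper catches grpc.RpcError specifically
                # Give up if the error is not retryable or the retry budget is spent
                if not self._should_retry(e, attempt) or attempt >= self.max_retries:
                    last_exception = e
                    break
                # Otherwise wait out the backoff and loop again
                time.sleep(self._calculate_backoff(attempt))
                last_exception = e
        if last_exception:
            raise last_exception

    def _should_retry(self, error, attempt):
        return True  # placeholder; the real policy lookup appears in the next hunk

    def _calculate_backoff(self, attempt):
        return min(self.retry_delay * (2 ** attempt), 60.0)  # capped exponential backoff
```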
```diff
@@ -248,14 +274,114 @@ class TamarModelClient(BaseClient):
             else:
                 raise TamarModelException("Unknown error occurred")
 
-    def …
-        """
-        …
-        …
+    def _should_retry(self, error: grpc.RpcError, attempt: int) -> bool:
+        """
+        Decide whether a retry should be attempted
+
+        Args:
+            error: the gRPC error
+            attempt: the current retry count
+
+        Returns:
+            bool: whether to retry
+        """
+        error_code = error.code()
+        from .exceptions import get_retry_policy, ErrorContext
+        policy = get_retry_policy(error_code)
+
+        # First check the error-level max_attempts setting
+        # max_attempts is the maximum number of retries (not counting the initial request)
+        error_max_attempts = policy.get('max_attempts', self.max_retries)
+        if attempt >= error_max_attempts:
+            return False
+
+        # Then check the global max_retries
+        if attempt >= self.max_retries:
+            return False
+
+        retryable = policy.get('retryable', False)
+
+        if retryable == False:
+            return False
+        elif retryable == True:
+            return True
+        elif retryable == 'conditional':
+            # Conditional retry, handled as a special case
+            if error_code == grpc.StatusCode.CANCELLED:
+                # Check whether the cancellation was caused by a network interruption
+                context = {'method': 'unknown', 'client_version': 'sync'}
+                error_context = ErrorContext(error, context)
+                return error_context.is_network_cancelled()
+            else:
+                return self._check_error_details_for_retry(error)
+
+        return False
+
+    def _check_error_details_for_retry(self, error: grpc.RpcError) -> bool:
+        """
+        Inspect the error details to decide whether to retry
+
+        Args:
+            error: the gRPC error
+
+        Returns:
+            bool: whether to retry
+        """
+        error_message = error.details().lower() if error.details() else ""
+
+        # Retryable error patterns
+        retryable_patterns = [
+            'temporary', 'timeout', 'unavailable',
+            'connection', 'network', 'try again'
+        ]
+
+        for pattern in retryable_patterns:
+            if pattern in error_message:
+                return True
+
+        return False
 
-    …
-    …
-    …
+    def _calculate_backoff(self, attempt: int, error_code: grpc.StatusCode = None) -> float:
+        """
+        Compute the backoff delay, supporting different backoff strategies
+
+        Args:
+            attempt: the current retry count
+            error_code: the gRPC status code, used to pick the backoff strategy
+        """
+        max_delay = 60.0
+        base_delay = self.retry_delay
+
+        # Look up the retry policy for this error
+        if error_code:
+            from .exceptions import get_retry_policy
+            policy = get_retry_policy(error_code)
+            backoff_type = policy.get('backoff', 'exponential')
+            use_jitter = policy.get('jitter', False)
+        else:
+            backoff_type = 'exponential'
+            use_jitter = False
+
+        # Compute the delay according to the backoff type
+        if backoff_type == 'linear':
+            # Linear backoff: delay * (attempt + 1)
+            delay = min(base_delay * (attempt + 1), max_delay)
+        else:
+            # Exponential backoff: delay * 2^attempt
+            delay = min(base_delay * (2 ** attempt), max_delay)
+
+        # Add jitter
+        if use_jitter:
+            jitter_factor = 0.2  # wider jitter range to reduce contention
+            jitter = random.uniform(0, delay * jitter_factor)
+            delay += jitter
+        else:
+            # A small default jitter to avoid fully synchronized retries
+            jitter_factor = 0.05
+            jitter = random.uniform(0, delay * jitter_factor)
+            delay += jitter
+
+        return delay
 
     def _retry_request_stream(self, func, *args, **kwargs):
         """
```
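To make the backoff arithmetic above concrete, here is a small self-contained sketch that reproduces the same formulas: exponential `base * 2^attempt` versus linear `base * (attempt + 1)`, capped at 60 s, with 20% or 5% jitter. The `base_delay` default of 1.0 s mirrors the documented `MODEL_MANAGER_SERVER_GRPC_RETRY_DELAY` default; the function itself is illustrative and not part of the SDK.

```python
import random


def backoff_delay(attempt: int, base_delay: float = 1.0,
                  backoff_type: str = "exponential", use_jitter: bool = False) -> float:
    """Reproduce the delay arithmetic of _calculate_backoff (illustrative only)."""
    max_delay = 60.0
    if backoff_type == "linear":
        delay = min(base_delay * (attempt + 1), max_delay)   # 1, 2, 3, 4, ...
    else:
        delay = min(base_delay * (2 ** attempt), max_delay)  # 1, 2, 4, 8, ...
    jitter_factor = 0.2 if use_jitter else 0.05               # wide vs. minimal jitter
    return delay + random.uniform(0, delay * jitter_factor)


# With base_delay=1.0 and jitter enabled, successive attempts land roughly in:
#   attempt 0: 1.0-1.2 s, attempt 1: 2.0-2.4 s, attempt 2: 4.0-4.8 s, attempt 3: 8.0-9.6 s
for attempt in range(4):
    print(attempt, round(backoff_delay(attempt, use_jitter=True), 2))
```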
```diff
@@ -271,30 +397,91 @@ class TamarModelClient(BaseClient):
         Yields:
             Each element of the streaming response
         """
+        # Record the method start time
+        method_start_time = time.time()
+
+        # Extract request_id from kwargs (if present), then remove it
+        request_id = kwargs.pop('request_id', None) or get_request_id()
+
         last_exception = None
+        context = {
+            'method': 'stream',
+            'client_version': 'sync',
+            'request_id': request_id,
+        }
 
         for attempt in range(self.max_retries + 1):
             try:
+                context['retry_count'] = attempt
                 # Try to create the stream
                 for item in func(*args, **kwargs):
                     yield item
                 return
 
             except grpc.RpcError as e:
-                …
-                …
-                …
-                …
-                …
+                # Use the smarter retry decision
+                context['retry_count'] = attempt
+
+                # Compute the elapsed time so far
+                current_duration = time.time() - method_start_time
+
+                # Decide whether to retry
+                should_retry = self._should_retry(e, attempt)
+                if not should_retry or attempt >= self.max_retries:
+                    # No retry, or the maximum number of retries has been reached
+                    log_data = {
+                        "log_type": "info",
+                        "request_id": context.get('request_id'),
+                        "data": {
+                            "error_code": e.code().name if e.code() else 'UNKNOWN',
+                            "retry_count": attempt,
+                            "max_retries": self.max_retries,
+                            "method": "stream",
+                            "will_retry": False
+                        },
+                        "duration": current_duration
+                    }
+                    logger.error(
+                        f"Stream failed: {e.code()} (no retry)",
+                        extra=log_data
                     )
-                    …
-                    …
+                    context['duration'] = current_duration
+                    last_exception = self.error_handler.handle_error(e, context)
                     break
+
+                # Log the retry
+                log_data = {
+                    "log_type": "info",
+                    "request_id": context.get('request_id'),
+                    "data": {
+                        "error_code": e.code().name if e.code() else 'UNKNOWN',
+                        "retry_count": attempt,
+                        "max_retries": self.max_retries,
+                        "method": "stream"
+                    },
+                    "duration": current_duration
+                }
+                logger.warning(
+                    f"Stream attempt {attempt + 1}/{self.max_retries + 1} failed: {e.code()} (will retry)",
+                    extra=log_data
+                )
+
+                # Perform the backoff wait
+                if attempt < self.max_retries:
+                    delay = self._calculate_backoff(attempt, e.code())
+                    time.sleep(delay)
+
+                last_exception = e
+
             except Exception as e:
+                context['retry_count'] = attempt
                 raise TamarModelException(str(e)) from e
 
         if last_exception:
-            …
+            if isinstance(last_exception, TamarModelException):
+                raise last_exception
+            else:
+                raise self.error_handler.handle_error(last_exception, context)
         else:
             raise TamarModelException("Unknown streaming error occurred")
 
```
```diff
@@ -457,6 +644,12 @@ class TamarModelClient(BaseClient):
             ValidationError: Input validation failed.
             ConnectionError: Failed to connect to the server.
         """
+        # If resilience is enabled and the circuit breaker is open, go straight to HTTP
+        if self.resilient_enabled and self.circuit_breaker and self.circuit_breaker.is_open:
+            if self.http_fallback_url:
+                logger.warning("🔻 Circuit breaker is OPEN, using HTTP fallback")
+                return self._invoke_http_fallback(model_request, timeout, request_id)
+
         self._ensure_initialized()
 
         if not self.default_payload:
```
```diff
@@ -510,10 +703,11 @@ class TamarModelClient(BaseClient):
                 # For streaming responses, use the retry wrapper
                 return self._retry_request_stream(
                     self._stream_with_logging,
-                    request, metadata, invoke_timeout, start_time, model_request
+                    request, metadata, invoke_timeout, start_time, model_request,
+                    request_id=request_id
                 )
             else:
-                result = self._retry_request(self._invoke_request, request, metadata, invoke_timeout)
+                result = self._retry_request(self._invoke_request, request, metadata, invoke_timeout, request_id=request_id)
 
                 # Log the success of the non-streaming response
                 duration = time.time() - start_time
```
```diff
@@ -527,9 +721,14 @@ class TamarModelClient(BaseClient):
                         "data": ResponseHandler.build_log_data(model_request, result)
                     }
                 )
+
+                # Record the success (if resilience is enabled)
+                if self.resilient_enabled and self.circuit_breaker:
+                    self.circuit_breaker.record_success()
+
                 return result
 
-        except grpc.RpcError as e:
+        except (ConnectionError, grpc.RpcError) as e:
             duration = time.time() - start_time
             error_message = f"❌ Invoke gRPC failed: {str(e)}"
             logger.error(error_message, exc_info=True,
@@ -542,6 +741,18 @@ class TamarModelClient(BaseClient):
                             error=e
                         )
                     })
+
+            # Record the failure and attempt a fallback (if resilience is enabled)
+            if self.resilient_enabled and self.circuit_breaker:
+                # Pass the error code to the circuit breaker for smarter failure accounting
+                error_code = e.code() if hasattr(e, 'code') else None
+                self.circuit_breaker.record_failure(error_code)
+
+                # Fall back if a fallback is possible
+                if self.http_fallback_url and self.circuit_breaker.should_fallback():
+                    logger.warning(f"🔻 gRPC failed, falling back to HTTP: {str(e)}")
+                    return self._invoke_http_fallback(model_request, timeout, request_id)
+
             raise e
         except Exception as e:
             duration = time.time() - start_time
```
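The new `tamar_model_client/circuit_breaker.py` (+140 lines) is not expanded in this diff, but the call sites above fix its surface: `is_open`, `record_success()`, `record_failure(error_code)` and `should_fallback()`. A minimal breaker with that interface could look like the sketch below; the three state names match the README section further down, while the internal rules (how failures are weighted by status code, how HALF_OPEN probing works) are assumptions rather than the published implementation.

```python
import time


class CircuitBreakerSketch:
    """Illustrative three-state breaker matching the call sites above (not the real class)."""

    def __init__(self, threshold: int = 5, recovery_timeout: float = 60.0):
        self.threshold = threshold                # consecutive failures before tripping
        self.recovery_timeout = recovery_timeout  # seconds before a recovery probe
        self.failure_count = 0
        self.last_failure_time = None
        self.state = "closed"                     # closed -> open -> half_open -> closed

    @property
    def is_open(self) -> bool:
        if self.state == "open":
            # After the recovery timeout, allow a probe request (half-open)
            if time.time() - self.last_failure_time >= self.recovery_timeout:
                self.state = "half_open"
                return False
            return True
        return False

    def record_success(self) -> None:
        self.failure_count = 0
        self.state = "closed"

    def record_failure(self, error_code=None) -> None:
        # The real breaker reportedly weighs the gRPC status code; here every failure counts
        self.failure_count += 1
        self.last_failure_time = time.time()
        if self.failure_count >= self.threshold or self.state == "half_open":
            self.state = "open"

    def should_fallback(self) -> bool:
        # Fall back once the breaker has tripped (or a half-open probe just failed)
        return self.state in ("open", "half_open")
```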
```diff
@@ -626,7 +837,8 @@ class TamarModelClient(BaseClient):
                 self.stub.BatchInvoke,
                 batch_request,
                 metadata=metadata,
-                timeout=invoke_timeout
+                timeout=invoke_timeout,
+                request_id=request_id
             )
 
             # Build the response object
```
{tamar_model_client-0.1.20.dist-info → tamar_model_client-0.1.22.dist-info}/METADATA

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tamar-model-client
-Version: 0.1.…
+Version: 0.1.22
 Summary: A Python SDK for interacting with the Model Manager gRPC service
 Home-page: http://gitlab.tamaredge.top/project-tap/AgentOS/model-manager-client
 Author: Oscar Ou
```
````diff
@@ -532,6 +532,61 @@ response = client.invoke(
 - Enable streaming responses to reduce time to first token
 - Set max_tokens sensibly to avoid waste
 
+### 🛡️ Circuit-breaker fallback (high-availability safeguard)
+
+The SDK has a built-in circuit-breaker fallback mechanism: when the gRPC service becomes unavailable it automatically switches to the HTTP service to keep the business running.
+
+#### How it works
+1. **Normal state**: all requests go over the high-performance gRPC protocol
+2. **Circuit trip**: when consecutive failures reach the threshold, the circuit breaker opens
+3. **Automatic fallback**: requests switch to the HTTP protocol and service continues
+4. **Periodic recovery**: the circuit breaker periodically tries to return to gRPC
+
+#### How to enable it
+```bash
+# Set the environment variables
+export MODEL_CLIENT_RESILIENT_ENABLED=true
+export MODEL_CLIENT_HTTP_FALLBACK_URL=http://localhost:8080
+export MODEL_CLIENT_CIRCUIT_BREAKER_THRESHOLD=5
+export MODEL_CLIENT_CIRCUIT_BREAKER_TIMEOUT=60
+```
+
+#### Usage example
+```python
+from tamar_model_client import TamarModelClient
+
+# The client handles circuit-breaker fallback automatically; it is transparent to the caller
+client = TamarModelClient()
+
+# Use it as usual, without worrying about the underlying protocol
+response = client.invoke(request)
+
+# Get the circuit breaker status (optional)
+metrics = client.get_resilient_metrics()
+if metrics:
+    print(f"Circuit breaker state: {metrics['circuit_state']}")
+    print(f"Failure count: {metrics['failure_count']}")
+```
+
+#### Circuit breaker states
+- **CLOSED**: normal operation; requests pass through as usual
+- **OPEN**: tripped; every request falls back directly to HTTP
+- **HALF_OPEN**: recovery probing; a small number of requests are let through to test whether gRPC has recovered
+
+#### Monitoring metrics
+```python
+# Get the circuit-breaker fallback metrics
+metrics = client.get_resilient_metrics()
+# Example return value:
+# {
+#   "enabled": true,
+#   "circuit_state": "closed",
+#   "failure_count": 0,
+#   "last_failure_time": null,
+#   "http_fallback_url": "http://localhost:8080"
+# }
+```
+
 ### ⚠️ Notes
 
 1. **Parameter notes**
````
````diff
@@ -595,6 +650,23 @@ MODEL_MANAGER_SERVER_GRPC_MAX_RETRIES=3
 
 # Initial retry delay (seconds, default 1.0), exponential backoff
 MODEL_MANAGER_SERVER_GRPC_RETRY_DELAY=1.0
+
+
+# ========================
+# 🛡️ Circuit-breaker fallback configuration (optional)
+# ========================
+
+# Whether to enable circuit-breaker fallback (default false)
+MODEL_CLIENT_RESILIENT_ENABLED=false
+
+# HTTP fallback service address (backup address used when gRPC is unavailable)
+MODEL_CLIENT_HTTP_FALLBACK_URL=http://localhost:8080
+
+# Circuit breaker trip threshold (consecutive failures before the circuit opens, default 5)
+MODEL_CLIENT_CIRCUIT_BREAKER_THRESHOLD=5
+
+# Circuit breaker recovery timeout (seconds after tripping before a recovery attempt, default 60)
+MODEL_CLIENT_CIRCUIT_BREAKER_TIMEOUT=60
 ```
 
 Once loaded, the client can be initialized without passing any arguments:
````
{tamar_model_client-0.1.20.dist-info → tamar_model_client-0.1.22.dist-info}/RECORD

```diff
@@ -1,15 +1,17 @@
 tamar_model_client/__init__.py,sha256=4DEIUGlLTeiaECjJQbGYik7C0JO6hHwwfbLYpYpMdzg,444
-tamar_model_client/async_client.py,sha256=…
+tamar_model_client/async_client.py,sha256=8BbzeYX735xIaPJPF0oI3g-8oDKyl6GugxxCdBEGi5s,34400
 tamar_model_client/auth.py,sha256=gbwW5Aakeb49PMbmYvrYlVx1mfyn1LEDJ4qQVs-9DA4,438
-tamar_model_client/…
-tamar_model_client/…
+tamar_model_client/circuit_breaker.py,sha256=0XHJXBYA4O8vwsDGwqNrae9zxNJphY5Rfucc9ytVFGA,5419
+tamar_model_client/error_handler.py,sha256=eEuwMcecJvQbFkIieri60uLoQX5sBMawnxxedxzxj5I,12745
+tamar_model_client/exceptions.py,sha256=D6G8igA-YO4AroeCa-9CDDPt4hSqBFX5C_4w-NCIL1w,13063
 tamar_model_client/json_formatter.py,sha256=IyBv_pEEzjF-KaMF-7rxRpNc_fxRYK2A-pu_2n4Liow,1990
 tamar_model_client/logging_icons.py,sha256=MRTZ1Xvkep9ce_jdltj54_XZUXvIpQ95soRNmLdJ4qw,1837
-tamar_model_client/sync_client.py,sha256=…
+tamar_model_client/sync_client.py,sha256=mpWc6T9RjKnNDiETE8UYtKu2Zu0U6AME1kQMnDwnfMI,36225
 tamar_model_client/utils.py,sha256=Kn6pFz9GEC96H4eejEax66AkzvsrXI3WCSDtgDjnVTI,5238
-tamar_model_client/core/__init__.py,sha256=…
-tamar_model_client/core/base_client.py,sha256=…
-tamar_model_client/core/…
+tamar_model_client/core/__init__.py,sha256=bJRJllrp4Xc0g_qu1pW9G-lsXNB7c1r0NBIfb2Ypxe0,832
+tamar_model_client/core/base_client.py,sha256=sYvJZsDu_66akddAMowSnihFtgOoVKaQJxxnVruF9Ms,8995
+tamar_model_client/core/http_fallback.py,sha256=1OuSMxzhDyxy07JZa5artMTNdPNMyAhI7By3RUCSPDw,9872
+tamar_model_client/core/logging_setup.py,sha256=h1aky1uslIQnx4NxMqjoDMxwlc4Vg46KYTjW9yPu2xQ,6032
 tamar_model_client/core/request_builder.py,sha256=yi8iy2Ps2m4d1YwIFiQLRxTvxQxgEGV576aXnNYRl7E,8507
 tamar_model_client/core/response_handler.py,sha256=_q5galAT0_RaUT5C_yZsjg-9VnT9CBjmIASOt28BUmQ,4616
 tamar_model_client/core/utils.py,sha256=8jSx8UOE6ukbiIgruCX7SXN8J5FyuGbqENOmJDsxaSM,5084
@@ -25,9 +27,10 @@ tamar_model_client/schemas/inputs.py,sha256=dz1m8NbUIxA99JXZc8WlyzbKpDuz1lEzx3Vg
 tamar_model_client/schemas/outputs.py,sha256=M_fcqUtXPJnfiLabHlyA8BorlC5pYkf5KLjXO1ysKIQ,1031
 tests/__init__.py,sha256=kbmImddLDwdqlkkmkyKtl4bQy_ipe-R8eskpaBylU9w,38
 tests/stream_hanging_analysis.py,sha256=W3W48IhQbNAR6-xvMpoWZvnWOnr56CTaH4-aORNBuD4,14807
-tests/test_google_azure_final.py,sha256=…
+tests/test_google_azure_final.py,sha256=YFhjx2mQlFijcuHqOVnnS7ZD8mQWCf2Uv1oiqOFxASs,26393
+tests/test_logging_issue.py,sha256=JTMbotfHpAEPMBj73pOwxPn-Zn4QVQJX6scMz48FRDQ,2427
 tests/test_simple.py,sha256=Xf0U-J9_xn_LzUsmYu06suK0_7DrPeko8OHoHldsNxE,7169
-tamar_model_client-0.1.…
-tamar_model_client-0.1.…
-tamar_model_client-0.1.…
-tamar_model_client-0.1.…
+tamar_model_client-0.1.22.dist-info/METADATA,sha256=jEEq8UTzqVcutF26FefqaxZ08WhNW58xiyEWZgDO7WA,23453
+tamar_model_client-0.1.22.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+tamar_model_client-0.1.22.dist-info/top_level.txt,sha256=f1I-S8iWN-cgv4gB8gxRg9jJOTJMumvm4oGKVPfGg6A,25
+tamar_model_client-0.1.22.dist-info/RECORD,,
```