tamar-model-client 0.1.17__py3-none-any.whl → 0.1.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tamar_model_client/__init__.py +4 -0
- tamar_model_client/async_client.py +222 -31
- tamar_model_client/json_formatter.py +26 -0
- tamar_model_client/logging_icons.py +60 -0
- tamar_model_client/sync_client.py +206 -24
- {tamar_model_client-0.1.17.dist-info → tamar_model_client-0.1.19.dist-info}/METADATA +1 -1
- {tamar_model_client-0.1.17.dist-info → tamar_model_client-0.1.19.dist-info}/RECORD +9 -7
- {tamar_model_client-0.1.17.dist-info → tamar_model_client-0.1.19.dist-info}/WHEEL +0 -0
- {tamar_model_client-0.1.17.dist-info → tamar_model_client-0.1.19.dist-info}/top_level.txt +0 -0
tamar_model_client/__init__.py
CHANGED
@@ -1,6 +1,8 @@
 from .sync_client import TamarModelClient
 from .async_client import AsyncTamarModelClient
 from .exceptions import ModelManagerClientError, ConnectionError, ValidationError
+from .json_formatter import JSONFormatter
+from . import logging_icons
 
 __all__ = [
     "TamarModelClient",
@@ -8,4 +10,6 @@ __all__ = [
     "ModelManagerClientError",
     "ConnectionError",
     "ValidationError",
+    "JSONFormatter",
+    "logging_icons",
 ]
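Both new modules are re-exported at the package root. A minimal wiring sketch, assuming the 0.1.19 wheel is installed (the logger name below is illustrative, not part of the package):

```python
import logging
from tamar_model_client import JSONFormatter

handler = logging.StreamHandler()
handler.setFormatter(JSONFormatter())  # one JSON object per log line
logging.getLogger("my_app").addHandler(handler)  # "my_app" is an arbitrary example
```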
tamar_model_client/async_client.py
CHANGED
@@ -4,6 +4,7 @@ import base64
 import json
 import logging
 import os
+import time
 import uuid
 from contextvars import ContextVar
 
@@ -20,6 +21,7 @@ from .schemas import ModelRequest, ModelResponse, BatchModelRequest, BatchModelR
 from .generated import model_service_pb2, model_service_pb2_grpc
 from .schemas.inputs import GoogleGenAiInput, OpenAIResponsesInput, OpenAIChatCompletionsInput, \
     GoogleVertexAIImagesInput, OpenAIImagesInput, OpenAIImagesEditInput
+from .json_formatter import JSONFormatter
 
 logger = logging.getLogger(__name__)
 
@@ -40,8 +42,8 @@ if not logger.hasHandlers():
     # Create a log handler that writes to the console
     console_handler = logging.StreamHandler()
 
-    #
-    formatter =
+    # Use the JSON formatter
+    formatter = JSONFormatter()
     console_handler.setFormatter(formatter)
 
     # Attach the handler to this logger
@@ -181,26 +183,31 @@ class AsyncTamarModelClient:
                 # Exponential-backoff retry when the call was cancelled
                 if isinstance(e, grpc.aio.AioRpcError) and e.code() == grpc.StatusCode.CANCELLED:
                     retry_count += 1
-                    logger.warning(f"
+                    logger.warning(f"⚠️ RPC cancelled, retrying {retry_count}/{self.max_retries}...",
+                                   extra={"log_type": "info", "data": {"retry_count": retry_count, "max_retries": self.max_retries, "error_code": "CANCELLED"}})
                     if retry_count < self.max_retries:
                         delay = self.retry_delay * (2 ** (retry_count - 1))
                         await asyncio.sleep(delay)
                     else:
-                        logger.error("❌ Max retry reached for CANCELLED"
+                        logger.error("❌ Max retry reached for CANCELLED",
+                                     extra={"log_type": "info", "data": {"error_code": "CANCELLED", "max_retries_reached": True}})
                         raise
                 # Other RPC error types, e.g. transient connection problems or server timeouts
                 elif isinstance(e, grpc.RpcError) and e.code() in {grpc.StatusCode.UNAVAILABLE,
                                                                    grpc.StatusCode.DEADLINE_EXCEEDED}:
                     retry_count += 1
-                    logger.warning(f"
+                    logger.warning(f"⚠️ gRPC error {e.code()}, retrying {retry_count}/{self.max_retries}...",
+                                   extra={"log_type": "info", "data": {"retry_count": retry_count, "max_retries": self.max_retries, "error_code": str(e.code())}})
                     if retry_count < self.max_retries:
                         delay = self.retry_delay * (2 ** (retry_count - 1))
                         await asyncio.sleep(delay)
                     else:
-                        logger.error(f"❌ Max retry reached for {e.code()}"
+                        logger.error(f"❌ Max retry reached for {e.code()}",
+                                     extra={"log_type": "info", "data": {"error_code": str(e.code()), "max_retries_reached": True}})
                         raise
                 else:
-                    logger.error(f"❌ Non-retryable gRPC error: {e}", exc_info=True
+                    logger.error(f"❌ Non-retryable gRPC error: {e}", exc_info=True,
+                                 extra={"log_type": "info", "data": {"error_code": str(e.code()) if hasattr(e, 'code') else None, "retryable": False}})
                     raise
 
     async def _retry_request_stream(self, func, *args, **kwargs):
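Both retry paths share the same exponential backoff, delay = retry_delay * 2 ** (retry_count - 1). A quick illustration of the resulting schedule; retry_delay=1.0 and max_retries=3 are assumed values for this sketch, not defaults confirmed by the diff:

```python
retry_delay, max_retries = 1.0, 3  # assumed values, for illustration only
for retry_count in range(1, max_retries + 1):
    delay = retry_delay * (2 ** (retry_count - 1))
    print(f"retry {retry_count}: sleep {delay:.1f}s")
# retry 1: sleep 1.0s
# retry 2: sleep 2.0s
# retry 3: sleep 4.0s
```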
@@ -212,26 +219,31 @@ class AsyncTamarModelClient:
                 # Exponential-backoff retry when the call was cancelled
                 if isinstance(e, grpc.aio.AioRpcError) and e.code() == grpc.StatusCode.CANCELLED:
                     retry_count += 1
-                    logger.warning(f"
+                    logger.warning(f"⚠️ RPC cancelled, retrying {retry_count}/{self.max_retries}...",
+                                   extra={"log_type": "info", "data": {"retry_count": retry_count, "max_retries": self.max_retries, "error_code": "CANCELLED"}})
                     if retry_count < self.max_retries:
                         delay = self.retry_delay * (2 ** (retry_count - 1))
                         await asyncio.sleep(delay)
                     else:
-                        logger.error("❌ Max retry reached for CANCELLED"
+                        logger.error("❌ Max retry reached for CANCELLED",
+                                     extra={"log_type": "info", "data": {"error_code": "CANCELLED", "max_retries_reached": True}})
                         raise
                 # Other RPC error types, e.g. transient connection problems or server timeouts
                 elif isinstance(e, grpc.RpcError) and e.code() in {grpc.StatusCode.UNAVAILABLE,
                                                                    grpc.StatusCode.DEADLINE_EXCEEDED}:
                     retry_count += 1
-                    logger.warning(f"
+                    logger.warning(f"⚠️ gRPC error {e.code()}, retrying {retry_count}/{self.max_retries}...",
+                                   extra={"log_type": "info", "data": {"retry_count": retry_count, "max_retries": self.max_retries, "error_code": str(e.code())}})
                     if retry_count < self.max_retries:
                         delay = self.retry_delay * (2 ** (retry_count - 1))
                         await asyncio.sleep(delay)
                     else:
-                        logger.error(f"❌ Max retry reached for {e.code()}"
+                        logger.error(f"❌ Max retry reached for {e.code()}",
+                                     extra={"log_type": "info", "data": {"error_code": str(e.code()), "max_retries_reached": True}})
                         raise
                 else:
-                    logger.error(f"❌ Non-retryable gRPC error: {e}", exc_info=True
+                    logger.error(f"❌ Non-retryable gRPC error: {e}", exc_info=True,
+                                 extra={"log_type": "info", "data": {"error_code": str(e.code()) if hasattr(e, 'code') else None, "retryable": False}})
                     raise
 
     def _build_auth_metadata(self, request_id: str) -> list:
@@ -266,32 +278,40 @@ class AsyncTamarModelClient:
                     credentials,
                     options=options
                 )
-                logger.info("🔐 Using secure gRPC channel (TLS enabled)"
+                logger.info("🔐 Using secure gRPC channel (TLS enabled)",
+                            extra={"log_type": "info", "data": {"tls_enabled": True, "server_address": self.server_address}})
             else:
                 self.channel = grpc.aio.insecure_channel(
                     self.server_address,
                     options=options
                 )
-                logger.info("🔓 Using insecure gRPC channel (TLS disabled)"
+                logger.info("🔓 Using insecure gRPC channel (TLS disabled)",
+                            extra={"log_type": "info", "data": {"tls_enabled": False, "server_address": self.server_address}})
             await self.channel.channel_ready()
             self.stub = model_service_pb2_grpc.ModelServiceStub(self.channel)
-            logger.info(f"✅ gRPC channel initialized to {self.server_address}"
+            logger.info(f"✅ gRPC channel initialized to {self.server_address}",
+                        extra={"log_type": "info", "data": {"status": "success", "server_address": self.server_address}})
             return
         except grpc.FutureTimeoutError as e:
-            logger.error(f"❌ gRPC channel initialization timed out: {str(e)}", exc_info=True
+            logger.error(f"❌ gRPC channel initialization timed out: {str(e)}", exc_info=True,
+                         extra={"log_type": "info", "data": {"error_type": "timeout", "server_address": self.server_address}})
         except grpc.RpcError as e:
-            logger.error(f"❌ gRPC channel initialization failed: {str(e)}", exc_info=True
+            logger.error(f"❌ gRPC channel initialization failed: {str(e)}", exc_info=True,
+                         extra={"log_type": "info", "data": {"error_type": "rpc_error", "server_address": self.server_address}})
         except Exception as e:
-            logger.error(f"❌ Unexpected error during channel initialization: {str(e)}", exc_info=True
+            logger.error(f"❌ Unexpected error during channel initialization: {str(e)}", exc_info=True,
+                         extra={"log_type": "info", "data": {"error_type": "unexpected", "server_address": self.server_address}})
 
         retry_count += 1
         if retry_count > self.max_retries:
-            logger.error(f"❌ Failed to initialize gRPC channel after {self.max_retries} retries.", exc_info=True
+            logger.error(f"❌ Failed to initialize gRPC channel after {self.max_retries} retries.", exc_info=True,
+                         extra={"log_type": "info", "data": {"max_retries_reached": True, "server_address": self.server_address}})
             raise ConnectionError(f"❌ Failed to initialize gRPC channel after {self.max_retries} retries.")
 
         # Exponential backoff: delay = retry_delay * (2 ** (retry_count - 1))
         delay = self.retry_delay * (2 ** (retry_count - 1))
-        logger.
+        logger.warning(f"🔄 Retrying connection (attempt {retry_count}/{self.max_retries}) after {delay:.2f}s delay...",
+                       extra={"log_type": "info", "data": {"retry_count": retry_count, "max_retries": self.max_retries, "delay": delay}})
         await asyncio.sleep(delay)
 
     async def _stream(self, request, metadata, invoke_timeout) -> AsyncIterator[ModelResponse]:
@@ -303,6 +323,66 @@ class AsyncTamarModelClient:
                 raw_response=json.loads(response.raw_response) if response.raw_response else None,
                 request_id=response.request_id if response.request_id else None,
             )
+
+    async def _stream_with_logging(self, request, metadata, invoke_timeout, start_time, model_request) -> AsyncIterator[ModelResponse]:
+        """Wrapper around the streaming response that logs the complete response."""
+        total_content = ""
+        final_usage = None
+        error_occurred = None
+        chunk_count = 0
+
+        try:
+            async for response in self._stream(request, metadata, invoke_timeout):
+                chunk_count += 1
+                if response.content:
+                    total_content += response.content
+                if response.usage:
+                    final_usage = response.usage
+                if response.error:
+                    error_occurred = response.error
+                yield response
+
+            # Stream finished; log success
+            duration = time.time() - start_time
+            logger.info(
+                f"✅ Stream completed successfully | chunks: {chunk_count}",
+                extra={
+                    "log_type": "response",
+                    "uri": f"/invoke/{model_request.provider.value}/{model_request.invoke_type.value}",
+                    "duration": duration,
+                    "data": {
+                        "provider": model_request.provider.value,
+                        "invoke_type": model_request.invoke_type.value,
+                        "model": model_request.model,
+                        "stream": True,
+                        "chunks_count": chunk_count,
+                        "total_length": len(total_content),
+                        "usage": final_usage
+                    }
+                }
+            )
+        except Exception as e:
+            # Stream failed; log the error
+            duration = time.time() - start_time
+            logger.error(
+                f"❌ Stream failed after {chunk_count} chunks: {str(e)}",
+                exc_info=True,
+                extra={
+                    "log_type": "response",
+                    "uri": f"/invoke/{model_request.provider.value}/{model_request.invoke_type.value}",
+                    "duration": duration,
+                    "data": {
+                        "provider": model_request.provider.value,
+                        "invoke_type": model_request.invoke_type.value,
+                        "model": model_request.model,
+                        "stream": True,
+                        "chunks_count": chunk_count,
+                        "error_type": type(e).__name__,
+                        "partial_content_length": len(total_content)
+                    }
+                }
+            )
+            raise
 
     async def _invoke_request(self, request, metadata, invoke_timeout):
         async for response in self.stub.Invoke(request, metadata=metadata, timeout=invoke_timeout):
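The wrapper passes every chunk through unchanged and only emits one aggregate "response" record once the stream ends or fails. A hedged consumption sketch, not a confirmed API:

```python
# Hedged sketch: assumes the single-request entry point is `invoke` (the diff
# shows it only indirectly, via the "/invoke/..." uri and the "Invoke gRPC
# failed" messages) and that `request` is a ModelRequest with stream=True,
# built elsewhere, with `client` an initialized AsyncTamarModelClient.
async for chunk in await client.invoke(request):
    print(chunk.content or "", end="")
# After the final chunk, one JSON "response" record is logged carrying
# chunks_count, total_length and the last usage value seen.
```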
@@ -345,8 +425,22 @@ class AsyncTamarModelClient:
         metadata = self._build_auth_metadata(request_id)  # add the request_id to the request headers
 
         # Log request start
+        start_time = time.time()
         logger.info(
-            f"🔵 Request Start | request_id: {request_id} | provider: {model_request.provider} | invoke_type: {model_request.invoke_type}"
+            f"🔵 Request Start | request_id: {request_id} | provider: {model_request.provider} | invoke_type: {model_request.invoke_type}",
+            extra={
+                "log_type": "request",
+                "uri": f"/invoke/{model_request.provider.value}/{model_request.invoke_type.value}",
+                "data": {
+                    "provider": model_request.provider.value,
+                    "invoke_type": model_request.invoke_type.value,
+                    "model": model_request.model,
+                    "stream": model_request.stream,
+                    "org_id": model_request.user_context.org_id,
+                    "user_id": model_request.user_context.user_id,
+                    "client_type": model_request.user_context.client_type
+                }
+            })
 
         # Pick the input field dynamically based on provider/invoke_type
         try:
@@ -404,16 +498,63 @@ class AsyncTamarModelClient:
         try:
             invoke_timeout = timeout or self.default_invoke_timeout
             if model_request.stream:
-
+                # For streaming responses, use the logging wrapper
+                stream_generator = await self._retry_request_stream(self._stream, request, metadata, invoke_timeout)
+                return self._stream_with_logging(request, metadata, invoke_timeout, start_time, model_request)
             else:
-
+                result = await self._retry_request(self._invoke_request, request, metadata, invoke_timeout)
+
+                # Log success for the non-streaming response
+                duration = time.time() - start_time
+                logger.info(
+                    f"✅ Request completed successfully",
+                    extra={
+                        "log_type": "response",
+                        "uri": f"/invoke/{model_request.provider.value}/{model_request.invoke_type.value}",
+                        "duration": duration,
+                        "data": {
+                            "provider": model_request.provider.value,
+                            "invoke_type": model_request.invoke_type.value,
+                            "model": model_request.model,
+                            "stream": False,
+                            "content_length": len(result.content) if result.content else 0,
+                            "usage": result.usage
+                        }
+                    }
+                )
+                return result
         except grpc.RpcError as e:
+            duration = time.time() - start_time
             error_message = f"❌ Invoke gRPC failed: {str(e)}"
-            logger.error(error_message, exc_info=True
+            logger.error(error_message, exc_info=True,
+                         extra={
+                             "log_type": "response",
+                             "uri": f"/invoke/{model_request.provider.value}/{model_request.invoke_type.value}",
+                             "duration": duration,
+                             "data": {
+                                 "error_type": "grpc_error",
+                                 "error_code": str(e.code()) if hasattr(e, 'code') else None,
+                                 "provider": model_request.provider.value,
+                                 "invoke_type": model_request.invoke_type.value,
+                                 "model": model_request.model
+                             }
+                         })
             raise e
         except Exception as e:
+            duration = time.time() - start_time
             error_message = f"❌ Invoke other error: {str(e)}"
-            logger.error(error_message, exc_info=True
+            logger.error(error_message, exc_info=True,
+                         extra={
+                             "log_type": "response",
+                             "uri": f"/invoke/{model_request.provider.value}/{model_request.invoke_type.value}",
+                             "duration": duration,
+                             "data": {
+                                 "error_type": "other_error",
+                                 "provider": model_request.provider.value,
+                                 "invoke_type": model_request.invoke_type.value,
+                                 "model": model_request.model
+                             }
+                         })
             raise e
 
     async def invoke_batch(self, batch_request_model: BatchModelRequest, timeout: Optional[float] = None,
@@ -444,8 +585,19 @@ class AsyncTamarModelClient:
         metadata = self._build_auth_metadata(request_id)  # add the request_id to the request headers
 
         # Log request start
+        start_time = time.time()
         logger.info(
-            f"🔵 Batch Request Start | request_id: {request_id} | batch_size: {len(batch_request_model.items)}"
+            f"🔵 Batch Request Start | request_id: {request_id} | batch_size: {len(batch_request_model.items)}",
+            extra={
+                "log_type": "request",
+                "uri": "/batch_invoke",
+                "data": {
+                    "batch_size": len(batch_request_model.items),
+                    "org_id": batch_request_model.user_context.org_id,
+                    "user_id": batch_request_model.user_context.user_id,
+                    "client_type": batch_request_model.user_context.client_type
+                }
+            })
 
         # Build the batch request
         items = []
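A hedged sketch of a batch call. The diff only shows that BatchModelRequest carries .items and a .user_context with org_id/user_id/client_type, so the construction below is an assumption:

```python
# Hypothetical batch invocation; item and user_context construction is elided,
# and `client` is an initialized AsyncTamarModelClient.
batch = BatchModelRequest(items=[...], user_context=user_context)
batch_response = await client.invoke_batch(batch)
print(len(batch_response.responses))  # logged as responses_count on success
```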
@@ -520,17 +672,54 @@ class AsyncTamarModelClient:
                     error=res_item.error or None,
                     custom_id=res_item.custom_id if res_item.custom_id else None
                 ))
-
+            batch_response = BatchModelResponse(
                 request_id=response.request_id if response.request_id else None,
                 responses=result
             )
+
+            # Log success
+            duration = time.time() - start_time
+            logger.info(
+                f"✅ Batch request completed successfully",
+                extra={
+                    "log_type": "response",
+                    "uri": "/batch_invoke",
+                    "duration": duration,
+                    "data": {
+                        "batch_size": len(batch_request_model.items),
+                        "responses_count": len(result)
+                    }
+                }
+            )
+            return batch_response
         except grpc.RpcError as e:
+            duration = time.time() - start_time
             error_message = f"❌ BatchInvoke gRPC failed: {str(e)}"
-            logger.error(error_message, exc_info=True
+            logger.error(error_message, exc_info=True,
+                         extra={
+                             "log_type": "response",
+                             "uri": "/batch_invoke",
+                             "duration": duration,
+                             "data": {
+                                 "error_type": "grpc_error",
+                                 "error_code": str(e.code()) if hasattr(e, 'code') else None,
+                                 "batch_size": len(batch_request_model.items)
+                             }
+                         })
             raise e
         except Exception as e:
+            duration = time.time() - start_time
             error_message = f"❌ BatchInvoke other error: {str(e)}"
-            logger.error(error_message, exc_info=True
+            logger.error(error_message, exc_info=True,
+                         extra={
+                             "log_type": "response",
+                             "uri": "/batch_invoke",
+                             "duration": duration,
+                             "data": {
+                                 "error_type": "other_error",
+                                 "batch_size": len(batch_request_model.items)
+                             }
+                         })
             raise e
 
     async def close(self):
@@ -538,7 +727,8 @@ class AsyncTamarModelClient:
         if self.channel and not self._closed:
             await self.channel.close()
             self._closed = True
-            logger.info("✅ gRPC channel closed"
+            logger.info("✅ gRPC channel closed",
+                        extra={"log_type": "info", "data": {"status": "success"}})
 
     def _safe_sync_close(self):
         """Close the channel automatically at process exit (event-loop compatible)."""
@@ -550,7 +740,8 @@ class AsyncTamarModelClient:
             else:
                 loop.run_until_complete(self.close())
         except Exception as e:
-            logger.warning(f"
+            logger.warning(f"⚠️ gRPC channel close failed at exit: {e}",
+                           extra={"log_type": "info", "data": {"status": "failed", "error": str(e)}})
 
     async def __aenter__(self):
         """Support async with for automatic connection initialization."""
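Per the `__aenter__` docstring, the async client supports `async with`. A hedged usage sketch:

```python
# Hypothetical usage; constructor arguments and the paired __aexit__ behaviour
# are assumptions not shown in this diff. `request` is a ModelRequest built elsewhere.
async with AsyncTamarModelClient() as client:
    result = await client.invoke(request)
```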
tamar_model_client/json_formatter.py
ADDED
@@ -0,0 +1,26 @@
+import json
+import logging
+from datetime import datetime
+
+
+class JSONFormatter(logging.Formatter):
+    def format(self, record):
+        # log_type must be one of request, response or info
+        log_type = getattr(record, "log_type", "info")
+        if log_type not in ["request", "response", "info"]:
+            log_type = "info"
+
+        log_data = {
+            "timestamp": datetime.fromtimestamp(record.created).isoformat(),
+            "level": record.levelname,
+            "type": log_type,
+            "uri": getattr(record, "uri", None),
+            "request_id": getattr(record, "request_id", None),
+            "data": getattr(record, "data", None),
+            "message": record.getMessage(),
+            "duration": getattr(record, "duration", None),
+        }
+        # Optional trace support
+        if hasattr(record, "trace"):
+            log_data["trace"] = getattr(record, "trace")
+        return json.dumps(log_data, ensure_ascii=False)
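Every field except message, level and timestamp falls back to None, so the formatter also works for plain log calls without extra. A small demo of the output shape, mirroring the "secure channel" log above (the server address is illustrative; requires the 0.1.19 package, or paste the class as shown):

```python
import logging
from tamar_model_client import JSONFormatter

# Build a record by hand to show the serialized shape.
record = logging.LogRecord("tamar_model_client", logging.INFO, __file__, 0,
                           "🔐 Using secure gRPC channel (TLS enabled)", None, None)
record.log_type = "info"
record.data = {"tls_enabled": True, "server_address": "localhost:50051"}  # illustrative
print(JSONFormatter().format(record))
# {"timestamp": "...", "level": "INFO", "type": "info", "uri": null,
#  "request_id": null, "data": {"tls_enabled": true, "server_address": "localhost:50051"},
#  "message": "🔐 Using secure gRPC channel (TLS enabled)", "duration": null}
```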
tamar_model_client/logging_icons.py
ADDED
@@ -0,0 +1,60 @@
+"""
+Log icon conventions.
+
+This module defines a unified standard for log icons so that log messages stay visually consistent across the project.
+"""
+
+# Request lifecycle icons
+REQUEST_START = "🔵"  # request start
+RESPONSE_SUCCESS = "✅"  # response success
+RESPONSE_ERROR = "❌"  # response error
+
+# Connection / network icons
+SECURE_CONNECTION = "🔐"  # secure connection (TLS)
+INSECURE_CONNECTION = "🔓"  # insecure connection (no TLS)
+CONNECTION_SUCCESS = "✅"  # connection succeeded
+CONNECTION_RETRY = "🔄"  # connection retry
+CONNECTION_ERROR = "❌"  # connection error
+
+# Operation status icons
+SUCCESS = "✅"  # success
+ERROR = "❌"  # error
+WARNING = "⚠️"  # warning
+INFO = "ℹ️"  # info
+RETRY = "🔄"  # retry
+PROCESSING = "⚙️"  # processing
+
+# Streaming response icons
+STREAM_SUCCESS = "✅"  # stream completed
+STREAM_ERROR = "❌"  # stream error
+STREAM_CHUNK = "📦"  # stream chunk
+
+# Batch operation icons
+BATCH_START = "🔵"  # batch start
+BATCH_SUCCESS = "✅"  # batch success
+BATCH_ERROR = "❌"  # batch error
+
+# System operation icons
+INIT = "🚀"  # initialization
+CLOSE = "🔚"  # shutdown
+CLEANUP = "🧹"  # cleanup
+
+def get_icon_for_log_type(log_type: str, is_success: bool = True) -> str:
+    """
+    Return the appropriate icon for a given log type and status.
+
+    Args:
+        log_type: log type (request, response, info)
+        is_success: whether the operation succeeded
+
+    Returns:
+        The matching icon string
+    """
+    if log_type == "request":
+        return REQUEST_START
+    elif log_type == "response":
+        return RESPONSE_SUCCESS if is_success else RESPONSE_ERROR
+    elif log_type == "info":
+        return INFO if is_success else WARNING
+    else:
+        return INFO
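Usage is straightforward, since the module is re-exported from the package root:

```python
from tamar_model_client import logging_icons

logging_icons.get_icon_for_log_type("request")          # "🔵"
logging_icons.get_icon_for_log_type("response", False)  # "❌"
logging_icons.get_icon_for_log_type("info")             # "ℹ️"
```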
tamar_model_client/sync_client.py
CHANGED
@@ -18,6 +18,7 @@ from .generated import model_service_pb2, model_service_pb2_grpc
 from .schemas import BatchModelResponse, ModelResponse
 from .schemas.inputs import GoogleGenAiInput, GoogleVertexAIImagesInput, OpenAIResponsesInput, \
     OpenAIChatCompletionsInput, OpenAIImagesInput, OpenAIImagesEditInput, BatchModelRequest, ModelRequest
+from .json_formatter import JSONFormatter
 
 logger = logging.getLogger(__name__)
 
@@ -37,8 +38,8 @@ if not logger.hasHandlers():
     # Create a log handler that writes to the console
     console_handler = logging.StreamHandler()
 
-    #
-    formatter =
+    # Use the JSON formatter
+    formatter = JSONFormatter()
     console_handler.setFormatter(formatter)
 
     # Attach the handler to this logger
@@ -175,15 +176,18 @@ class TamarModelClient:
             except (grpc.RpcError) as e:
                 if e.code() in {grpc.StatusCode.UNAVAILABLE, grpc.StatusCode.DEADLINE_EXCEEDED}:
                     retry_count += 1
-                    logger.
+                    logger.warning(f"⚠️ gRPC error {e.code()}, retrying {retry_count}/{self.max_retries}...",
+                                   extra={"log_type": "info", "data": {"retry_count": retry_count, "max_retries": self.max_retries, "error_code": str(e.code())}})
                     if retry_count < self.max_retries:
                         delay = self.retry_delay * (2 ** (retry_count - 1))
                         time.sleep(delay)
                     else:
-                        logger.error(f"❌ Max retry reached for {e.code()}"
+                        logger.error(f"❌ Max retry reached for {e.code()}",
+                                     extra={"log_type": "info", "data": {"error_code": str(e.code()), "max_retries_reached": True}})
                         raise
                 else:
-                    logger.error(f"❌ Non-retryable gRPC error: {e}", exc_info=True
+                    logger.error(f"❌ Non-retryable gRPC error: {e}", exc_info=True,
+                                 extra={"log_type": "info", "data": {"error_code": str(e.code()) if hasattr(e, 'code') else None, "retryable": False}})
                     raise
 
     def _build_auth_metadata(self, request_id: str) -> list:
@@ -216,35 +220,43 @@ class TamarModelClient:
                     credentials,
                     options=options
                 )
-                logger.info("🔐 Using secure gRPC channel (TLS enabled)"
+                logger.info("🔐 Using secure gRPC channel (TLS enabled)",
+                            extra={"log_type": "info", "data": {"tls_enabled": True, "server_address": self.server_address}})
             else:
                 self.channel = grpc.insecure_channel(
                     self.server_address,
                     options=options
                 )
-                logger.info("🔓 Using insecure gRPC channel (TLS disabled)"
+                logger.info("🔓 Using insecure gRPC channel (TLS disabled)",
+                            extra={"log_type": "info", "data": {"tls_enabled": False, "server_address": self.server_address}})
 
             # Wait for the channel to be ready (synchronously)
             grpc.channel_ready_future(self.channel).result()  # This is blocking in sync mode
 
             self.stub = model_service_pb2_grpc.ModelServiceStub(self.channel)
-            logger.info(f"✅ gRPC channel initialized to {self.server_address}"
+            logger.info(f"✅ gRPC channel initialized to {self.server_address}",
+                        extra={"log_type": "info", "data": {"status": "success", "server_address": self.server_address}})
             return
         except grpc.FutureTimeoutError as e:
-            logger.error(f"❌ gRPC channel initialization timed out: {str(e)}", exc_info=True
+            logger.error(f"❌ gRPC channel initialization timed out: {str(e)}", exc_info=True,
+                         extra={"log_type": "info", "data": {"error_type": "timeout", "server_address": self.server_address}})
         except grpc.RpcError as e:
-            logger.error(f"❌ gRPC channel initialization failed: {str(e)}", exc_info=True
+            logger.error(f"❌ gRPC channel initialization failed: {str(e)}", exc_info=True,
+                         extra={"log_type": "info", "data": {"error_type": "rpc_error", "server_address": self.server_address}})
         except Exception as e:
-            logger.error(f"❌ Unexpected error during channel initialization: {str(e)}", exc_info=True
+            logger.error(f"❌ Unexpected error during channel initialization: {str(e)}", exc_info=True,
+                         extra={"log_type": "info", "data": {"error_type": "unexpected", "server_address": self.server_address}})
 
         retry_count += 1
         if retry_count > self.max_retries:
-            logger.error(f"❌ Failed to initialize gRPC channel after {self.max_retries} retries.", exc_info=True
+            logger.error(f"❌ Failed to initialize gRPC channel after {self.max_retries} retries.", exc_info=True,
+                         extra={"log_type": "info", "data": {"max_retries_reached": True, "server_address": self.server_address}})
             raise ConnectionError(f"❌ Failed to initialize gRPC channel after {self.max_retries} retries.")
 
         # Exponential backoff: delay = retry_delay * (2 ** (retry_count - 1))
         delay = self.retry_delay * (2 ** (retry_count - 1))
-        logger.
+        logger.warning(f"🔄 Retrying connection (attempt {retry_count}/{self.max_retries}) after {delay:.2f}s delay...",
+                       extra={"log_type": "info", "data": {"retry_count": retry_count, "max_retries": self.max_retries, "delay": delay}})
         time.sleep(delay)  # Blocking sleep in sync version
 
     def _stream(self, request, metadata, invoke_timeout) -> Iterator[ModelResponse]:
@@ -256,6 +268,66 @@ class TamarModelClient:
                 raw_response=json.loads(response.raw_response) if response.raw_response else None,
                 request_id=response.request_id if response.request_id else None,
             )
+
+    def _stream_with_logging(self, request, metadata, invoke_timeout, start_time, model_request) -> Iterator[ModelResponse]:
+        """Wrapper around the streaming response that logs the complete response."""
+        total_content = ""
+        final_usage = None
+        error_occurred = None
+        chunk_count = 0
+
+        try:
+            for response in self._stream(request, metadata, invoke_timeout):
+                chunk_count += 1
+                if response.content:
+                    total_content += response.content
+                if response.usage:
+                    final_usage = response.usage
+                if response.error:
+                    error_occurred = response.error
+                yield response
+
+            # Stream finished; log success
+            duration = time.time() - start_time
+            logger.info(
+                f"✅ Stream completed successfully | chunks: {chunk_count}",
+                extra={
+                    "log_type": "response",
+                    "uri": f"/invoke/{model_request.provider.value}/{model_request.invoke_type.value}",
+                    "duration": duration,
+                    "data": {
+                        "provider": model_request.provider.value,
+                        "invoke_type": model_request.invoke_type.value,
+                        "model": model_request.model,
+                        "stream": True,
+                        "chunks_count": chunk_count,
+                        "total_length": len(total_content),
+                        "usage": final_usage
+                    }
+                }
+            )
+        except Exception as e:
+            # Stream failed; log the error
+            duration = time.time() - start_time
+            logger.error(
+                f"❌ Stream failed after {chunk_count} chunks: {str(e)}",
+                exc_info=True,
+                extra={
+                    "log_type": "response",
+                    "uri": f"/invoke/{model_request.provider.value}/{model_request.invoke_type.value}",
+                    "duration": duration,
+                    "data": {
+                        "provider": model_request.provider.value,
+                        "invoke_type": model_request.invoke_type.value,
+                        "model": model_request.model,
+                        "stream": True,
+                        "chunks_count": chunk_count,
+                        "error_type": type(e).__name__,
+                        "partial_content_length": len(total_content)
+                    }
+                }
+            )
+            raise
 
     def _invoke_request(self, request, metadata, invoke_timeout):
         response = self.stub.Invoke(request, metadata=metadata, timeout=invoke_timeout)
@@ -298,8 +370,22 @@ class TamarModelClient:
         metadata = self._build_auth_metadata(request_id)  # add the request_id to the request headers
 
         # Log request start
+        start_time = time.time()
         logger.info(
-            f"🔵 Request Start |provider: {model_request.provider} | invoke_type: {model_request.invoke_type}"
+            f"🔵 Request Start | request_id: {request_id} | provider: {model_request.provider} | invoke_type: {model_request.invoke_type}",
+            extra={
+                "log_type": "request",
+                "uri": f"/invoke/{model_request.provider.value}/{model_request.invoke_type.value}",
+                "data": {
+                    "provider": model_request.provider.value,
+                    "invoke_type": model_request.invoke_type.value,
+                    "model": model_request.model,
+                    "stream": model_request.stream,
+                    "org_id": model_request.user_context.org_id,
+                    "user_id": model_request.user_context.user_id,
+                    "client_type": model_request.user_context.client_type
+                }
+            })
 
         # Pick the input field dynamically based on provider/invoke_type
         try:
@@ -357,16 +443,62 @@ class TamarModelClient:
         try:
             invoke_timeout = timeout or self.default_invoke_timeout
             if model_request.stream:
-
+                # For streaming responses, use the logging wrapper
+                return self._stream_with_logging(request, metadata, invoke_timeout, start_time, model_request)
             else:
-
+                result = self._retry_request(self._invoke_request, request, metadata, invoke_timeout)
+
+                # Log success for the non-streaming response
+                duration = time.time() - start_time
+                logger.info(
+                    f"✅ Request completed successfully",
+                    extra={
+                        "log_type": "response",
+                        "uri": f"/invoke/{model_request.provider.value}/{model_request.invoke_type.value}",
+                        "duration": duration,
+                        "data": {
+                            "provider": model_request.provider.value,
+                            "invoke_type": model_request.invoke_type.value,
+                            "model": model_request.model,
+                            "stream": False,
+                            "content_length": len(result.content) if result.content else 0,
+                            "usage": result.usage
+                        }
+                    }
+                )
+                return result
         except grpc.RpcError as e:
+            duration = time.time() - start_time
             error_message = f"❌ Invoke gRPC failed: {str(e)}"
-            logger.error(error_message, exc_info=True
+            logger.error(error_message, exc_info=True,
+                         extra={
+                             "log_type": "response",
+                             "uri": f"/invoke/{model_request.provider.value}/{model_request.invoke_type.value}",
+                             "duration": duration,
+                             "data": {
+                                 "error_type": "grpc_error",
+                                 "error_code": str(e.code()) if hasattr(e, 'code') else None,
+                                 "provider": model_request.provider.value,
+                                 "invoke_type": model_request.invoke_type.value,
+                                 "model": model_request.model
+                             }
+                         })
             raise e
         except Exception as e:
+            duration = time.time() - start_time
             error_message = f"❌ Invoke other error: {str(e)}"
-            logger.error(error_message, exc_info=True
+            logger.error(error_message, exc_info=True,
+                         extra={
+                             "log_type": "response",
+                             "uri": f"/invoke/{model_request.provider.value}/{model_request.invoke_type.value}",
+                             "duration": duration,
+                             "data": {
+                                 "error_type": "other_error",
+                                 "provider": model_request.provider.value,
+                                 "invoke_type": model_request.invoke_type.value,
+                                 "model": model_request.model
+                             }
+                         })
             raise e
 
     def invoke_batch(self, batch_request_model: BatchModelRequest, timeout: Optional[float] = None,
@@ -396,8 +528,19 @@ class TamarModelClient:
         metadata = self._build_auth_metadata(request_id)  # add the request_id to the request headers
 
         # Log request start
+        start_time = time.time()
         logger.info(
-            f"🔵 Batch Request Start | batch_size: {len(batch_request_model.items)}"
+            f"🔵 Batch Request Start | request_id: {request_id} | batch_size: {len(batch_request_model.items)}",
+            extra={
+                "log_type": "request",
+                "uri": "/batch_invoke",
+                "data": {
+                    "batch_size": len(batch_request_model.items),
+                    "org_id": batch_request_model.user_context.org_id,
+                    "user_id": batch_request_model.user_context.user_id,
+                    "client_type": batch_request_model.user_context.client_type
+                }
+            })
 
         # Build the batch request
         items = []
@@ -472,17 +615,54 @@ class TamarModelClient:
                     error=res_item.error or None,
                     custom_id=res_item.custom_id if res_item.custom_id else None
                 ))
-
+            batch_response = BatchModelResponse(
                 request_id=response.request_id if response.request_id else None,
                 responses=result
             )
+
+            # Log success
+            duration = time.time() - start_time
+            logger.info(
+                f"✅ Batch request completed successfully",
+                extra={
+                    "log_type": "response",
+                    "uri": "/batch_invoke",
+                    "duration": duration,
+                    "data": {
+                        "batch_size": len(batch_request_model.items),
+                        "responses_count": len(result)
+                    }
+                }
+            )
+            return batch_response
         except grpc.RpcError as e:
+            duration = time.time() - start_time
             error_message = f"❌ BatchInvoke gRPC failed: {str(e)}"
-            logger.error(error_message, exc_info=True
+            logger.error(error_message, exc_info=True,
+                         extra={
+                             "log_type": "response",
+                             "uri": "/batch_invoke",
+                             "duration": duration,
+                             "data": {
+                                 "error_type": "grpc_error",
+                                 "error_code": str(e.code()) if hasattr(e, 'code') else None,
+                                 "batch_size": len(batch_request_model.items)
+                             }
+                         })
             raise e
         except Exception as e:
+            duration = time.time() - start_time
             error_message = f"❌ BatchInvoke other error: {str(e)}"
-            logger.error(error_message, exc_info=True
+            logger.error(error_message, exc_info=True,
+                         extra={
+                             "log_type": "response",
+                             "uri": "/batch_invoke",
+                             "duration": duration,
+                             "data": {
+                                 "error_type": "other_error",
+                                 "batch_size": len(batch_request_model.items)
+                             }
+                         })
             raise e
 
     def close(self):
@@ -490,7 +670,8 @@ class TamarModelClient:
         if self.channel and not self._closed:
             self.channel.close()
             self._closed = True
-            logger.info("✅ gRPC channel closed"
+            logger.info("✅ gRPC channel closed",
+                        extra={"log_type": "info", "data": {"status": "success"}})
 
     def _safe_sync_close(self):
         """Close the channel automatically at process exit (event-loop compatible)."""
@@ -498,7 +679,8 @@ class TamarModelClient:
         try:
             self.close()  # call close directly
         except Exception as e:
-            logger.
+            logger.warning(f"⚠️ gRPC channel close failed at exit: {e}",
+                           extra={"log_type": "info", "data": {"status": "failed", "error": str(e)}})
 
     def __enter__(self):
         """Initialize the connection synchronously."""
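The sync client mirrors this with a plain `with` statement via `__enter__`. A hedged sketch:

```python
from tamar_model_client import TamarModelClient

# Hypothetical usage; constructor arguments and the single-call method name
# are assumptions based on the log messages above. `request` is a ModelRequest
# built elsewhere.
with TamarModelClient() as client:   # __enter__ initializes the connection
    result = client.invoke(request)
```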
{tamar_model_client-0.1.17.dist-info → tamar_model_client-0.1.19.dist-info}/RECORD
CHANGED
@@ -1,8 +1,10 @@
-tamar_model_client/__init__.py,sha256=
-tamar_model_client/async_client.py,sha256=
+tamar_model_client/__init__.py,sha256=4DEIUGlLTeiaECjJQbGYik7C0JO6hHwwfbLYpYpMdzg,444
+tamar_model_client/async_client.py,sha256=PaI0a-hSkp7s99Aib-eL2-Ouh8d3A4E-L6EdSsfeE3w,36843
 tamar_model_client/auth.py,sha256=gbwW5Aakeb49PMbmYvrYlVx1mfyn1LEDJ4qQVs-9DA4,438
 tamar_model_client/exceptions.py,sha256=jYU494OU_NeIa4X393V-Y73mTNm0JZ9yZApnlOM9CJQ,332
-tamar_model_client/
+tamar_model_client/json_formatter.py,sha256=9iO4Qn7FiyPTjcn07uHuP4q80upVlmqI_P1UV12YPxI,991
+tamar_model_client/logging_icons.py,sha256=MRTZ1Xvkep9ce_jdltj54_XZUXvIpQ95soRNmLdJ4qw,1837
+tamar_model_client/sync_client.py,sha256=FXdBeAqCHuLDPKzOyLMsPi33ibbNFLf9hWYfP1NFHuw,32507
 tamar_model_client/utils.py,sha256=Kn6pFz9GEC96H4eejEax66AkzvsrXI3WCSDtgDjnVTI,5238
 tamar_model_client/enums/__init__.py,sha256=3cYYn8ztNGBa_pI_5JGRVYf2QX8fkBVWdjID1PLvoBQ,182
 tamar_model_client/enums/channel.py,sha256=wCzX579nNpTtwzGeS6S3Ls0UzVAgsOlfy4fXMzQTCAw,199
@@ -14,7 +16,7 @@ tamar_model_client/generated/model_service_pb2_grpc.py,sha256=k4tIbp3XBxdyuOVR18
 tamar_model_client/schemas/__init__.py,sha256=AxuI-TcvA4OMTj2FtK4wAItvz9LrK_293pu3cmMLE7k,394
 tamar_model_client/schemas/inputs.py,sha256=dz1m8NbUIxA99JXZc8WlyzbKpDuz1lEzx3VghC33zYI,14625
 tamar_model_client/schemas/outputs.py,sha256=M_fcqUtXPJnfiLabHlyA8BorlC5pYkf5KLjXO1ysKIQ,1031
-tamar_model_client-0.1.
-tamar_model_client-0.1.
-tamar_model_client-0.1.
-tamar_model_client-0.1.
+tamar_model_client-0.1.19.dist-info/METADATA,sha256=-eCR9u_ULva97BJ0GwuaAuwEL7xyXGlmD2TwQUNSMxw,16562
+tamar_model_client-0.1.19.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+tamar_model_client-0.1.19.dist-info/top_level.txt,sha256=_LfDhPv_fvON0PoZgQuo4M7EjoWtxPRoQOBJziJmip8,19
+tamar_model_client-0.1.19.dist-info/RECORD,,

{tamar_model_client-0.1.17.dist-info → tamar_model_client-0.1.19.dist-info}/WHEEL
File without changes

{tamar_model_client-0.1.17.dist-info → tamar_model_client-0.1.19.dist-info}/top_level.txt
File without changes