tamar-model-client 0.1.30__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -598,7 +598,7 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):

         return delay

-    async def _stream(self, request, metadata, invoke_timeout) -> AsyncIterator[ModelResponse]:
+    async def _stream(self, request, metadata, invoke_timeout, request_id=None, origin_request_id=None) -> AsyncIterator[ModelResponse]:
         """
         Handle streaming responses

@@ -606,8 +606,10 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):

         Args:
             request: The gRPC request object
-            metadata: Request metadata
+            metadata: Request metadata (kept for compatibility, but ignored)
             invoke_timeout: Overall timeout
+            request_id: Request ID
+            origin_request_id: Original request ID

         Yields:
             ModelResponse: Each chunk of the streaming response
@@ -615,7 +617,12 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
         Raises:
             TimeoutError: raised when waiting for the next chunk times out
         """
-        stream_iter = self.stub.Invoke(request, metadata=metadata, timeout=invoke_timeout).__aiter__()
+        # Regenerate the metadata on every call so the JWT token is always fresh
+        fresh_metadata = self._build_auth_metadata(
+            request_id or get_request_id(),
+            origin_request_id
+        )
+        stream_iter = self.stub.Invoke(request, metadata=fresh_metadata, timeout=invoke_timeout).__aiter__()
         chunk_timeout = 30.0  # timeout for a single chunk

         try:
@@ -636,7 +643,7 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
         except Exception as e:
             raise

-    async def _stream_with_logging(self, request, metadata, invoke_timeout, start_time, model_request) -> AsyncIterator[
+    async def _stream_with_logging(self, request, metadata, invoke_timeout, start_time, model_request, request_id=None, origin_request_id=None) -> AsyncIterator[
        ModelResponse]:
         """Wrapper around the streaming response that logs the full response and handles retries"""
         total_content = ""
@@ -645,7 +652,7 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
         chunk_count = 0

         # Obtain the stream generator through the retry logic
-        stream_generator = self._retry_request_stream(self._stream, request, metadata, invoke_timeout, request_id=get_request_id())
+        stream_generator = self._retry_request_stream(self._stream, request, metadata, invoke_timeout, request_id=request_id or get_request_id(), origin_request_id=origin_request_id)

         try:
             async for response in stream_generator:
@@ -719,9 +726,22 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
             )
             raise

-    async def _invoke_request(self, request, metadata, invoke_timeout):
-        """Execute a single non-streaming request"""
-        async for response in self.stub.Invoke(request, metadata=metadata, timeout=invoke_timeout):
+    async def _invoke_request(self, request, metadata, invoke_timeout, request_id=None, origin_request_id=None):
+        """Execute a single non-streaming request
+
+        Args:
+            request: The gRPC request object
+            metadata: Request metadata (kept for compatibility, but ignored)
+            invoke_timeout: Request timeout
+            request_id: Request ID
+            origin_request_id: Original request ID
+        """
+        # Regenerate the metadata on every call so the JWT token is always fresh
+        fresh_metadata = self._build_auth_metadata(
+            request_id or get_request_id(),
+            origin_request_id
+        )
+        async for response in self.stub.Invoke(request, metadata=fresh_metadata, timeout=invoke_timeout):
             return ResponseHandler.build_model_response(response)

     async def invoke(self, model_request: ModelRequest, timeout: Optional[float] = None,
@@ -817,13 +837,13 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
         invoke_timeout = timeout or self.default_invoke_timeout
         if model_request.stream:
             # For streaming responses, return the logging wrapper directly
-            return self._stream_with_logging(request, metadata, invoke_timeout, start_time, model_request)
+            return self._stream_with_logging(request, metadata, invoke_timeout, start_time, model_request, request_id, origin_request_id)
         else:
             # Store model_request and origin_request_id for use by the retry methods
             self._current_model_request = model_request
             self._current_origin_request_id = origin_request_id
             try:
-                result = await self._retry_request(self._invoke_request, request, metadata, invoke_timeout, request_id=request_id)
+                result = await self._retry_request(self._invoke_request, request, metadata, invoke_timeout, request_id=request_id, origin_request_id=origin_request_id)
             finally:
                 # Clean up the temporary storage
                 if hasattr(self, '_current_model_request'):
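
The async-client hunks above thread request_id and origin_request_id through to the retry helpers so that every retry attempt can rebuild its own auth metadata instead of reusing whatever was captured when invoke() was first called. A minimal, self-contained sketch of that per-attempt refresh pattern; retry_request, build_metadata, and flaky_send below are illustrative stand-ins, not the client's real helpers:

import asyncio
import time
from typing import Awaitable, Callable, List, Tuple

Metadata = List[Tuple[str, str]]

def build_metadata(request_id: str) -> Metadata:
    # Stand-in for _build_auth_metadata: mint a fresh bearer token per attempt.
    return [("x-request-id", request_id),
            ("authorization", f"Bearer token-{int(time.time())}")]

async def retry_request(send: Callable[[Metadata], Awaitable[str]],
                        request_id: str, retries: int = 3, backoff: float = 0.5) -> str:
    for attempt in range(retries):
        try:
            # Metadata is rebuilt inside the loop, so a long backoff
            # cannot leave an expired JWT attached to the retry.
            return await send(build_metadata(request_id))
        except ConnectionError:
            if attempt == retries - 1:
                raise
            await asyncio.sleep(backoff * (2 ** attempt))
    raise RuntimeError("unreachable")

async def _demo() -> None:
    calls = {"n": 0}

    async def flaky_send(metadata: Metadata) -> str:
        calls["n"] += 1
        if calls["n"] < 2:
            raise ConnectionError("transient failure")
        return f"ok for {dict(metadata)['x-request-id']}"

    print(await retry_request(flaky_send, request_id="req-123"))

if __name__ == "__main__":
    asyncio.run(_demo())

Rebuilding inside the loop matters because exponential backoff can easily outlive a short-lived JWT; that staleness is what the fresh_metadata changes above are commented as avoiding.
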
@@ -1,14 +1,43 @@
 import time
 import jwt
+from typing import Optional


 # JWT handler class
 class JWTAuthHandler:
     def __init__(self, secret_key: str):
         self.secret_key = secret_key
+        self._token_cache: Optional[str] = None
+        self._token_exp_time: Optional[int] = None

     def encode_token(self, payload: dict, expires_in: int = 3600) -> str:
         """Generate a JWT token with an expiry time"""
         payload = payload.copy()
-        payload["exp"] = int(time.time()) + expires_in
-        return jwt.encode(payload, self.secret_key, algorithm="HS256")
+        exp_time = int(time.time()) + expires_in
+        payload["exp"] = exp_time
+        token = jwt.encode(payload, self.secret_key, algorithm="HS256")
+
+        # Cache the token and its expiry time
+        self._token_cache = token
+        self._token_exp_time = exp_time
+
+        return token
+
+    def is_token_expiring_soon(self, buffer_seconds: int = 60) -> bool:
+        """Check whether the token is about to expire
+
+        Args:
+            buffer_seconds: How many seconds before expiry the token is treated as expiring; defaults to 60
+
+        Returns:
+            bool: True if the token is about to expire or has already expired
+        """
+        if not self._token_exp_time:
+            return True
+
+        current_time = int(time.time())
+        return current_time >= (self._token_exp_time - buffer_seconds)
+
+    def get_cached_token(self) -> Optional[str]:
+        """Return the cached token"""
+        return self._token_cache
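
A usage sketch of the caching behaviour introduced above, assuming the class is importable as tamar_model_client.auth.JWTAuthHandler (the module whose checksum changes in the RECORD section below); the secret and payload are throwaway values:

from tamar_model_client.auth import JWTAuthHandler  # assumed import path

handler = JWTAuthHandler(secret_key="dev-only-secret")  # illustrative secret
payload = {"iss": "tamar-model-client"}                 # illustrative claims

token = handler.encode_token(payload, expires_in=3600)

# Later callers keep reusing the cached token until roughly 60s before its
# "exp" claim, and only then pay the cost of signing a new one.
if handler.is_token_expiring_soon(buffer_seconds=60):
    token = handler.encode_token(payload, expires_in=3600)
else:
    token = handler.get_cached_token() or handler.encode_token(payload)

print(token is handler.get_cached_token())  # True: the latest encode is cached
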
@@ -165,10 +165,27 @@ class BaseClient(ABC):
             metadata.append(("x-origin-request-id", origin_request_id))

         if self.jwt_handler:
-            self.jwt_token = self.jwt_handler.encode_token(
-                self.default_payload,
-                expires_in=self.token_expires_in
-            )
+            # Check whether the token is about to expire and refresh it if so
+            if self.jwt_handler.is_token_expiring_soon():
+                self.jwt_token = self.jwt_handler.encode_token(
+                    self.default_payload,
+                    expires_in=self.token_expires_in
+                )
+            else:
+                # Reuse the cached token
+                cached_token = self.jwt_handler.get_cached_token()
+                if cached_token:
+                    self.jwt_token = cached_token
+                else:
+                    # No cached token, so generate a new one
+                    self.jwt_token = self.jwt_handler.encode_token(
+                        self.default_payload,
+                        expires_in=self.token_expires_in
+                    )
+
+            metadata.append(("authorization", f"Bearer {self.jwt_token}"))
+        elif self.jwt_token:
+            # Use a pre-generated token supplied by the caller
             metadata.append(("authorization", f"Bearer {self.jwt_token}"))

         return metadata
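
The same refresh-or-reuse decision, reduced to a standalone function for readability; resolve_bearer_token and its parameters are illustrative names, not attributes of BaseClient:

def resolve_bearer_token(handler, payload: dict, expires_in: int = 3600) -> str:
    """Return a token with more than the expiry buffer remaining."""
    if handler.is_token_expiring_soon():
        # Covers both "nothing cached yet" and "cached token near expiry".
        return handler.encode_token(payload, expires_in=expires_in)
    return handler.get_cached_token() or handler.encode_token(payload, expires_in=expires_in)

With the default 60-second buffer and hour-long tokens, the secret is used for re-signing roughly once an hour per process instead of on every request.
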
@@ -322,14 +322,35 @@ class AsyncHttpFallbackMixin:
     """

     async def _ensure_http_client(self) -> None:
-        """Ensure async HTTP client is initialized"""
+        """Ensure async HTTP client is initialized in the correct event loop"""
+        import asyncio
+        import aiohttp
+
+        # Get current event loop
+        current_loop = asyncio.get_running_loop()
+
+        # Check if we need to recreate the session
+        need_new_session = False
+
         if not hasattr(self, '_http_session') or not self._http_session:
-            import aiohttp
+            need_new_session = True
+        elif hasattr(self, '_http_session_loop') and self._http_session_loop != current_loop:
+            # Session was created in a different event loop
+            logger.warning("🔄 HTTP session bound to different event loop, recreating...")
+            # Close old session if possible
+            try:
+                await self._http_session.close()
+            except Exception as e:
+                logger.debug(f"Error closing old session: {e}")
+            need_new_session = True
+
+        if need_new_session:
             self._http_session = aiohttp.ClientSession(
                 headers={
                     'User-Agent': 'AsyncTamarModelClient/1.0'
                 }
             )
+            self._http_session_loop = current_loop

         # Note: JWT token will be set per request in headers

@@ -528,4 +549,6 @@ class AsyncHttpFallbackMixin:
         """Clean up HTTP session"""
         if hasattr(self, '_http_session') and self._http_session:
             await self._http_session.close()
-            self._http_session = None
+        self._http_session = None
+        if hasattr(self, '_http_session_loop'):
+            self._http_session_loop = None
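
A self-contained sketch of the loop-affinity handling shown in the two hunks above, using a standalone holder class instead of the mixin itself (LoopBoundSession and its attributes are illustrative names):

import asyncio
from typing import Optional

import aiohttp

class LoopBoundSession:
    """Recreate the aiohttp session whenever the running event loop changes."""

    def __init__(self) -> None:
        self._session: Optional[aiohttp.ClientSession] = None
        self._loop: Optional[asyncio.AbstractEventLoop] = None

    async def get(self) -> aiohttp.ClientSession:
        current_loop = asyncio.get_running_loop()
        if self._session is None or self._session.closed or self._loop is not current_loop:
            if self._session is not None and not self._session.closed:
                try:
                    await self._session.close()  # may be bound to a dead loop
                except Exception:
                    pass
            self._session = aiohttp.ClientSession(
                headers={"User-Agent": "AsyncTamarModelClient/1.0"}
            )
            self._loop = current_loop
        return self._session

    async def close(self) -> None:
        if self._session is not None and not self._session.closed:
            await self._session.close()
        self._session = None
        self._loop = None

Without such a check, a process that calls asyncio.run() more than once would reuse a ClientSession bound to the first, already-closed event loop and fail on the second run, which appears to be the situation the recreation logic above guards against.
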
@@ -137,10 +137,12 @@ class OpenAIImagesInput(BaseModel):
     n: Optional[int] | NotGiven = NOT_GIVEN
     output_compression: Optional[int] | NotGiven = NOT_GIVEN
     output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN
+    partial_images: Optional[int] | NotGiven = NOT_GIVEN,
     quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN
     response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN
     size: Optional[Literal[
        "auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN
+    stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
     style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN
     user: str | NotGiven = NOT_GIVEN
     extra_headers: Headers | None = None
@@ -157,15 +159,18 @@ class OpenAIImagesEditInput(BaseModel):
     image: Union[FileTypes, List[FileTypes]]
     prompt: str
     background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN
+    input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN,
     mask: FileTypes | NotGiven = NOT_GIVEN
     model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN
     n: Optional[int] | NotGiven = NOT_GIVEN
+    output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+    output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+    partial_images: Optional[int] | NotGiven = NOT_GIVEN,
     quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN
     response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN
     size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | NotGiven = NOT_GIVEN
+    stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
     user: str | NotGiven = NOT_GIVEN
-    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-    # The extra values given here take precedence over values defined on the client or passed to this method.
     extra_headers: Headers | None = None
     extra_query: Query | None = None
     extra_body: Body | None = None
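
The new fields follow the same NOT_GIVEN sentinel convention as the surrounding ones: a sentinel default means "omit this field from the request", which is distinct from an explicit None or False. A minimal sketch of that pattern with local stand-ins (not the package's own NotGiven/NOT_GIVEN objects):

from typing import Any, Dict

class _NotGiven:
    def __repr__(self) -> str:
        return "NOT_GIVEN"

NOT_GIVEN = _NotGiven()

def build_payload(**fields: Any) -> Dict[str, Any]:
    # Drop sentinel values so "not provided" never reaches the wire,
    # while explicit None / False values are preserved.
    return {k: v for k, v in fields.items() if not isinstance(v, _NotGiven)}

print(build_payload(prompt="a cat", stream=True, partial_images=NOT_GIVEN))
# -> {'prompt': 'a cat', 'stream': True}
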
@@ -742,14 +742,16 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
         else:
             raise TamarModelException("Unknown streaming error occurred")

-    def _stream(self, request, metadata, invoke_timeout) -> Iterator[ModelResponse]:
+    def _stream(self, request, metadata, invoke_timeout, request_id=None, origin_request_id=None) -> Iterator[ModelResponse]:
         """
         Handle streaming responses

         Args:
             request: The gRPC request object
-            metadata: Request metadata
+            metadata: Request metadata (kept for compatibility, but ignored)
             invoke_timeout: Overall timeout
+            request_id: Request ID
+            origin_request_id: Original request ID

         Yields:
             ModelResponse: Each chunk of the streaming response
@@ -757,6 +759,11 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
         Raises:
             TimeoutError: raised when waiting for the next chunk times out
         """
+        # Regenerate the metadata on every call so the JWT token is always fresh
+        fresh_metadata = self._build_auth_metadata(
+            request_id or get_request_id(),
+            origin_request_id
+        )
         import threading
         import queue

@@ -767,7 +774,7 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
         def fetch_responses():
             """Fetch the streaming responses in a separate thread"""
             try:
-                for response in self.stub.Invoke(request, metadata=metadata, timeout=invoke_timeout):
+                for response in self.stub.Invoke(request, metadata=fresh_metadata, timeout=invoke_timeout):
                     response_queue.put(response)
                 response_queue.put(None)  # mark the end of the stream
             except Exception as e:
@@ -799,7 +806,7 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
             except queue.Empty:
                 raise TimeoutError(f"Streaming response timed out while waiting for the next chunk ({chunk_timeout}s)")

-    def _stream_with_logging(self, request, metadata, invoke_timeout, start_time, model_request) -> Iterator[
+    def _stream_with_logging(self, request, metadata, invoke_timeout, start_time, model_request, request_id=None, origin_request_id=None) -> Iterator[
        ModelResponse]:
         """Wrapper around the streaming response that logs the full response and handles retries"""
         total_content = ""
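
For context, the sync client's _stream shown above drains the gRPC iterator on a producer thread and consumes it through a queue with a per-chunk timeout. A compact, standalone sketch of that thread-plus-queue pattern with an illustrative source iterator (none of the names below come from the client itself):

import queue
import threading
import time
from typing import Iterator

def stream_with_chunk_timeout(source: Iterator[str], chunk_timeout: float = 30.0) -> Iterator[str]:
    """Yield items from `source`, failing fast if the next item takes too long."""
    q: "queue.Queue[object]" = queue.Queue()
    _END = object()

    def producer() -> None:
        try:
            for item in source:
                q.put(item)
            q.put(_END)              # mark the end of the stream
        except Exception as exc:     # surface producer errors to the consumer
            q.put(exc)

    threading.Thread(target=producer, daemon=True).start()

    while True:
        try:
            item = q.get(timeout=chunk_timeout)
        except queue.Empty:
            raise TimeoutError(f"no chunk received within {chunk_timeout}s")
        if item is _END:
            return
        if isinstance(item, Exception):
            raise item
        yield item

# Example: a slow source that still stays under the timeout.
def slow_source() -> Iterator[str]:
    for chunk in ("hel", "lo"):
        time.sleep(0.1)
        yield chunk

print(list(stream_with_chunk_timeout(slow_source(), chunk_timeout=1.0)))  # ['hel', 'lo']
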
@@ -808,7 +815,7 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
         chunk_count = 0

         try:
-            for response in self._stream(request, metadata, invoke_timeout):
+            for response in self._stream(request, metadata, invoke_timeout, request_id, origin_request_id):
                 chunk_count += 1
                 if response.content:
                     total_content += response.content
@@ -879,9 +886,22 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
             )
             raise

-    def _invoke_request(self, request, metadata, invoke_timeout):
-        """Execute a single non-streaming request"""
-        response = self.stub.Invoke(request, metadata=metadata, timeout=invoke_timeout)
+    def _invoke_request(self, request, metadata, invoke_timeout, request_id=None, origin_request_id=None):
+        """Execute a single non-streaming request
+
+        Args:
+            request: The gRPC request object
+            metadata: Request metadata (kept for compatibility, but ignored)
+            invoke_timeout: Request timeout
+            request_id: Request ID
+            origin_request_id: Original request ID
+        """
+        # Regenerate the metadata on every call so the JWT token is always fresh
+        fresh_metadata = self._build_auth_metadata(
+            request_id or get_request_id(),
+            origin_request_id
+        )
+        response = self.stub.Invoke(request, metadata=fresh_metadata, timeout=invoke_timeout)
         for response in response:
             return ResponseHandler.build_model_response(response)

@@ -980,14 +1000,14 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
             return self._retry_request_stream(
                 self._stream_with_logging,
                 request, metadata, invoke_timeout, start_time, model_request,
-                request_id=request_id
+                request_id=request_id, origin_request_id=origin_request_id
             )
         else:
             # Store model_request and origin_request_id for use by the retry methods
             self._current_model_request = model_request
             self._current_origin_request_id = origin_request_id
             try:
-                result = self._retry_request(self._invoke_request, request, metadata, invoke_timeout, request_id=request_id)
+                result = self._retry_request(self._invoke_request, request, metadata, invoke_timeout, request_id=request_id, origin_request_id=origin_request_id)
             finally:
                 # Clean up the temporary storage
                 if hasattr(self, '_current_model_request'):
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tamar-model-client
-Version: 0.1.30
+Version: 0.2.1
 Summary: A Python SDK for interacting with the Model Manager gRPC service
 Home-page: http://gitlab.tamaredge.top/project-tap/AgentOS/model-manager-client
 Author: Oscar Ou
@@ -1,16 +1,16 @@
 tamar_model_client/__init__.py,sha256=4DEIUGlLTeiaECjJQbGYik7C0JO6hHwwfbLYpYpMdzg,444
-tamar_model_client/async_client.py,sha256=J3787otYGW2ycizLC5HDPibm_USNy4oKI5QQUo3L-aE,45328
-tamar_model_client/auth.py,sha256=gbwW5Aakeb49PMbmYvrYlVx1mfyn1LEDJ4qQVs-9DA4,438
+tamar_model_client/async_client.py,sha256=047a3Ts6Qe2Wcs4xyxtG71kvYngTuSHFzB3V8D97_ec,46431
+tamar_model_client/auth.py,sha256=DrtnFpG0ZKFUnTnV_Y-FuLRiC2kobcgg0W5Gr1ywg1k,1398
 tamar_model_client/circuit_breaker.py,sha256=Y3AVp7WzVYU-ubcmovKsJ8DRJbbO4G7vdZgSjnwcWJQ,5550
 tamar_model_client/error_handler.py,sha256=y7EipcqkXbCecSAOsnoSP3SH7hvZSNF_NUHooTi3hP0,18364
 tamar_model_client/exceptions.py,sha256=EOr4JMYI7hVszRvNYJ1JqsUNpVmd16T2KpJ0MkFTsUE,13073
 tamar_model_client/json_formatter.py,sha256=XT8XPMKKM2M22tuYR2e1rvWHcpz3UD9iLLgGPsGOjCI,2410
 tamar_model_client/logging_icons.py,sha256=MRTZ1Xvkep9ce_jdltj54_XZUXvIpQ95soRNmLdJ4qw,1837
-tamar_model_client/sync_client.py,sha256=vBxVvDFeY_Sd7JRLJwkdOcm6sCxmGaDW0tyCspp-n7E,52671
+tamar_model_client/sync_client.py,sha256=FbyjuyDRiXklSS_l5h5fwNxvABI-hpLGiIWAXqhPHoI,53760
 tamar_model_client/utils.py,sha256=Kn6pFz9GEC96H4eejEax66AkzvsrXI3WCSDtgDjnVTI,5238
 tamar_model_client/core/__init__.py,sha256=RMiZjV1S4csWPLxB_JfdOea8fYPz97Oj3humQSBw1OI,1054
-tamar_model_client/core/base_client.py,sha256=XUbMDM6B3ZtAnAdgSDF-bdLgACY46igCcMVoiMC3faQ,13056
-tamar_model_client/core/http_fallback.py,sha256=ULmHXfKPwP4T32xo7yQV_z2bGaI_L71BQIcylFs8dTM,21243
+tamar_model_client/core/base_client.py,sha256=spb4zjDuPczqnXNlDcIq_bDQ09TOpxeeuX7IxpTS_38,13859
+tamar_model_client/core/http_fallback.py,sha256=2N7-N_TZrtffDjuv9s3-CD8Xy7qw9AuI5xeWGUnGQ0w,22217
 tamar_model_client/core/logging_setup.py,sha256=-MXzTR4Ax50H16cbq1jCXbxgayf5fZ0U3o0--fMmxD8,6692
 tamar_model_client/core/request_builder.py,sha256=yi8iy2Ps2m4d1YwIFiQLRxTvxQxgEGV576aXnNYRl7E,8507
 tamar_model_client/core/request_id_manager.py,sha256=S-Mliaby9zN_bx-B85FvVnttal-w0skkjy2ZvWoQ5vw,3689
@@ -24,7 +24,7 @@ tamar_model_client/generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NM
 tamar_model_client/generated/model_service_pb2.py,sha256=RI6wNSmgmylzWPedFfPxx938UzS7kcPR58YTzYshcL8,3066
 tamar_model_client/generated/model_service_pb2_grpc.py,sha256=k4tIbp3XBxdyuOVR18Ung_4SUryONB51UYf_uUEl6V4,5145
 tamar_model_client/schemas/__init__.py,sha256=AxuI-TcvA4OMTj2FtK4wAItvz9LrK_293pu3cmMLE7k,394
-tamar_model_client/schemas/inputs.py,sha256=dz1m8NbUIxA99JXZc8WlyzbKpDuz1lEzx3VghC33zYI,14625
+tamar_model_client/schemas/inputs.py,sha256=vrBym9ywj0Zp9vegp-t6EWDnBiI_bAIQVWYhOY7Vh1A,14892
 tamar_model_client/schemas/outputs.py,sha256=M_fcqUtXPJnfiLabHlyA8BorlC5pYkf5KLjXO1ysKIQ,1031
 tests/__init__.py,sha256=kbmImddLDwdqlkkmkyKtl4bQy_ipe-R8eskpaBylU9w,38
 tests/stream_hanging_analysis.py,sha256=W3W48IhQbNAR6-xvMpoWZvnWOnr56CTaH4-aORNBuD4,14807
@@ -32,7 +32,7 @@ tests/test_circuit_breaker.py,sha256=nhEBnyXFjIYjRWlUdu7Z9PnPq48ypbBK6fxN6deHedw
 tests/test_google_azure_final.py,sha256=Cx2lfnoj48_7pUjpCYbrx6OLJF4cI79McV24_EYt_8s,55093
 tests/test_logging_issue.py,sha256=JTMbotfHpAEPMBj73pOwxPn-Zn4QVQJX6scMz48FRDQ,2427
 tests/test_simple.py,sha256=Xf0U-J9_xn_LzUsmYu06suK0_7DrPeko8OHoHldsNxE,7169
-tamar_model_client-0.1.30.dist-info/METADATA,sha256=QrJLOVUNNXKGNv4ZJRPKDFLP9d9JUlfrqQNN-FQuAvA,41310
-tamar_model_client-0.1.30.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-tamar_model_client-0.1.30.dist-info/top_level.txt,sha256=f1I-S8iWN-cgv4gB8gxRg9jJOTJMumvm4oGKVPfGg6A,25
-tamar_model_client-0.1.30.dist-info/RECORD,,
+tamar_model_client-0.2.1.dist-info/METADATA,sha256=sB0xQbFo6EAeT8ALLXbE9HEB4sQz9u8fdJMswLKMnAQ,41309
+tamar_model_client-0.2.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+tamar_model_client-0.2.1.dist-info/top_level.txt,sha256=f1I-S8iWN-cgv4gB8gxRg9jJOTJMumvm4oGKVPfGg6A,25
+tamar_model_client-0.2.1.dist-info/RECORD,,