LLM-Bridge 1.12.6__py3-none-any.whl → 1.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -58,27 +58,17 @@ class NonStreamClaudeClient(ClaudeClient):
         try:
             logging.info(f"messages: {self.messages}")

-            if self.thinking:
-                message = await self.client.beta.messages.create(
-                    model=self.model,
-                    max_tokens=self.max_tokens,
-                    temperature=self.temperature,
-                    system=self.system,
-                    messages=serialize(self.messages),
-                    betas=self.betas,
-                    tools=self.tools,
-                    thinking=self.thinking,
-                )
-            else:
-                message = await self.client.beta.messages.create(
-                    model=self.model,
-                    max_tokens=self.max_tokens,
-                    temperature=self.temperature,
-                    system=self.system,
-                    messages=serialize(self.messages),
-                    betas=self.betas,
-                    tools=self.tools,
-                )
+            message = await self.client.beta.messages.create(
+                model=self.model,
+                max_tokens=self.max_tokens,
+                temperature=self.temperature,
+                system=self.system,
+                messages=serialize(self.messages),
+                betas=self.betas,
+                tools=self.tools,
+                thinking=self.thinking,
+                output_format=self.output_format,
+            )

             return await process_claude_non_stream_response(
                 message=message,
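Note on the hunk above (and the matching stream hunk below): the thinking/non-thinking branches collapse into one call because anthropic 0.75.0 passes disabled options as the SDK's Omit sentinel (created in the claude_client_factory hunk further down), which the SDK strips from the request body instead of sending an explicit null. A minimal sketch of the sentinel pattern, assuming that documented Omit behavior; the model name and prompt are illustrative, not code from this package:

import asyncio

import anthropic
from anthropic import Omit


async def main() -> None:
    client = anthropic.AsyncAnthropic()
    # A field set to Omit() is excluded from the outgoing request body,
    # so one call site covers both the thinking and non-thinking paths.
    thinking = Omit()  # replaced with a ThinkingConfigEnabledParam when enabled
    message = await client.beta.messages.create(
        model="claude-sonnet-4-5",  # illustrative
        max_tokens=1024,
        messages=[{"role": "user", "content": "Hello"}],
        thinking=thinking,
    )
    print(message.content)


asyncio.run(main())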
@@ -16,41 +16,24 @@ class StreamClaudeClient(ClaudeClient):
         logging.info(f"messages: {self.messages}")

         try:
-            if self.thinking:
-                async with self.client.beta.messages.stream(
-                    model=self.model,
-                    max_tokens=self.max_tokens,
-                    temperature=self.temperature,
-                    system=self.system,
-                    messages=serialize(self.messages),
-                    betas=self.betas,
-                    tools=self.tools,
-                    thinking=self.thinking,
-                ) as stream:
-                    async for event in stream:
-                        yield await process_claude_stream_response(
-                            event=event,
-                            input_tokens=self.input_tokens,
-                            client=self.client,
-                            model=self.model,
-                        )
-            else:
-                async with self.client.beta.messages.stream(
+            async with self.client.beta.messages.stream(
+                model=self.model,
+                max_tokens=self.max_tokens,
+                temperature=self.temperature,
+                system=self.system,
+                messages=serialize(self.messages),
+                betas=self.betas,
+                tools=self.tools,
+                thinking=self.thinking,
+                output_format=self.output_format,
+            ) as stream:
+                async for event in stream:
+                    yield await process_claude_stream_response(
+                        event=event,
+                        input_tokens=self.input_tokens,
+                        client=self.client,
                         model=self.model,
-                    max_tokens=self.max_tokens,
-                    temperature=self.temperature,
-                    system=self.system,
-                    messages=serialize(self.messages),
-                    betas=self.betas,
-                    tools=self.tools,
-                ) as stream:
-                    async for event in stream:
-                        yield await process_claude_stream_response(
-                            event=event,
-                            input_tokens=self.input_tokens,
-                            client=self.client,
-                            model=self.model,
-                        )
+                    )

         except Exception as e:
             logging.exception(e)
@@ -86,6 +86,7 @@ class NonStreamOpenAIResponsesClient(OpenAIClient):
             temperature=self.temperature,
             stream=False,
             tools=self.tools,
+            # text_format=self.structured_output_base_model,  # Async OpenAI Responses Client does not support structured output
         )

         return process_openai_responses_non_stream_response(
@@ -86,6 +86,7 @@ class StreamOpenAIResponsesClient(OpenAIClient):
             temperature=self.temperature,
             stream=True,
             tools=self.tools,
+            # text_format=self.structured_output_base_model,  # Async OpenAI Responses Client does not support structured output
         )

     except httpx.HTTPStatusError as e:
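Both Responses clients leave text_format commented out rather than wiring the converted pydantic model through. For comparison, openai-python 2.x exposes structured output on the Responses API through the separate parse() helper rather than a text_format argument to create(); a hedged sketch of that route (the Answer model and model name are illustrative, not code from this package):

import asyncio

import openai
from pydantic import BaseModel


class Answer(BaseModel):  # illustrative schema model
    answer: str


async def main() -> None:
    client = openai.AsyncOpenAI()
    # parse() validates the response against the pydantic model and
    # exposes the typed result as response.output_parsed.
    response = await client.responses.parse(
        model="gpt-4o",  # illustrative
        input="Answer in JSON.",
        text_format=Answer,
    )
    print(response.output_parsed)


asyncio.run(main())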
@@ -22,6 +22,7 @@ class ClaudeClient(ChatClient):
             input_tokens: int,
             tools: list[BetaToolUnionParam],
             thinking: ThinkingConfigEnabledParam,
+            output_format: dict[str, Any] | None,
     ):
         self.model = model
         self.messages = messages
@@ -33,8 +34,10 @@ class ClaudeClient(ChatClient):
         self.input_tokens = input_tokens
         self.tools = tools
         self.thinking = thinking
+        self.output_format = output_format

     async def generate_non_stream_response(self) -> ChatResponse:
         raise NotImplementedError
+
     async def generate_stream_response(self) -> AsyncGenerator[ChatResponse, None]:
         raise NotImplementedError
@@ -1,4 +1,5 @@
-from typing import AsyncGenerator, Iterable
+from typing import AsyncGenerator, Iterable, Type, Any
+from pydantic import BaseModel

 import openai.lib.azure
 from openai.types import Reasoning
@@ -20,6 +21,7 @@ class OpenAIClient(ChatClient):
             client: openai.AsyncOpenAI | openai.lib.azure.AsyncAzureOpenAI,
             tools: Iterable[ToolParam],
             reasoning: Reasoning,
+            structured_output_base_model: Type[BaseModel] | None = None,
     ):
         self.model = model
         self.messages = messages
@@ -28,8 +30,10 @@ class OpenAIClient(ChatClient):
         self.client = client
         self.tools = tools
         self.reasoning = reasoning
+        self.structured_output_base_model = structured_output_base_model

     async def generate_non_stream_response(self) -> ChatResponse:
         raise NotImplementedError
+
     async def generate_stream_response(self) -> AsyncGenerator[ChatResponse, None]:
         raise NotImplementedError
@@ -1,3 +1,5 @@
+from typing import Any
+
 from fastapi import HTTPException

 from llm_bridge.client.chat_client import ChatClient
@@ -16,7 +18,7 @@ async def create_chat_client(
         stream: bool,
         thought: bool,
         code_execution: bool,
-        structured_output_schema: dict | None = None,
+        structured_output_schema: dict[str, Any] | None,
 ) -> ChatClient:
     if api_type == 'OpenAI':
         return await create_openai_client(
@@ -28,6 +30,7 @@ async def create_chat_client(
             stream=stream,
             thought=thought,
             code_execution=code_execution,
+            structured_output_schema=structured_output_schema,
         )
     elif api_type == 'OpenAI-Azure':
         return await create_openai_client(
@@ -42,6 +45,7 @@ async def create_chat_client(
             stream=stream,
             thought=thought,
             code_execution=code_execution,
+            structured_output_schema=structured_output_schema,
         )
     elif api_type == 'OpenAI-GitHub':
         return await create_openai_client(
@@ -53,6 +57,7 @@ async def create_chat_client(
             stream=stream,
             thought=thought,
             code_execution=code_execution,
+            structured_output_schema=structured_output_schema,
         )
     elif api_type == 'Grok':
         return await create_openai_client(
@@ -64,6 +69,7 @@ async def create_chat_client(
             stream=stream,
             thought=thought,
             code_execution=code_execution,
+            structured_output_schema=structured_output_schema,
         )
     elif api_type == 'Gemini-Free':
         return await create_gemini_client(
@@ -110,6 +116,7 @@ async def create_chat_client(
             stream=stream,
             thought=thought,
             code_execution=code_execution,
+            structured_output_schema=structured_output_schema,
         )
     else:
         raise HTTPException(status_code=400, detail="Invalid API type")
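structured_output_schema is now threaded through every OpenAI-compatible branch, and the parameter has lost its = None default, so existing callers of create_chat_client must pass it explicitly (None disables structured output). The expected value is a plain JSON-schema dict; a minimal illustrative payload (field names assumed, not taken from this package):

# Illustrative value for the structured_output_schema argument.
structured_output_schema = {
    "type": "object",
    "properties": {"answer": {"type": "string"}},
    "required": ["answer"],
}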
@@ -1,4 +1,7 @@
+from typing import Any
+
 import anthropic
+from anthropic import Omit, transform_schema
 from anthropic.types import ThinkingConfigEnabledParam, AnthropicBetaParam
 from anthropic.types.beta import BetaWebSearchTool20250305Param, BetaToolUnionParam, BetaCodeExecutionTool20250825Param

@@ -18,7 +21,10 @@ async def create_claude_client(
         stream: bool,
         thought: bool,
         code_execution: bool,
+        structured_output_schema: dict[str, Any] | None
 ):
+    omit = Omit()
+
     client = anthropic.AsyncAnthropic(
         api_key=api_key,
     )
@@ -44,7 +50,7 @@ async def create_claude_client(
         max_output,
         context_window - input_tokens,
     )
-    thinking = None
+    thinking = omit
     if thought:
         thinking = ThinkingConfigEnabledParam(
             type="enabled",
@@ -55,6 +61,7 @@ async def create_claude_client(
         "context-1m-2025-08-07",
         "output-128k-2025-02-19",
         "code-execution-2025-08-25",
+        "structured-outputs-2025-11-13"
     ]
     tools: list[BetaToolUnionParam] = []
     tools.append(
@@ -71,6 +78,13 @@ async def create_claude_client(
             )
         )

+    output_format = omit
+    # if structured_output_schema:  # Claude output format raises: TypeError: unhashable type: 'dict'
+    #     output_format = {
+    #         "type": "json_schema",
+    #         "schema": transform_schema(structured_output_schema),
+    #     }
+
     if stream:
         return StreamClaudeClient(
             model=model,
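Claude structured output ships disabled in this release: the structured-outputs-2025-11-13 beta is requested, but output_format stays at omit because the commented branch raised TypeError: unhashable type: 'dict'. For reference, a sketch of what re-enabling the branch would look like, mirroring the commented lines above; whether transform_schema accepts a raw schema dict here (rather than, say, a pydantic model) is an assumption:

from typing import Any

from anthropic import Omit, transform_schema

structured_output_schema: dict[str, Any] | None = {"type": "object", "properties": {}}  # illustrative
output_format: Any = Omit()
if structured_output_schema:  # hypothetical re-enabled branch
    output_format = {
        "type": "json_schema",
        "schema": transform_schema(structured_output_schema),
    }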
@@ -83,6 +97,7 @@ async def create_claude_client(
             input_tokens=input_tokens,
             tools=tools,
             thinking=thinking,
+            output_format=output_format,
         )
     else:
         return NonStreamClaudeClient(
@@ -96,4 +111,5 @@ async def create_claude_client(
             input_tokens=input_tokens,
             tools=tools,
             thinking=thinking,
+            output_format=output_format,
         )
@@ -1,6 +1,8 @@
+from typing import Any
+
 from google import genai
 from google.genai import types
-from google.genai.types import Modality, HttpOptions, MediaResolution
+from google.genai.types import Modality, MediaResolution

 from llm_bridge.client.implementations.gemini.non_stream_gemini_client import NonStreamGeminiClient
 from llm_bridge.client.implementations.gemini.stream_gemini_client import StreamGeminiClient
@@ -18,7 +20,7 @@ async def create_gemini_client(
         stream: bool,
         thought: bool,
         code_execution: bool,
-        structured_output_schema: dict | None = None,
+        structured_output_schema: dict[str, Any] | None,
 ):
     client = genai.Client(
         vertexai=vertexai,
@@ -87,7 +89,7 @@ async def create_gemini_client(
         response_modalities=response_modalities,
     )

-    if structured_output_schema is not None:
+    if structured_output_schema:
         config.response_mime_type = "application/json"
         config.response_json_schema = structured_output_schema

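Gemini is the one backend where structured output is fully active: a truthy schema switches the response to JSON mode. Note the guard changed from `is not None` to truthiness, so an empty dict now falls through to plain text. A minimal sketch of the resulting config under google-genai as pinned below, with an illustrative schema:

from google.genai import types

config = types.GenerateContentConfig()
config.response_mime_type = "application/json"
config.response_json_schema = {  # raw JSON schema, passed through unchanged
    "type": "object",
    "properties": {"answer": {"type": "string"}},
    "required": ["answer"],
}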
 
@@ -1,4 +1,5 @@
 import re
+from typing import Any

 import openai
 from fastapi import HTTPException
@@ -13,6 +14,7 @@ from llm_bridge.client.implementations.openai.steam_openai_responses_client impo
 from llm_bridge.client.implementations.openai.stream_openai_client import StreamOpenAIClient
 from llm_bridge.logic.chat_generate.chat_message_converter import convert_messages_to_openai_responses, \
     convert_messages_to_openai
+from llm_bridge.logic.chat_generate.model_client_factory.schema_converter import json_schema_to_pydantic_model
 from llm_bridge.type.message import Message


@@ -25,6 +27,7 @@ async def create_openai_client(
         stream: bool,
         thought: bool,
         code_execution: bool,
+        structured_output_schema: dict[str, Any] | None,
 ):
     if api_type == "OpenAI":
         client = openai.AsyncOpenAI(
@@ -91,6 +94,11 @@ async def create_openai_client(
             )
         )

+    structured_output_base_model = None
+    if structured_output_schema:
+        structured_output_base_model = json_schema_to_pydantic_model(structured_output_schema)
+
+
     if use_responses_api:
         if stream:
             return StreamOpenAIResponsesClient(
@@ -101,6 +109,7 @@ async def create_openai_client(
                 client=client,
                 tools=tools,
                 reasoning=reasoning,
+                structured_output_base_model=structured_output_base_model,
             )
         else:
             return NonStreamOpenAIResponsesClient(
@@ -111,6 +120,7 @@ async def create_openai_client(
                 client=client,
                 tools=tools,
                 reasoning=reasoning,
+                structured_output_base_model=structured_output_base_model,
             )
     else:
         if stream:
@@ -0,0 +1,7 @@
+from typing import Any
+
+from pydantic import BaseModel
+
+
+def json_schema_to_pydantic_model(json_schema: dict[str, Any]) -> BaseModel:
+    pass
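As released, json_schema_to_pydantic_model is a stub that returns None, so structured_output_base_model stays None for the OpenAI clients (and the annotation arguably should be type[BaseModel] rather than BaseModel). One possible implementation for flat object schemas, a sketch built on pydantic's create_model and not code from this release:

from typing import Any

from pydantic import BaseModel, create_model

# Assumed mapping from JSON-schema scalar types to Python types.
_JSON_TO_PY: dict[str, type] = {
    "string": str,
    "integer": int,
    "number": float,
    "boolean": bool,
    "array": list,
    "object": dict,
}


def json_schema_to_pydantic_model(json_schema: dict[str, Any]) -> type[BaseModel]:
    required = set(json_schema.get("required", []))
    fields: dict[str, Any] = {}
    for name, prop in json_schema.get("properties", {}).items():
        py_type = _JSON_TO_PY.get(prop.get("type", "string"), str)
        # Required fields get no default (...); optional ones default to None.
        fields[name] = (py_type, ...) if name in required else (py_type | None, None)
    # Assumes the schema title, if present, is a valid Python identifier.
    return create_model(json_schema.get("title", "StructuredOutput"), **fields)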
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.12.6
+Version: 1.13.0
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
@@ -13,10 +13,10 @@ License-File: LICENSE
 Requires-Dist: fastapi
 Requires-Dist: httpx
 Requires-Dist: tenacity
-Requires-Dist: openai==1.106.1
+Requires-Dist: openai==2.9.0
 Requires-Dist: tiktoken==0.11.0
 Requires-Dist: google-genai==1.46.0
-Requires-Dist: anthropic==0.66.0
+Requires-Dist: anthropic==0.75.0
 Requires-Dist: PyMuPDF
 Requires-Dist: docxlatex>=1.1.1
 Requires-Dist: openpyxl
@@ -6,8 +6,8 @@ llm_bridge/client/implementations/printing_status.py,sha256=ok3ihBRIEan3qMbc62He
 llm_bridge/client/implementations/claude/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/claude/claude_response_handler.py,sha256=d5e1rlxfao_rjhgT1Rky-xlCRJIK2M-e9LKNUATOczc,4143
 llm_bridge/client/implementations/claude/claude_token_counter.py,sha256=m_aoLJkFPJqSBA3Thzv5vg3GnaucZh41SAgT28sLeBA,1324
-llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=IunPFhqFoHWlYqoz__fWO0vLTJbbjMVL1FuHGRAQwkg,3469
-llm_bridge/client/implementations/claude/stream_claude_client.py,sha256=cu06_AcBVZJJE_SuSr26jBW8GEdaK4OQN9XA-6p4O0Q,2954
+llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=EFseZ9LP6bPVwMqur9gxPZCRWGSH0p3EL6HDMYsCDss,3040
+llm_bridge/client/implementations/claude/stream_claude_client.py,sha256=8jzD9fVptQnSkRVz0oQ3QnQ22NyMm6hjsmEtoDvl8g8,2059
 llm_bridge/client/implementations/gemini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/gemini/gemini_response_handler.py,sha256=_bejFAjo07s4jBpXBGF5djPs3nxZZjHaDkr4w1S8lTs,4321
 llm_bridge/client/implementations/gemini/gemini_token_counter.py,sha256=GdnwJWPhGZMB_xC0fz88zQRparIHzTemkQoqfDcxVEA,687
@@ -15,25 +15,26 @@ llm_bridge/client/implementations/gemini/non_stream_gemini_client.py,sha256=JGNN
 llm_bridge/client/implementations/gemini/stream_gemini_client.py,sha256=vqPhQdr-jaHXzn-_1PSZfpo96zM-_89XOEXIx7UBBIw,1545
 llm_bridge/client/implementations/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/openai/non_stream_openai_client.py,sha256=aceJm6FF6VdzVRECzJyTY8-aQjCekhhbrMPEcUN24fo,2171
-llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py,sha256=E8bBefhgtGM0qF3WH3VtXWbgls60kWVDWu2UG2SfsXM,4216
+llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py,sha256=bvDvnENVKaGslk6cvZ_r-waOYx1SSDOU-gjT9Bbf1HA,4349
 llm_bridge/client/implementations/openai/openai_token_couter.py,sha256=ESl3L049NSE6Y1wfrH195ftQIFdr6XjJcmw5gJBeGaA,1472
-llm_bridge/client/implementations/openai/steam_openai_responses_client.py,sha256=HdaIYeJg9o5TjyqMlGUjfsPF2MDoxWF8tOqsqIbNTw8,4100
+llm_bridge/client/implementations/openai/steam_openai_responses_client.py,sha256=hoYSCjljtsGhSLmheZ076ZcgN4ZLWee_7VsK6Gpy5X0,4233
 llm_bridge/client/implementations/openai/stream_openai_client.py,sha256=Izq4xH9EuLjUCBJsuSr6U4Kj6FN5c7w_oHf9wmQatXE,2988
 llm_bridge/client/model_client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/client/model_client/claude_client.py,sha256=Q-ciptb1cV3BNUOUj3Blwy-zu0loD2glFceRe5YDKKo,1352
+llm_bridge/client/model_client/claude_client.py,sha256=xLRXYD9t5E3QBVIMe-GdD7eESC752cM9_3FCcp6MFIg,1446
 llm_bridge/client/model_client/gemini_client.py,sha256=4dcueIbpLFqkT98WxmeVmW9Vbq7Z5jbYbifAem-NL1E,906
-llm_bridge/client/model_client/openai_client.py,sha256=92nSIrlAhT0u6m8MvT31-VSqrtDUekkRwV3JpTd_WKE,1239
+llm_bridge/client/model_client/openai_client.py,sha256=U3tg9vBXG8oqcURo-vYoUXEI7UgHl0_3aP30TlOcPj4,1428
 llm_bridge/logic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/logic/file_fetch.py,sha256=Q8PGNj76E25sKD70TmlnSIdPgAxpNlb89syk87DbAGg,1341
 llm_bridge/logic/model_prices.py,sha256=hiXVbki3004Rrm5LQrmVfdm0lLABeygxtFB-Qn9_mm0,1219
 llm_bridge/logic/chat_generate/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/logic/chat_generate/chat_client_factory.py,sha256=x1x5rYUCSMvK2r7lbtwBEHI_-jAcBfoOQbuUY4kZano,4043
+llm_bridge/logic/chat_generate/chat_client_factory.py,sha256=g34iodMfCqopsPu2aDYp9hsEBY2ap7I_io620y1wy-Q,4385
 llm_bridge/logic/chat_generate/chat_message_converter.py,sha256=40VTBOPXg_ocrEZMdt1ObYlm-mhRL35zWzzxv8m2xRc,1538
 llm_bridge/logic/chat_generate/media_processor.py,sha256=ZR8G24EHwZZr2T9iFDRmScDGyJ_kvThApABzSzK0CL0,702
 llm_bridge/logic/chat_generate/model_client_factory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py,sha256=bdO-4LBSwe1x8_5kamVg6dpRkxGB8_FXgRaaNH53qUs,3059
-llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py,sha256=KjxU3NkHRvQS4G8FXDRlKRM0Kdunol-gTZhUakBwngQ,3666
-llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py,sha256=EoU5kyccnwOKjGdFi5yTozNVPrq402jRtWPjSmBJs7M,4517
+llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py,sha256=EMXEpNTmhOv_D8pQIPboW3taRX5DnLa1QIGJn0VjO4M,3597
+llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py,sha256=MllYs6dPVohY5Bm5dOlS7lR_oOB6_o9saR5fKE1_x20,3668
+llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py,sha256=xRnqYal5lBS76tCJKJHVvI0UsLCM49hTlqN6xYmzPco,5028
+llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py,sha256=kQ3RGyg_9vbe7oYzbl11Dzu-tHPY1z2-SBBSgHHwPfM,143
 llm_bridge/logic/chat_generate/model_message_converter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py,sha256=SfDhQXR7L5nCPHS4MIjwgzK_wER7qOUCc8gh-K77kKY,2441
 llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py,sha256=UjhzRX7sBa3-Zv1flMJd8bc8uRWMMav4UdJFhE6nVq4,1527
@@ -55,8 +56,8 @@ llm_bridge/type/model_message/claude_message.py,sha256=gYJUTbLUeifQMva3Axarc-VFe
 llm_bridge/type/model_message/gemini_message.py,sha256=mh8pf929g7_NkBzSOwnLXyrwSzTT4yt2FmyX7NZn0sM,4302
 llm_bridge/type/model_message/openai_message.py,sha256=xFaLY-cZoSwNd7E9BSWQjBNcRfCVH11X9s2yxXlctR0,453
 llm_bridge/type/model_message/openai_responses_message.py,sha256=be1q2euA0ybjj4NO6NxOGIRB9eJuXSb4ssUm_bM4Ocs,1529
-llm_bridge-1.12.6.dist-info/licenses/LICENSE,sha256=m6uon-6P_CaiqcBfApMfjG9YRtDxcr40Z52JcqUCEAE,1069
-llm_bridge-1.12.6.dist-info/METADATA,sha256=4izHWE6c6BYkFoMPjsJaTW6SxpqOsjT3ZCFAONCZVtE,3502
-llm_bridge-1.12.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-llm_bridge-1.12.6.dist-info/top_level.txt,sha256=PtxyrgNX1lSa1Ab_qswg0sekSXejG5zrS6b_v3Po05g,11
-llm_bridge-1.12.6.dist-info/RECORD,,
+llm_bridge-1.13.0.dist-info/licenses/LICENSE,sha256=m6uon-6P_CaiqcBfApMfjG9YRtDxcr40Z52JcqUCEAE,1069
+llm_bridge-1.13.0.dist-info/METADATA,sha256=WEfH36m-VHEcwzIb-h2ixMhi8nxlrccp-84jhhbDKIs,3500
+llm_bridge-1.13.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llm_bridge-1.13.0.dist-info/top_level.txt,sha256=PtxyrgNX1lSa1Ab_qswg0sekSXejG5zrS6b_v3Po05g,11
+llm_bridge-1.13.0.dist-info/RECORD,,