LLM-Bridge 1.8.1 → 1.9.0a0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/llm_bridge/client/implementations/claude/claude_stream_response_handler.py
+++ b/llm_bridge/client/implementations/claude/claude_stream_response_handler.py
@@ -20,9 +20,6 @@ class ClaudeStreamResponseHandler:
                 thought += event.delta.thinking
             elif event.delta.type == "text_delta":
                 text += event.delta.text
-        elif event.type == "citation":
-            citation = event.citation
-            text += f"([{citation.title}]({citation.url})) "
 
         chat_response = ChatResponse(
             text=text,
--- a/llm_bridge/client/implementations/claude/non_stream_claude_client.py
+++ b/llm_bridge/client/implementations/claude/non_stream_claude_client.py
@@ -26,10 +26,6 @@ async def process_claude_non_stream_response(
             thought += content.thinking
         if content.type == "text":
             text += content.text
-            # Unable to test: non-streaming Claude is currently not allowed
-            if citations := content.citations:
-                for citation in citations:
-                    text += f"([{citation.title}]({citation.url})) "
 
     chat_response = ChatResponse(
         text=text,
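
Both Claude hunks above delete the same inline-citation handling: the removed lines appended each web-search citation to the response text as a markdown link. A minimal sketch of that formatting, using a hypothetical stand-in for the Anthropic citation object (which exposes `title` and `url`):

```python
from dataclasses import dataclass


@dataclass
class WebCitation:  # hypothetical stand-in for the Anthropic citation object
    title: str
    url: str


def append_citation(text: str, citation: WebCitation) -> str:
    # Appends "([title](url)) " so the citation renders as a markdown link;
    # the trailing space keeps consecutive citations from running together.
    return text + f"([{citation.title}]({citation.url})) "


print(append_citation("Answer.", WebCitation("Example", "https://example.com")))
# Answer.([Example](https://example.com))
```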
--- a/llm_bridge/client/implementations/gemini/gemini_response_handler.py
+++ b/llm_bridge/client/implementations/gemini/gemini_response_handler.py
@@ -20,6 +20,8 @@ class GeminiResponseHandler:
     ) -> ChatResponse:
         text = ""
         thought = ""
+        code = ""
+        code_output = ""
         display = None
         image_base64 = None
         citations = extract_citations(response)
@@ -29,16 +31,23 @@ class GeminiResponseHandler:
         if candidates := response.candidates:
             if candidates[0].content.parts:
                 for part in response.candidates[0].content.parts:
-                    # Thought Output
-                    if part.text:
+                    if part.text is not None:
+                        # Thought
                         if part.thought:
                             printing_status = PrintingStatus.Thought
                             thought += part.text
+                        # Text
                         elif not part.thought:
                             printing_status = PrintingStatus.Response
                             text += part.text
-                    # Image Output
-                    elif part.inline_data:
+                    # Code (Causing Error)
+                    # if part.executable_code is not None:
+                    #     code += part.executable_code.code
+                    # Code Output
+                    if part.code_execution_result is not None:
+                        code_output += part.code_execution_result.output
+                    # Image
+                    if part.inline_data is not None:
                         image_base64 = base64.b64encode(part.inline_data.data).decode('utf-8')
 
         # Grounding Sources
@@ -63,6 +72,8 @@ class GeminiResponseHandler:
         return ChatResponse(
             text=text,
             thought=thought,
+            code=code,
+            code_output=code_output,
             image=image_base64,
             display=display,
             citations=citations,
@@ -80,4 +91,4 @@ def extract_citations(response: types.GenerateContentResponse) -> list[Citation]
         citation_indices = [index + 1 for index in grounding_support.grounding_chunk_indices]
         citation_text = grounding_support.segment.text
         citations.append(Citation(text=citation_text, indices=citation_indices))
-    return citations
+    return citations
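
The reworked part loop in `gemini_response_handler.py` switches from an if/elif chain to independent checks, so a single part can contribute thought text, visible text, code-execution output, and an inline image in one pass; `executable_code` collection is left commented out because it currently errors. A minimal sketch of that accumulation logic under the same assumptions, with a simplified stand-in for the google-genai part object:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class Part:  # simplified stand-in for types.Part
    text: Optional[str] = None
    thought: bool = False
    code_execution_result: Optional[str] = None  # flattened: output string only


def fold_parts(parts: list[Part]) -> tuple[str, str, str]:
    text, thought, code_output = "", "", ""
    for part in parts:
        if part.text is not None:
            # One text field, routed by the `thought` flag.
            if part.thought:
                thought += part.text
            else:
                text += part.text
        if part.code_execution_result is not None:  # independent check, not elif
            code_output += part.code_execution_result
    return text, thought, code_output


print(fold_parts([Part(text="plan", thought=True),
                  Part(text="42"),
                  Part(code_execution_result="42\n")]))
# ('42', 'plan', '42\n')
```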
--- a/llm_bridge/client/implementations/gemini/gemini_token_counter.py
+++ b/llm_bridge/client/implementations/gemini/gemini_token_counter.py
@@ -5,6 +5,8 @@ async def count_gemini_tokens(
         response: types.GenerateContentResponse
 ) -> tuple[int, int]:
     usage_metadata = response.usage_metadata
+    if usage_metadata is None:
+        return 0, 0
     input_tokens = usage_metadata.prompt_token_count
     output_tokens = usage_metadata.candidates_token_count
     if output_tokens is None:
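
The new guard returns zero counts when `usage_metadata` is absent, instead of raising `AttributeError` on `None`. A minimal sketch of the guarded behavior with a hypothetical stand-in for the metadata object:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class UsageMetadata:  # stand-in for the google-genai usage_metadata object
    prompt_token_count: int
    candidates_token_count: Optional[int]


def count_tokens(usage_metadata: Optional[UsageMetadata]) -> tuple[int, int]:
    if usage_metadata is None:  # the new guard: no metadata -> zero counts
        return 0, 0
    output_tokens = usage_metadata.candidates_token_count
    if output_tokens is None:   # mirrors the existing None check that follows
        output_tokens = 0
    return usage_metadata.prompt_token_count, output_tokens


print(count_tokens(None))                     # (0, 0)
print(count_tokens(UsageMetadata(12, None)))  # (12, 0)
```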
--- a/llm_bridge/client/implementations/gemini/stream_gemini_client.py
+++ b/llm_bridge/client/implementations/gemini/stream_gemini_client.py
@@ -4,7 +4,6 @@ from typing import AsyncGenerator
 
 import httpx
 from fastapi import HTTPException
-from google.genai import types
 
 from llm_bridge.client.implementations.gemini.gemini_response_handler import GeminiResponseHandler
 from llm_bridge.client.model_client.gemini_client import GeminiClient
@@ -39,7 +38,6 @@ class StreamGeminiClient(GeminiClient):
             response_handler = GeminiResponseHandler()
             async for response_delta in response:
                 yield await response_handler.process_gemini_response(response_delta)
-
         except Exception as e:
             logging.exception(e)
             yield ChatResponse(error=repr(e))
--- a/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py
+++ b/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py
@@ -1,5 +1,6 @@
 import logging
 import re
+from pprint import pprint
 
 import httpx
 import openai
@@ -22,6 +23,7 @@ def process_openai_responses_non_stream_response(
     output_list = response.output
 
     text = ""
+    image = None
     citations: list[Citation] = []
 
     for output in output_list:
@@ -29,15 +31,25 @@ def process_openai_responses_non_stream_response(
         for content in output.content:
             if content.type == "output_text":
                 text += content.text
-                # Citation is currently not working well in OpenAI Responses API
-                if annotations := content.annotations:
-                    for annotation in annotations:
-                        text = content.text[annotation.start_index:annotation.end_index]
+                # Citation is unavailable in OpenAI Responses API
+                # if annotations := content.annotations:
+                #     for annotation in annotations:
+                #         citations.append(
+                #             Citation(
+                #                 text=content.text[annotation.start_index:annotation.end_index],
+                #                 url=annotation.url
+                #             )
+                #         )
+        # Unable to test due to organization verification requirement
+        # if output.type == "image_generation_call":
+        #     image = output.result
 
-    chat_response = ChatResponse(text=text, citations=citations)
+    chat_response = ChatResponse(text=text, image=image)
     output_tokens = count_openai_output_tokens(chat_response)
     return ChatResponse(
         text=text,
+        image=image,
+        citations=citations,
         input_tokens=input_tokens,
         output_tokens=output_tokens,
     )
@@ -54,6 +66,7 @@ class NonStreamOpenAIResponsesClient(OpenAIClient):
 
         response: Response = await self.client.responses.create(
             model=self.model,
+            reasoning=self.reasoning,
             input=serialize(self.messages),
             temperature=self.temperature,
             stream=False,
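
`process_openai_responses_non_stream_response` now builds its result in two passes: a draft `ChatResponse` carrying only text and image is used for output-token counting, then the returned value adds the (currently always empty) citations list and the token counts. A minimal sketch of that two-pass construction, with a word-count stand-in for `count_openai_output_tokens`:

```python
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class ChatResponse:  # trimmed to the fields this function touches
    text: Optional[str] = None
    image: Optional[str] = None
    citations: list = field(default_factory=list)
    input_tokens: int = 0
    output_tokens: int = 0


def build_response(text: str, image: Optional[str], input_tokens: int) -> ChatResponse:
    draft = ChatResponse(text=text, image=image)     # pass 1: token counting only
    output_tokens = len((draft.text or "").split())  # stand-in for count_openai_output_tokens
    return ChatResponse(                             # pass 2: the full response
        text=text,
        image=image,
        citations=[],  # Responses API citations are currently unavailable
        input_tokens=input_tokens,
        output_tokens=output_tokens,
    )


print(build_response("hello world", None, input_tokens=5).output_tokens)  # 2
```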
--- a/llm_bridge/client/implementations/openai/steam_openai_responses_client.py
+++ b/llm_bridge/client/implementations/openai/steam_openai_responses_client.py
@@ -1,5 +1,6 @@
 import logging
 import re
+from pprint import pprint
 from typing import AsyncGenerator
 
 import httpx
@@ -11,16 +12,24 @@ from openai.types.responses import ResponseStreamEvent
 from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens, \
     count_openai_output_tokens
 from llm_bridge.client.model_client.openai_client import OpenAIClient
-from llm_bridge.type.chat_response import ChatResponse
+from llm_bridge.type.chat_response import ChatResponse, Citation
 from llm_bridge.type.serializer import serialize
 
 
-def process_delta(event: ResponseStreamEvent) -> str:
-    if event.type != "response.output_text.delta":
-        return ""
+def process_delta(event: ResponseStreamEvent) -> ChatResponse:
+    text = ""
+    citations: list[Citation] = []
 
-    content_delta = event.delta
-    return content_delta
+    if event.type == "response.output_text.delta":
+        text = event.delta
+    # Citation is unavailable in OpenAI Responses API
+    if event.type == "response.output_text.annotation.added":
+        pass
+
+    chat_response = ChatResponse(
+        text=text,
+    )
+    return chat_response
 
 
 async def generate_chunk(
@@ -29,11 +38,10 @@ async def generate_chunk(
 ) -> AsyncGenerator[ChatResponse, None]:
     try:
         async for event in stream:
-            content_delta = process_delta(event)
-            chat_response = ChatResponse(text=content_delta)
+            chat_response = process_delta(event)
             output_tokens = count_openai_output_tokens(chat_response)
             yield ChatResponse(
-                text=content_delta,
+                text=chat_response.text,
                 input_tokens=input_tokens,
                 output_tokens=output_tokens,
             )
@@ -53,6 +61,7 @@ class StreamOpenAIResponsesClient(OpenAIClient):
 
         stream: AsyncStream[ResponseStreamEvent] = await self.client.responses.create(
            model=self.model,
+            reasoning=self.reasoning,
             input=serialize(self.messages),
             temperature=self.temperature,
             stream=True,
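
`process_delta` now returns a `ChatResponse` instead of a bare string, which leaves room to attach per-event citations later (the `citations` list it builds is not yet used) without changing `generate_chunk`'s shape again. A minimal sketch of the event dispatch, with a hypothetical stand-in for the SDK's `ResponseStreamEvent`:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class Event:  # stand-in for openai.types.responses.ResponseStreamEvent
    type: str
    delta: str = ""


@dataclass
class ChatResponse:
    text: Optional[str] = None


def process_delta(event: Event) -> ChatResponse:
    text = ""
    if event.type == "response.output_text.delta":
        text = event.delta
    # Other event types (annotations, tool calls) fall through with empty text.
    return ChatResponse(text=text)


events = [Event("response.created"), Event("response.output_text.delta", "Hi")]
print([process_delta(e).text for e in events])  # ['', 'Hi']
```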
--- a/llm_bridge/client/model_client/openai_client.py
+++ b/llm_bridge/client/model_client/openai_client.py
@@ -1,6 +1,7 @@
 from typing import AsyncGenerator, Iterable
 
 import openai.lib.azure
+from openai.types import Reasoning
 from openai.types.responses import ToolParam
 
 from llm_bridge.client.chat_client import ChatClient
@@ -18,6 +19,7 @@ class OpenAIClient(ChatClient):
             api_type: str,
             client: openai.AsyncOpenAI | openai.lib.azure.AsyncAzureOpenAI,
             tools: Iterable[ToolParam],
+            reasoning: Reasoning,
     ):
         self.model = model
         self.messages = messages
@@ -25,6 +27,7 @@ class OpenAIClient(ChatClient):
         self.api_type = api_type
         self.client = client
         self.tools = tools
+        self.reasoning = reasoning
 
     async def generate_non_stream_response(self) -> ChatResponse:
         raise NotImplementedError
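
`OpenAIClient` now threads a `Reasoning` value from the factory into each `responses.create` call. `Reasoning` is the real `openai.types` model; note that the parameter is annotated `Reasoning` even though the factory below can pass `None`, so `Optional[Reasoning]` would describe the actual usage more precisely. A minimal construction sketch:

```python
from typing import Optional

from openai.types import Reasoning

# As wired by the factory below: high effort for reasoning-capable models,
# None for gpt-5-chat-latest.
reasoning: Optional[Reasoning] = Reasoning(effort="high")
print(reasoning.effort)  # high
```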
--- a/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py
+++ b/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py
@@ -1,5 +1,6 @@
 import anthropic
-from anthropic.types import ThinkingConfigEnabledParam
+from anthropic.types import ThinkingConfigEnabledParam, AnthropicBetaParam
+from anthropic.types.beta import BetaCodeExecutionTool20250825Param, BetaWebSearchTool20250305Param
 
 from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_input_tokens
 from llm_bridge.client.implementations.claude.non_stream_claude_client import NonStreamClaudeClient
@@ -39,11 +40,18 @@ async def create_claude_client(
         budget_tokens=16000
     )
     temperature = 1
-    betas = ["output-128k-2025-02-19"]
-    tools = [{
-        "type": "web_search_20250305",
-        "name": "web_search",
-    }]
+    betas: list[AnthropicBetaParam] = ["output-128k-2025-02-19", "code-execution-2025-08-25"]
+    tools = [
+        BetaWebSearchTool20250305Param(
+            type="web_search_20250305",
+            name="web_search",
+        ),
+        # Code Execution is unavailable in Claude
+        # BetaCodeExecutionTool20250825Param(
+        #     type="code_execution_20250825",
+        #     name="code_execution",
+        # )
+    ]
 
     if stream:
         return StreamClaudeClient(
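
The factory now types its beta flags and builds the web-search tool from the SDK's typed param class (a `TypedDict`, so it constructs a plain dict) instead of a hand-written literal; the code-execution tool stays commented out since it is reported unavailable. A sketch of calling the beta messages API directly with the same configuration; the `client.beta.messages.create(..., betas=...)` plumbing and the model id are assumptions beyond what the hunk shows:

```python
import anthropic
from anthropic.types import AnthropicBetaParam
from anthropic.types.beta import BetaWebSearchTool20250305Param

client = anthropic.AsyncAnthropic()  # reads ANTHROPIC_API_KEY from the env

betas: list[AnthropicBetaParam] = ["output-128k-2025-02-19", "code-execution-2025-08-25"]
tools = [
    BetaWebSearchTool20250305Param(
        type="web_search_20250305",
        name="web_search",
    ),
]


async def ask(prompt: str):
    # The beta namespace accepts the betas list alongside tools.
    return await client.beta.messages.create(
        model="claude-sonnet-4-20250514",  # example model id
        max_tokens=1024,
        betas=betas,
        tools=tools,
        messages=[{"role": "user", "content": prompt}],
    )
```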
--- a/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py
+++ b/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py
@@ -38,6 +38,11 @@ async def create_gemini_client(
                 url_context=types.UrlContext()
             )
         )
+        tools.append(
+            types.Tool(
+                code_execution=types.ToolCodeExecution()
+            )
+        )
     if "image" not in model:
         thinking_config = types.ThinkingConfig(include_thoughts=True)
     if "image" in model:
--- a/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py
+++ b/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py
@@ -2,7 +2,10 @@ import re
 
 import openai
 from fastapi import HTTPException
+from openai.types import Reasoning
 from openai.types.responses import WebSearchToolParam
+from openai.types.responses.tool_param import CodeInterpreter, CodeInterpreterContainerCodeInterpreterToolAuto, \
+    ImageGeneration
 
 from llm_bridge.client.implementations.openai.non_stream_openai_client import NonStreamOpenAIClient
 from llm_bridge.client.implementations.openai.non_stream_openai_responses_client import NonStreamOpenAIResponsesClient
@@ -54,20 +57,31 @@ async def create_openai_client(
     else:
         openai_messages = await convert_messages_to_openai(messages)
 
-    tools = [
-        WebSearchToolParam(
-            type="web_search_preview",
-            search_context_size="high",
-        )
-    ]
+    tools = []
+    reasoning = None
 
-    if re.match(r"^o\d", model):
-        tools = None
-        temperature = 1
-    if re.match(r"gpt-5.*", model):
+    if model != "gpt-5-chat-latest":
+        tools.append(
+            WebSearchToolParam(
+                type="web_search",
+                search_context_size="high",
+            )
+        )
+        tools.append(
+            CodeInterpreter(
+                type="code_interpreter",
+                container=CodeInterpreterContainerCodeInterpreterToolAuto(type="auto")
+            )
+        )
+    if re.match(r"^o\d", model) or (re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest"):
         temperature = 1
-    if model == "gpt-5-chat-latest":
-        tools = None
+    if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
+        reasoning = Reasoning(effort="high")
+        tools.append(
+            ImageGeneration(
+                type="image_generation",
+            )
+        )
 
     if use_responses_api:
         if stream:
@@ -78,6 +92,7 @@ async def create_openai_client(
                 api_type=api_type,
                 client=client,
                 tools=tools,
+                reasoning=reasoning,
             )
         else:
             return NonStreamOpenAIResponsesClient(
@@ -87,6 +102,7 @@ async def create_openai_client(
                 api_type=api_type,
                 client=client,
                 tools=tools,
+                reasoning=reasoning,
             )
     else:
         if stream:
@@ -97,6 +113,7 @@ async def create_openai_client(
                 api_type=api_type,
                 client=client,
                 tools=tools,
+                reasoning=reasoning,
            )
         else:
             return NonStreamOpenAIClient(
@@ -106,4 +123,5 @@ async def create_openai_client(
                 api_type=api_type,
                 client=client,
                 tools=tools,
+                reasoning=reasoning,
             )
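
The new gating logic reads: every model except `gpt-5-chat-latest` gets web search and code interpreter; o-series models and non-chat gpt-5 models are pinned to `temperature = 1`; non-chat gpt-5 models additionally get `Reasoning(effort="high")` and the image-generation tool. A standalone sketch of that decision table (names hypothetical, logic mirroring the hunk above):

```python
import re
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class ModelConfig:  # hypothetical summary of the factory's decisions
    tools: list[str] = field(default_factory=list)
    temperature: float = 0.7
    reasoning_effort: Optional[str] = None


def configure(model: str, temperature: float = 0.7) -> ModelConfig:
    cfg = ModelConfig(temperature=temperature)
    is_gpt5 = bool(re.match(r"gpt-5.*", model)) and model != "gpt-5-chat-latest"
    if model != "gpt-5-chat-latest":
        cfg.tools += ["web_search", "code_interpreter"]
    if re.match(r"^o\d", model) or is_gpt5:
        cfg.temperature = 1
    if is_gpt5:
        cfg.reasoning_effort = "high"
        cfg.tools.append("image_generation")
    return cfg


print(configure("gpt-5-chat-latest").tools)  # []
print(configure("o3").temperature)           # 1
print(configure("gpt-5").tools)  # ['web_search', 'code_interpreter', 'image_generation']
```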
--- a/llm_bridge/type/chat_response.py
+++ b/llm_bridge/type/chat_response.py
@@ -8,10 +8,20 @@ class Citation:
     indices: list[int]
 
 
+# TODO: adapt to different Citation formats
+# @dataclass
+# class Citation:
+#     text: str
+#     indices: Optional[list[int]] = None
+#     url: Optional[str] = None
+
+
 @dataclass
 class ChatResponse:
     text: Optional[str] = None
     thought: Optional[str] = None
+    code: Optional[str] = None
+    code_output: Optional[str] = None
     image: Optional[str] = None
     display: Optional[str] = None
     citations: Optional[list[Citation]] = None
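
`ChatResponse` grows `code` and `code_output` so executed code and its output travel separately from prose, and the commented-out `Citation` variant records the intent to support URL-style citations alongside the current index-style ones. A minimal usage sketch with only the fields shown in this hunk:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class ChatResponse:  # trimmed to the fields shown above
    text: Optional[str] = None
    thought: Optional[str] = None
    code: Optional[str] = None
    code_output: Optional[str] = None
    image: Optional[str] = None


resp = ChatResponse(
    text="The result is 6765.",
    code_output="6765\n",  # produced by the model's code execution
)
print(resp.code_output, end="")
```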
--- llm_bridge-1.8.1.dist-info/METADATA
+++ llm_bridge-1.9.0a0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.8.1
+Version: 1.9.0a0
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
@@ -49,14 +49,14 @@ PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge
 
 ### Model Features
 
-The features listed represent the maximum capabilities of each API type, not necessarily those of every individual model.
+The features listed represent the maximum capabilities of each API type supported by LLM Bridge.
 
-| Model Type | Input Format                   | Capabilities         | Output Format |
-|------------|--------------------------------|----------------------|---------------|
-| OpenAI     | Text, Image                    | Thinking, Web Search | Text          |
-| Gemini     | Text, Image, Video, Audio, PDF | Thinking, Web Search | Text, Image   |
-| Claude     | Text, Image, PDF               | Thinking, Web Search | Text          |
-| Grok       | Text, Image                    |                      | Text          |
+| API Type | Input Format                   | Capabilities                                                | Output Format |
+|----------|--------------------------------|-------------------------------------------------------------|---------------|
+| OpenAI   | Text, Image                    | Thinking, Web Search, Code Execution                        | Text          |
+| Gemini   | Text, Image, Video, Audio, PDF | Thinking + Thought, Web Search + Citations, Code Execution  | Text, Image   |
+| Claude   | Text, Image, PDF               | Thinking, Web Search                                        | Text          |
+| Grok     | Text, Image                    |                                                             | Text          |
 
 ## Installation
 
--- llm_bridge-1.8.1.dist-info/RECORD
+++ llm_bridge-1.9.0a0.dist-info/RECORD
@@ -4,25 +4,25 @@ llm_bridge/client/chat_client.py,sha256=XISF2BM-WkZJNbnvcLfMcbSzlrE0XMDulyE_VG9z
 llm_bridge/client/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/printing_status.py,sha256=ok3ihBRIEan3qMbc62HeXTThDx1L6Zbs_IT0HPLPspI,102
 llm_bridge/client/implementations/claude/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/client/implementations/claude/claude_stream_response_handler.py,sha256=alBobZZLaCUSf3O09IrwT8NSrpgQJVS893ViPRK-khI,1328
+llm_bridge/client/implementations/claude/claude_stream_response_handler.py,sha256=qNy0w3ObKopYp3NBWaz25tGm_bEO9RnEk6qi94W1WIs,1190
 llm_bridge/client/implementations/claude/claude_token_counter.py,sha256=g8M7BFY2zM0jrLFyfGPW-4KYetib3x098XKvEdbZh30,1182
-llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=Zv0QmpIpEL0rXvy9f1EB_WC4YkhtUBc7IadsTZlWIec,2735
+llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=xnge1J-j_Er4K4L1UxhjuxAs_Pl6vralxTKk9yItwjI,2500
 llm_bridge/client/implementations/claude/stream_claude_client.py,sha256=q4w1UYc1yZJw5UFOtnxCoeg8MFp5soc1d57YiCTCCGE,2109
 llm_bridge/client/implementations/gemini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/client/implementations/gemini/gemini_response_handler.py,sha256=qIEc4vC0vUfwkOgDjzq6sewQChpKuuayud1svvRU6Os,3510
-llm_bridge/client/implementations/gemini/gemini_token_counter.py,sha256=7vWdqv00enbQ1C4z2e-b3Hw2VxgScaf2mNnAEgjruoQ,453
+llm_bridge/client/implementations/gemini/gemini_response_handler.py,sha256=2fnvtNecRlkwxwht93k93r-0uY1Zz86ffGbRAFNnyjk,3989
+llm_bridge/client/implementations/gemini/gemini_token_counter.py,sha256=M_mlrtu_dZTgEG9JgRaPDVyXqFtHSSVAIhsknhOaVrs,504
 llm_bridge/client/implementations/gemini/non_stream_gemini_client.py,sha256=JGNNpeln42SoXg2vGIC9xG5GGlBh6dIhz4BzYIkgraA,1302
-llm_bridge/client/implementations/gemini/stream_gemini_client.py,sha256=GiLCZkl9mYtTP0RNqhFkYIuGT5UBJKw9ycWH5q49SHU,1577
+llm_bridge/client/implementations/gemini/stream_gemini_client.py,sha256=vqPhQdr-jaHXzn-_1PSZfpo96zM-_89XOEXIx7UBBIw,1545
 llm_bridge/client/implementations/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/openai/non_stream_openai_client.py,sha256=aceJm6FF6VdzVRECzJyTY8-aQjCekhhbrMPEcUN24fo,2171
-llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py,sha256=w4vT7kgJ5NpIWLimHPqvHYrcdzUXQfchuTGw3DYlnvM,3053
+llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py,sha256=xUQqeDTwsf61IV5V0MFN0C-q1KGK5vlNJFetoWehfEk,3534
 llm_bridge/client/implementations/openai/openai_token_couter.py,sha256=pWsuaUjoqXjnptVlRma-dItczEo9DMw2o_9uF7FPVAk,1449
-llm_bridge/client/implementations/openai/steam_openai_responses_client.py,sha256=jFVGJ2lg61wdgEKGLZR-9xb0ZeJtWz3PZ61T0eccsTU,2931
+llm_bridge/client/implementations/openai/steam_openai_responses_client.py,sha256=SbfRNE03JacBCD2_u0pGt4JSXqGchDLAxbKvP1KIVhU,3177
 llm_bridge/client/implementations/openai/stream_openai_client.py,sha256=Izq4xH9EuLjUCBJsuSr6U4Kj6FN5c7w_oHf9wmQatXE,2988
 llm_bridge/client/model_client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/model_client/claude_client.py,sha256=cuYORseQY8HVt-COh2J0C_mhqPehDB3A4G4vrunoSFA,1352
 llm_bridge/client/model_client/gemini_client.py,sha256=4dcueIbpLFqkT98WxmeVmW9Vbq7Z5jbYbifAem-NL1E,906
-llm_bridge/client/model_client/openai_client.py,sha256=vGuqveYlCI8JKOsUjN3Ytp9xil7OqHi6mXA9mFuuEsA,1135
+llm_bridge/client/model_client/openai_client.py,sha256=92nSIrlAhT0u6m8MvT31-VSqrtDUekkRwV3JpTd_WKE,1239
 llm_bridge/logic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/logic/file_fetch.py,sha256=Q8PGNj76E25sKD70TmlnSIdPgAxpNlb89syk87DbAGg,1341
 llm_bridge/logic/model_prices.py,sha256=hiXVbki3004Rrm5LQrmVfdm0lLABeygxtFB-Qn9_mm0,1219
@@ -31,9 +31,9 @@ llm_bridge/logic/chat_generate/chat_client_factory.py,sha256=huZO5pqRNFDUK9KpCk3
 llm_bridge/logic/chat_generate/chat_message_converter.py,sha256=40VTBOPXg_ocrEZMdt1ObYlm-mhRL35zWzzxv8m2xRc,1538
 llm_bridge/logic/chat_generate/media_processor.py,sha256=ZR8G24EHwZZr2T9iFDRmScDGyJ_kvThApABzSzK0CL0,702
 llm_bridge/logic/chat_generate/model_client_factory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py,sha256=_iX36A4WJx2-QeX4oAurOaRuCjvm8UDhaKL8lO9iP9o,2186
-llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py,sha256=rEPUTLtUE94vU6usVY4gQHE0o3YhEQ4mAejTM8bGiMo,3125
-llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py,sha256=KG5QF4D6ouPssD7g2512CW1M8Iv_xA2gmjxh8FQ8z84,3531
+llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py,sha256=j8RwLDul_zdZIIZfzrJji3VmqnYVAV61Xjrbp4NC69k,2603
+llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py,sha256=ORiyVkLytTN4nyvdzH6P7BCbpj07CHVNFWuNuL0d6UQ,3252
+llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py,sha256=5DLyWhl5Ab5cRA6awTdAlBTmzmy65iiXKxysaDc2XzU,4301
 llm_bridge/logic/chat_generate/model_message_converter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py,sha256=SfDhQXR7L5nCPHS4MIjwgzK_wER7qOUCc8gh-K77kKY,2441
 llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py,sha256=UjhzRX7sBa3-Zv1flMJd8bc8uRWMMav4UdJFhE6nVq4,1527
@@ -47,7 +47,7 @@ llm_bridge/logic/message_preprocess/message_preprocessor.py,sha256=ERws57Dsu-f5L
 llm_bridge/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/resources/model_prices.json,sha256=_2ZXKjnMDa6YSKfnWEPR_vUtuMw3cEi1d2L3IZ2kVxs,2707
 llm_bridge/type/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/type/chat_response.py,sha256=pIiSVjW9SLVnV-3xaGjqZMfRVd0hP1Rz2dbq1m_mCWQ,444
+llm_bridge/type/chat_response.py,sha256=zggw5UGhTjyCCYqQjnp8l9rtjOe1LuJtsmeYLluj8Cc,680
 llm_bridge/type/message.py,sha256=NyWmSSrciFfvF81aBwAH8qFpo5IpRhh8QXMselbYen8,370
 llm_bridge/type/serializer.py,sha256=moCL9y_HTO2CFg2w_jc5MljDxKgHiCo_qiz-o4l2jYU,515
 llm_bridge/type/model_message/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -55,8 +55,8 @@ llm_bridge/type/model_message/claude_message.py,sha256=gYJUTbLUeifQMva3Axarc-VFe
 llm_bridge/type/model_message/gemini_message.py,sha256=mh8pf929g7_NkBzSOwnLXyrwSzTT4yt2FmyX7NZn0sM,4302
 llm_bridge/type/model_message/openai_message.py,sha256=xFaLY-cZoSwNd7E9BSWQjBNcRfCVH11X9s2yxXlctR0,453
 llm_bridge/type/model_message/openai_responses_message.py,sha256=be1q2euA0ybjj4NO6NxOGIRB9eJuXSb4ssUm_bM4Ocs,1529
-llm_bridge-1.8.1.dist-info/licenses/LICENSE,sha256=m6uon-6P_CaiqcBfApMfjG9YRtDxcr40Z52JcqUCEAE,1069
-llm_bridge-1.8.1.dist-info/METADATA,sha256=zESjjaVZwkSL-M32zyF4gzfwoBPvBXa36lKZKGYSuqs,7504
-llm_bridge-1.8.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-llm_bridge-1.8.1.dist-info/top_level.txt,sha256=PtxyrgNX1lSa1Ab_qswg0sekSXejG5zrS6b_v3Po05g,11
-llm_bridge-1.8.1.dist-info/RECORD,,
+llm_bridge-1.9.0a0.dist-info/licenses/LICENSE,sha256=m6uon-6P_CaiqcBfApMfjG9YRtDxcr40Z52JcqUCEAE,1069
+llm_bridge-1.9.0a0.dist-info/METADATA,sha256=J2G_cF3GpKT83q7MTyUYkA-2Fi2Bgq1iKh0zc43Ydaw,7697
+llm_bridge-1.9.0a0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llm_bridge-1.9.0a0.dist-info/top_level.txt,sha256=PtxyrgNX1lSa1Ab_qswg0sekSXejG5zrS6b_v3Po05g,11
+llm_bridge-1.9.0a0.dist-info/RECORD,,