LLM-Bridge: llm_bridge-1.8.1-py3-none-any.whl → llm_bridge-1.9.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_bridge/client/implementations/claude/claude_stream_response_handler.py +0 -3
- llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -4
- llm_bridge/client/implementations/gemini/gemini_response_handler.py +23 -12
- llm_bridge/client/implementations/gemini/gemini_token_counter.py +2 -0
- llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -2
- llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +20 -6
- llm_bridge/client/implementations/openai/steam_openai_responses_client.py +27 -10
- llm_bridge/client/model_client/openai_client.py +3 -0
- llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +14 -6
- llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +5 -0
- llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +30 -12
- llm_bridge/type/chat_response.py +10 -0
- {llm_bridge-1.8.1.dist-info → llm_bridge-1.9.0.dist-info}/METADATA +16 -10
- {llm_bridge-1.8.1.dist-info → llm_bridge-1.9.0.dist-info}/RECORD +17 -17
- {llm_bridge-1.8.1.dist-info → llm_bridge-1.9.0.dist-info}/WHEEL +0 -0
- {llm_bridge-1.8.1.dist-info → llm_bridge-1.9.0.dist-info}/licenses/LICENSE +0 -0
- {llm_bridge-1.8.1.dist-info → llm_bridge-1.9.0.dist-info}/top_level.txt +0 -0

llm_bridge/client/implementations/claude/claude_stream_response_handler.py
CHANGED

@@ -20,9 +20,6 @@ class ClaudeStreamResponseHandler:
                 thought += event.delta.thinking
             elif event.delta.type == "text_delta":
                 text += event.delta.text
-            elif event.type == "citation":
-                citation = event.citation
-                text += f"([{citation.title}]({citation.url})) "

         chat_response = ChatResponse(
             text=text,

llm_bridge/client/implementations/claude/non_stream_claude_client.py
CHANGED

@@ -26,10 +26,6 @@ async def process_claude_non_stream_response(
             thought += content.thinking
         if content.type == "text":
             text += content.text
-            # Unable to test: non-streaming Claude is currently not allowed
-            if citations := content.citations:
-                for citation in citations:
-                    text += f"([{citation.title}]({citation.url})) "

     chat_response = ChatResponse(
         text=text,

llm_bridge/client/implementations/gemini/gemini_response_handler.py
CHANGED

@@ -18,28 +18,37 @@ class GeminiResponseHandler:
         self,
         response: types.GenerateContentResponse,
     ) -> ChatResponse:
-        text = ""
-        thought = ""
-
-
-
+        text: str = ""
+        thought: str = ""
+        code: str = ""
+        code_output: str = ""
+        image: Optional[str] = None
+        display: Optional[str] = None
+        citations: list[Citation] = extract_citations(response)
         input_tokens, stage_output_tokens = await count_gemini_tokens(response)

         printing_status = None
         if candidates := response.candidates:
             if candidates[0].content.parts:
                 for part in response.candidates[0].content.parts:
-
-
+                    if part.text is not None:
+                        # Thought
                         if part.thought:
                             printing_status = PrintingStatus.Thought
                             thought += part.text
+                        # Text
                         elif not part.thought:
                             printing_status = PrintingStatus.Response
                             text += part.text
-                    #
-
-
+                    # Code (Causing Error)
+                    # if part.executable_code is not None:
+                    #     code += part.executable_code.code
+                    # Code Output
+                    if part.code_execution_result is not None:
+                        code_output += part.code_execution_result.output
+                    # Image
+                    if part.inline_data is not None:
+                        image = base64.b64encode(part.inline_data.data).decode('utf-8')

         # Grounding Sources
         if candidates := response.candidates:
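
The reworked loop above routes each Gemini part into a separate ChatResponse field. A minimal sketch of that routing pattern, with stub dataclasses standing in for the `google.genai` part types (all `Stub*` names are hypothetical):

```python
import base64
from dataclasses import dataclass
from typing import Optional


@dataclass
class StubResult:
    output: str


@dataclass
class StubBlob:
    data: bytes


@dataclass
class StubPart:
    text: Optional[str] = None
    thought: bool = False
    code_execution_result: Optional[StubResult] = None
    inline_data: Optional[StubBlob] = None


def route_parts(parts: list[StubPart]) -> dict:
    text, thought, code_output = "", "", ""
    image: Optional[str] = None
    for part in parts:
        if part.text is not None:
            if part.thought:
                thought += part.text   # reasoning trace
            else:
                text += part.text      # user-visible answer
        if part.code_execution_result is not None:
            code_output += part.code_execution_result.output
        if part.inline_data is not None:
            # binary image payloads are shipped to the caller as base64
            image = base64.b64encode(part.inline_data.data).decode("utf-8")
    return {"text": text, "thought": thought, "code_output": code_output, "image": image}


print(route_parts([StubPart(text="2"),
                   StubPart(code_execution_result=StubResult(output="4\n"))]))
```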

@@ -63,7 +72,9 @@ class GeminiResponseHandler:
         return ChatResponse(
             text=text,
             thought=thought,
-
+            code=code,
+            code_output=code_output,
+            image=image,
             display=display,
             citations=citations,
             input_tokens=input_tokens,

@@ -80,4 +91,4 @@ def extract_citations(response: types.GenerateContentResponse) -> list[Citation]
         citation_indices = [index + 1 for index in grounding_support.grounding_chunk_indices]
         citation_text = grounding_support.segment.text
         citations.append(Citation(text=citation_text, indices=citation_indices))
-    return citations
+    return citations

llm_bridge/client/implementations/gemini/gemini_token_counter.py
CHANGED

@@ -5,6 +5,8 @@ async def count_gemini_tokens(
     response: types.GenerateContentResponse
 ) -> tuple[int, int]:
     usage_metadata = response.usage_metadata
+    if usage_metadata is None:
+        return 0, 0
     input_tokens = usage_metadata.prompt_token_count
     output_tokens = usage_metadata.candidates_token_count
     if output_tokens is None:
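
The new guard matters because streamed `GenerateContentResponse` chunks can arrive with no `usage_metadata` at all, which previously raised `AttributeError` when reading `prompt_token_count` on `None`. A self-contained sketch of the guarded logic (`SimpleNamespace` stands in for the SDK's metadata object):

```python
from types import SimpleNamespace
from typing import Optional


def count_tokens(usage_metadata: Optional[SimpleNamespace]) -> tuple[int, int]:
    if usage_metadata is None:  # new guard: report zero instead of crashing
        return 0, 0
    input_tokens = usage_metadata.prompt_token_count
    # candidates_token_count can also be absent on intermediate chunks
    output_tokens = usage_metadata.candidates_token_count or 0
    return input_tokens, output_tokens


print(count_tokens(None))                                        # (0, 0)
print(count_tokens(SimpleNamespace(prompt_token_count=12,
                                   candidates_token_count=34)))  # (12, 34)
```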

llm_bridge/client/implementations/gemini/stream_gemini_client.py
CHANGED

@@ -4,7 +4,6 @@ from typing import AsyncGenerator

 import httpx
 from fastapi import HTTPException
-from google.genai import types

 from llm_bridge.client.implementations.gemini.gemini_response_handler import GeminiResponseHandler
 from llm_bridge.client.model_client.gemini_client import GeminiClient

@@ -39,7 +38,6 @@ class StreamGeminiClient(GeminiClient):
             response_handler = GeminiResponseHandler()
             async for response_delta in response:
                 yield await response_handler.process_gemini_response(response_delta)
-
         except Exception as e:
             logging.exception(e)
             yield ChatResponse(error=repr(e))

llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py
CHANGED

@@ -1,5 +1,7 @@
 import logging
 import re
+from pprint import pprint
+from typing import Optional

 import httpx
 import openai

@@ -21,7 +23,8 @@ def process_openai_responses_non_stream_response(

     output_list = response.output

-    text = ""
+    text: str = ""
+    image: Optional[str] = None
     citations: list[Citation] = []

     for output in output_list:

@@ -29,15 +32,25 @@ def process_openai_responses_non_stream_response(
         for content in output.content:
             if content.type == "output_text":
                 text += content.text
-                # Citation is
-                if annotations := content.annotations:
-
-
+                # Citation is unavailable in OpenAI Responses API
+                # if annotations := content.annotations:
+                #     for annotation in annotations:
+                #         citations.append(
+                #             Citation(
+                #                 text=content.text[annotation.start_index:annotation.end_index],
+                #                 url=annotation.url
+                #             )
+                #         )
+        # Image Generation untestable due to organization verification requirement
+        # if output.type == "image_generation_call":
+        #     image = output.result

-    chat_response = ChatResponse(text=text,
+    chat_response = ChatResponse(text=text, image=image)
     output_tokens = count_openai_output_tokens(chat_response)
     return ChatResponse(
         text=text,
+        image=image,
+        citations=citations,
         input_tokens=input_tokens,
         output_tokens=output_tokens,
     )

@@ -54,6 +67,7 @@ class NonStreamOpenAIResponsesClient(OpenAIClient):

         response: Response = await self.client.responses.create(
             model=self.model,
+            reasoning=self.reasoning,
             input=serialize(self.messages),
             temperature=self.temperature,
             stream=False,

llm_bridge/client/implementations/openai/steam_openai_responses_client.py
CHANGED

@@ -1,6 +1,7 @@
 import logging
 import re
-from
+from pprint import pprint
+from typing import AsyncGenerator, Optional

 import httpx
 import openai

@@ -11,16 +12,30 @@ from openai.types.responses import ResponseStreamEvent
 from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens, \
     count_openai_output_tokens
 from llm_bridge.client.model_client.openai_client import OpenAIClient
-from llm_bridge.type.chat_response import ChatResponse
+from llm_bridge.type.chat_response import ChatResponse, Citation
 from llm_bridge.type.serializer import serialize


-def process_delta(event: ResponseStreamEvent) ->
-
-
+def process_delta(event: ResponseStreamEvent) -> ChatResponse:
+    text: str = ""
+    image: Optional[str] = None
+    citations: list[Citation] = []

-
-
+    if event.type == "response.output_text.delta":
+        text = event.delta
+    # Citation is unavailable in OpenAI Responses API
+    if event.type == "response.output_text.annotation.added":
+        pass
+    # Image Generation untestable due to organization verification requirement
+    # if event.type == "response.image_generation_call.partial_image":
+    #     image = event.partial_image_b64
+
+    chat_response = ChatResponse(
+        text=text,
+        image=image,
+        citations=citations,
+    )
+    return chat_response


 async def generate_chunk(

@@ -29,11 +44,12 @@ async def generate_chunk(
 ) -> AsyncGenerator[ChatResponse, None]:
     try:
         async for event in stream:
-
-            chat_response = ChatResponse(text=content_delta)
+            chat_response = process_delta(event)
             output_tokens = count_openai_output_tokens(chat_response)
             yield ChatResponse(
-                text=
+                text=chat_response.text,
+                image=chat_response.image,
+                citations=chat_response.citations,
                 input_tokens=input_tokens,
                 output_tokens=output_tokens,
             )
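
Downstream, every yielded `ChatResponse` is now assembled by `process_delta`, so `.text` holds a per-event delta rather than the whole message. A hedged consumer-side sketch (the `stream` argument is hypothetical):

```python
async def collect_text(stream) -> str:
    # Each chunk's .text is a delta, so concatenation rebuilds the message.
    full_text = ""
    async for chunk in stream:
        if chunk.error:
            raise RuntimeError(chunk.error)
        if chunk.text:
            full_text += chunk.text
    return full_text
```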

@@ -53,6 +69,7 @@ class StreamOpenAIResponsesClient(OpenAIClient):

         stream: AsyncStream[ResponseStreamEvent] = await self.client.responses.create(
             model=self.model,
+            reasoning=self.reasoning,
             input=serialize(self.messages),
             temperature=self.temperature,
             stream=True,

llm_bridge/client/model_client/openai_client.py
CHANGED

@@ -1,6 +1,7 @@
 from typing import AsyncGenerator, Iterable

 import openai.lib.azure
+from openai.types import Reasoning
 from openai.types.responses import ToolParam

 from llm_bridge.client.chat_client import ChatClient

@@ -18,6 +19,7 @@ class OpenAIClient(ChatClient):
             api_type: str,
             client: openai.AsyncOpenAI | openai.lib.azure.AsyncAzureOpenAI,
             tools: Iterable[ToolParam],
+            reasoning: Reasoning,
     ):
         self.model = model
         self.messages = messages

@@ -25,6 +27,7 @@ class OpenAIClient(ChatClient):
         self.api_type = api_type
         self.client = client
         self.tools = tools
+        self.reasoning = reasoning

     async def generate_non_stream_response(self) -> ChatResponse:
         raise NotImplementedError
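
The stored `reasoning` is forwarded verbatim to `client.responses.create(reasoning=...)` in both Responses-API subclasses. The value is either `None` or an `openai.types.Reasoning` payload, e.g.:

```python
from openai.types import Reasoning

# The factory passes either None (e.g. for gpt-5-chat-latest) or a payload
# like this, which the client hands to client.responses.create(reasoning=...).
reasoning = Reasoning(effort="high")
```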

llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py
CHANGED

@@ -1,5 +1,6 @@
 import anthropic
-from anthropic.types import ThinkingConfigEnabledParam
+from anthropic.types import ThinkingConfigEnabledParam, AnthropicBetaParam
+from anthropic.types.beta import BetaCodeExecutionTool20250825Param, BetaWebSearchTool20250305Param

 from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_input_tokens
 from llm_bridge.client.implementations.claude.non_stream_claude_client import NonStreamClaudeClient

@@ -39,11 +40,18 @@ async def create_claude_client(
             budget_tokens=16000
         )
         temperature = 1
-    betas = ["output-128k-2025-02-19"]
-    tools = [
-
-
-
+    betas: list[AnthropicBetaParam] = ["output-128k-2025-02-19", "code-execution-2025-08-25"]
+    tools = [
+        BetaWebSearchTool20250305Param(
+            type="web_search_20250305",
+            name="web_search",
+        ),
+        # Code Execution is unavailable in Claude
+        # BetaCodeExecutionTool20250825Param(
+        #     type="code_execution_20250825",
+        #     name="code_execution",
+        # )
+    ]

     if stream:
         return StreamClaudeClient(
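
The typed `betas` list and the web-search tool are ultimately passed to the Anthropic SDK's beta surface. A hedged sketch of the kind of call they feed (the model name, `max_tokens`, and the `ask` helper are illustrative, not from the package):

```python
import anthropic

client = anthropic.AsyncAnthropic()  # reads ANTHROPIC_API_KEY from env


async def ask(messages: list[dict]):
    # Beta feature flags travel as `betas`; tools are plain typed dicts.
    return await client.beta.messages.create(
        model="claude-sonnet-4-0",
        max_tokens=1024,
        betas=["output-128k-2025-02-19", "code-execution-2025-08-25"],
        tools=[{"type": "web_search_20250305", "name": "web_search"}],
        messages=messages,
    )
```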

llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py
CHANGED

@@ -38,6 +38,11 @@ async def create_gemini_client(
                 url_context=types.UrlContext()
             )
         )
+    tools.append(
+        types.Tool(
+            code_execution=types.ToolCodeExecution()
+        )
+    )
     if "image" not in model:
         thinking_config = types.ThinkingConfig(include_thoughts=True)
     if "image" in model:
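
The appended tool switches on Gemini's server-side code execution. In the google-genai SDK the tool list travels inside a `GenerateContentConfig`; a minimal sketch (model name and prompt are examples):

```python
from google import genai
from google.genai import types


def generate_with_code_execution(prompt: str):
    # With this tool enabled, the model can run Python server-side and
    # return executable_code / code_execution_result parts.
    client = genai.Client()  # reads GOOGLE_API_KEY / GEMINI_API_KEY from env
    return client.models.generate_content(
        model="gemini-2.5-flash",  # example model name
        contents=prompt,
        config=types.GenerateContentConfig(
            tools=[types.Tool(code_execution=types.ToolCodeExecution())],
        ),
    )
```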

llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py
CHANGED

@@ -2,7 +2,10 @@ import re

 import openai
 from fastapi import HTTPException
+from openai.types import Reasoning
 from openai.types.responses import WebSearchToolParam
+from openai.types.responses.tool_param import CodeInterpreter, CodeInterpreterContainerCodeInterpreterToolAuto, \
+    ImageGeneration

 from llm_bridge.client.implementations.openai.non_stream_openai_client import NonStreamOpenAIClient
 from llm_bridge.client.implementations.openai.non_stream_openai_responses_client import NonStreamOpenAIResponsesClient

@@ -54,20 +57,31 @@ async def create_openai_client(
     else:
         openai_messages = await convert_messages_to_openai(messages)

-    tools = [
-
-            type="web_search_preview",
-            search_context_size="high",
-        )
-    ]
+    tools = []
+    reasoning = None

-    if
-        tools
-
-
+    if model != "gpt-5-chat-latest":
+        tools.append(
+            WebSearchToolParam(
+                type="web_search",
+                search_context_size="high",
+            )
+        )
+        tools.append(
+            CodeInterpreter(
+                type="code_interpreter",
+                container=CodeInterpreterContainerCodeInterpreterToolAuto(type="auto")
+            )
+        )
+    if re.match(r"^o\d", model) or (re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest"):
         temperature = 1
-    if model
-
+    if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
+        reasoning = Reasoning(effort="high")
+        tools.append(
+            ImageGeneration(
+                type="image_generation",
+            )
+        )

     if use_responses_api:
         if stream:
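
The branching above encodes per-model capability rules: `gpt-5-chat-latest` gets neither tools nor reasoning, every other model gets web search plus the code interpreter, o-series and non-chat gpt-5 models force `temperature = 1`, and non-chat gpt-5 models additionally get high reasoning effort and image generation. The same conditions restated as a small pure function (a sketch, not package code):

```python
import re


def select_options(model: str) -> dict[str, bool]:
    is_gpt5 = bool(re.match(r"gpt-5.*", model)) and model != "gpt-5-chat-latest"
    return {
        "tools": model != "gpt-5-chat-latest",  # web search + code interpreter
        "temperature_forced": bool(re.match(r"^o\d", model)) or is_gpt5,
        "reasoning_high": is_gpt5,              # Reasoning(effort="high")
        "image_generation": is_gpt5,
    }


assert select_options("gpt-5-chat-latest") == {
    "tools": False, "temperature_forced": False,
    "reasoning_high": False, "image_generation": False,
}
assert select_options("o3")["temperature_forced"] is True
assert select_options("gpt-5")["reasoning_high"] is True
```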

@@ -78,6 +92,7 @@ async def create_openai_client(
                 api_type=api_type,
                 client=client,
                 tools=tools,
+                reasoning=reasoning,
             )
         else:
             return NonStreamOpenAIResponsesClient(

@@ -87,6 +102,7 @@ async def create_openai_client(
                 api_type=api_type,
                 client=client,
                 tools=tools,
+                reasoning=reasoning,
             )
     else:
         if stream:

@@ -97,6 +113,7 @@ async def create_openai_client(
                 api_type=api_type,
                 client=client,
                 tools=tools,
+                reasoning=reasoning,
             )
         else:
             return NonStreamOpenAIClient(

@@ -106,4 +123,5 @@ async def create_openai_client(
                 api_type=api_type,
                 client=client,
                 tools=tools,
+                reasoning=reasoning,
             )
llm_bridge/type/chat_response.py
CHANGED

@@ -8,10 +8,20 @@ class Citation:
     indices: list[int]


+# TODO: adapt to different Citation formats
+# @dataclass
+# class Citation:
+#     text: str
+#     indices: Optional[list[int]] = None
+#     url: Optional[str] = None
+
+
 @dataclass
 class ChatResponse:
     text: Optional[str] = None
     thought: Optional[str] = None
+    code: Optional[str] = None
+    code_output: Optional[str] = None
     image: Optional[str] = None
     display: Optional[str] = None
     citations: Optional[list[Citation]] = None
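
With the two new fields, a single response object can carry a Gemini code-execution turn. An illustrative construction (values invented; `code` stays `None` while the `executable_code` capture remains commented out in the Gemini handler):

```python
from llm_bridge.type.chat_response import ChatResponse

# Invented example of a code-execution turn.
response = ChatResponse(
    text="The 20th Fibonacci number is 6765.",
    code=None,             # executable_code capture is currently disabled
    code_output="6765\n",  # stdout of the model-run snippet
)
```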

{llm_bridge-1.8.1.dist-info → llm_bridge-1.9.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.8.1
+Version: 1.9.0
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT

@@ -13,10 +13,10 @@ License-File: LICENSE
 Requires-Dist: fastapi
 Requires-Dist: httpx
 Requires-Dist: tenacity
-Requires-Dist: openai==1.
+Requires-Dist: openai==1.106.1
 Requires-Dist: tiktoken==0.11.0
 Requires-Dist: google-genai==1.28.0
-Requires-Dist: anthropic==0.
+Requires-Dist: anthropic==0.66.0
 Requires-Dist: PyMuPDF
 Requires-Dist: docxlatex>=1.1.1
 Requires-Dist: openpyxl

@@ -49,14 +49,20 @@ PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge

 ### Model Features

-The features listed represent the maximum capabilities of each API type
+The features listed represent the maximum capabilities of each API type supported by LLM Bridge.

-
-
-| OpenAI
-| Gemini
-| Claude
-| Grok
+| API Type | Input Format                   | Capabilities                                               | Output Format |
+|----------|--------------------------------|------------------------------------------------------------|---------------|
+| OpenAI   | Text, Image                    | Thinking, Web Search, Code Execution                       | Text          |
+| Gemini   | Text, Image, Video, Audio, PDF | Thinking + Thought, Web Search + Citations, Code Execution | Text, Image   |
+| Claude   | Text, Image, PDF               | Thinking, Web Search                                       | Text          |
+| Grok     | Text, Image                    |                                                            | Text          |
+
+#### Planned Features
+
+- OpenAI: Web Search: Citations, Image Output
+- Gemini: Code Execution: Code, Code Output
+- Claude: Code Execution, File Output

 ## Installation


{llm_bridge-1.8.1.dist-info → llm_bridge-1.9.0.dist-info}/RECORD
CHANGED

@@ -4,25 +4,25 @@ llm_bridge/client/chat_client.py,sha256=XISF2BM-WkZJNbnvcLfMcbSzlrE0XMDulyE_VG9z
 llm_bridge/client/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/printing_status.py,sha256=ok3ihBRIEan3qMbc62HeXTThDx1L6Zbs_IT0HPLPspI,102
 llm_bridge/client/implementations/claude/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/client/implementations/claude/claude_stream_response_handler.py,sha256=
+llm_bridge/client/implementations/claude/claude_stream_response_handler.py,sha256=qNy0w3ObKopYp3NBWaz25tGm_bEO9RnEk6qi94W1WIs,1190
 llm_bridge/client/implementations/claude/claude_token_counter.py,sha256=g8M7BFY2zM0jrLFyfGPW-4KYetib3x098XKvEdbZh30,1182
-llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=
+llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=xnge1J-j_Er4K4L1UxhjuxAs_Pl6vralxTKk9yItwjI,2500
 llm_bridge/client/implementations/claude/stream_claude_client.py,sha256=q4w1UYc1yZJw5UFOtnxCoeg8MFp5soc1d57YiCTCCGE,2109
 llm_bridge/client/implementations/gemini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/client/implementations/gemini/gemini_response_handler.py,sha256=
-llm_bridge/client/implementations/gemini/gemini_token_counter.py,sha256=
+llm_bridge/client/implementations/gemini/gemini_response_handler.py,sha256=eqXpIx1xJK5VZtuUlye5kIVjLr1YWBU9koq2HEEUX9s,4034
+llm_bridge/client/implementations/gemini/gemini_token_counter.py,sha256=M_mlrtu_dZTgEG9JgRaPDVyXqFtHSSVAIhsknhOaVrs,504
 llm_bridge/client/implementations/gemini/non_stream_gemini_client.py,sha256=JGNNpeln42SoXg2vGIC9xG5GGlBh6dIhz4BzYIkgraA,1302
-llm_bridge/client/implementations/gemini/stream_gemini_client.py,sha256=
+llm_bridge/client/implementations/gemini/stream_gemini_client.py,sha256=vqPhQdr-jaHXzn-_1PSZfpo96zM-_89XOEXIx7UBBIw,1545
 llm_bridge/client/implementations/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/openai/non_stream_openai_client.py,sha256=aceJm6FF6VdzVRECzJyTY8-aQjCekhhbrMPEcUN24fo,2171
-llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py,sha256=
+llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py,sha256=Ecd0dLLDXMqNEBut_t74VPDZlLObbEOoyt3vkttJEug,3595
 llm_bridge/client/implementations/openai/openai_token_couter.py,sha256=pWsuaUjoqXjnptVlRma-dItczEo9DMw2o_9uF7FPVAk,1449
-llm_bridge/client/implementations/openai/steam_openai_responses_client.py,sha256=
+llm_bridge/client/implementations/openai/steam_openai_responses_client.py,sha256=52re50oU1ArIwWuocDSUN6TNDtZIP348qt4wjO3qj30,3560
 llm_bridge/client/implementations/openai/stream_openai_client.py,sha256=Izq4xH9EuLjUCBJsuSr6U4Kj6FN5c7w_oHf9wmQatXE,2988
 llm_bridge/client/model_client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/model_client/claude_client.py,sha256=cuYORseQY8HVt-COh2J0C_mhqPehDB3A4G4vrunoSFA,1352
 llm_bridge/client/model_client/gemini_client.py,sha256=4dcueIbpLFqkT98WxmeVmW9Vbq7Z5jbYbifAem-NL1E,906
-llm_bridge/client/model_client/openai_client.py,sha256=
+llm_bridge/client/model_client/openai_client.py,sha256=92nSIrlAhT0u6m8MvT31-VSqrtDUekkRwV3JpTd_WKE,1239
 llm_bridge/logic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/logic/file_fetch.py,sha256=Q8PGNj76E25sKD70TmlnSIdPgAxpNlb89syk87DbAGg,1341
 llm_bridge/logic/model_prices.py,sha256=hiXVbki3004Rrm5LQrmVfdm0lLABeygxtFB-Qn9_mm0,1219

@@ -31,9 +31,9 @@ llm_bridge/logic/chat_generate/chat_client_factory.py,sha256=huZO5pqRNFDUK9KpCk3
 llm_bridge/logic/chat_generate/chat_message_converter.py,sha256=40VTBOPXg_ocrEZMdt1ObYlm-mhRL35zWzzxv8m2xRc,1538
 llm_bridge/logic/chat_generate/media_processor.py,sha256=ZR8G24EHwZZr2T9iFDRmScDGyJ_kvThApABzSzK0CL0,702
 llm_bridge/logic/chat_generate/model_client_factory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py,sha256=
-llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py,sha256=
-llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py,sha256=
+llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py,sha256=j8RwLDul_zdZIIZfzrJji3VmqnYVAV61Xjrbp4NC69k,2603
+llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py,sha256=ORiyVkLytTN4nyvdzH6P7BCbpj07CHVNFWuNuL0d6UQ,3252
+llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py,sha256=5DLyWhl5Ab5cRA6awTdAlBTmzmy65iiXKxysaDc2XzU,4301
 llm_bridge/logic/chat_generate/model_message_converter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py,sha256=SfDhQXR7L5nCPHS4MIjwgzK_wER7qOUCc8gh-K77kKY,2441
 llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py,sha256=UjhzRX7sBa3-Zv1flMJd8bc8uRWMMav4UdJFhE6nVq4,1527

@@ -47,7 +47,7 @@ llm_bridge/logic/message_preprocess/message_preprocessor.py,sha256=ERws57Dsu-f5L
 llm_bridge/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/resources/model_prices.json,sha256=_2ZXKjnMDa6YSKfnWEPR_vUtuMw3cEi1d2L3IZ2kVxs,2707
 llm_bridge/type/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/type/chat_response.py,sha256=
+llm_bridge/type/chat_response.py,sha256=zggw5UGhTjyCCYqQjnp8l9rtjOe1LuJtsmeYLluj8Cc,680
 llm_bridge/type/message.py,sha256=NyWmSSrciFfvF81aBwAH8qFpo5IpRhh8QXMselbYen8,370
 llm_bridge/type/serializer.py,sha256=moCL9y_HTO2CFg2w_jc5MljDxKgHiCo_qiz-o4l2jYU,515
 llm_bridge/type/model_message/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -55,8 +55,8 @@ llm_bridge/type/model_message/claude_message.py,sha256=gYJUTbLUeifQMva3Axarc-VFe
 llm_bridge/type/model_message/gemini_message.py,sha256=mh8pf929g7_NkBzSOwnLXyrwSzTT4yt2FmyX7NZn0sM,4302
 llm_bridge/type/model_message/openai_message.py,sha256=xFaLY-cZoSwNd7E9BSWQjBNcRfCVH11X9s2yxXlctR0,453
 llm_bridge/type/model_message/openai_responses_message.py,sha256=be1q2euA0ybjj4NO6NxOGIRB9eJuXSb4ssUm_bM4Ocs,1529
-llm_bridge-1.
-llm_bridge-1.
-llm_bridge-1.
-llm_bridge-1.
-llm_bridge-1.
+llm_bridge-1.9.0.dist-info/licenses/LICENSE,sha256=m6uon-6P_CaiqcBfApMfjG9YRtDxcr40Z52JcqUCEAE,1069
+llm_bridge-1.9.0.dist-info/METADATA,sha256=1cH7zZINWSt9XmhGs5WeSPa5vsABWYou9CL3Y6yWzaM,7848
+llm_bridge-1.9.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llm_bridge-1.9.0.dist-info/top_level.txt,sha256=PtxyrgNX1lSa1Ab_qswg0sekSXejG5zrS6b_v3Po05g,11
+llm_bridge-1.9.0.dist-info/RECORD,,

{llm_bridge-1.8.1.dist-info → llm_bridge-1.9.0.dist-info}/WHEEL
File without changes

{llm_bridge-1.8.1.dist-info → llm_bridge-1.9.0.dist-info}/licenses/LICENSE
File without changes

{llm_bridge-1.8.1.dist-info → llm_bridge-1.9.0.dist-info}/top_level.txt
File without changes