LLM-Bridge 1.15.0a0__tar.gz → 1.15.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/PKG-INFO +2 -2
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/README.md +1 -1
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/claude/claude_response_handler.py +84 -15
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +1 -44
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +2 -1
- llm_bridge-1.15.2/llm_bridge/logic/chat_generate/media_processor.py +23 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +10 -2
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +2 -1
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +2 -2
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +1 -1
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +1 -1
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +1 -1
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/resources/model_prices.json +35 -17
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/type/chat_response.py +1 -1
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/pyproject.toml +1 -1
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/usage/main.py +10 -10
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/uv.lock +1 -1
- llm_bridge-1.15.0a0/llm_bridge/logic/chat_generate/media_processor.py +0 -20
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/.gitattributes +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/.github/workflows/python-publish.yml +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/.gitignore +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/LICENSE +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/MANIFEST.in +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/chat_client.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/claude/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/openai/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/printing_status.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/model_client/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/model_client/claude_client.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/model_client/gemini_client.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/model_client/openai_client.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/file_fetch.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/model_prices.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/resources/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/type/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/type/message.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/type/model_message/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/type/model_message/claude_message.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/type/model_message/gemini_message.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/type/model_message/openai_message.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/type/serializer.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/tests/__init__.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/tests/chat_client_factory_test.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/tests/message_preprocessor_test.py +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/usage/.env.example +0 -0
- {llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/usage/workflow.py +0 -0
{llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.15.0a0
+Version: 1.15.2
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
@@ -48,7 +48,7 @@ The features listed represent the maximum capabilities of each API type supporte
 
 | API Type | Input Format | Capabilities | Output Format |
 |----------|--------------------------------|---------------------------------------------------------|-------------------|
-| OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution | Text |
+| OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution | Text, Image |
 | Gemini | Text, Image, Video, Audio, PDF | Thinking, Web Search, Code Execution, Structured Output | Text, Image, File |
 | Claude | Text, Image, PDF | Thinking, Web Search, Code Execution | Text |
 | Grok | Text, Image | | Text |
{llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/README.md

@@ -24,7 +24,7 @@ The features listed represent the maximum capabilities of each API type supporte
 
 | API Type | Input Format | Capabilities | Output Format |
 |----------|--------------------------------|---------------------------------------------------------|-------------------|
-| OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution | Text |
+| OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution | Text, Image |
 | Gemini | Text, Image, Video, Audio, PDF | Thinking, Web Search, Code Execution, Structured Output | Text, Image, File |
 | Claude | Text, Image, PDF | Thinking, Web Search, Code Execution | Text |
 | Grok | Text, Image | | Text |
{llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/claude/claude_response_handler.py

@@ -1,20 +1,36 @@
 from anthropic import BetaMessageStreamEvent, AsyncAnthropic
+from anthropic._response import AsyncBinaryAPIResponse
 from anthropic.types.beta import BetaRawContentBlockDelta, BetaThinkingDelta, BetaTextDelta, BetaInputJSONDelta, \
     BetaBashCodeExecutionToolResultBlock, \
     BetaTextEditorCodeExecutionToolResultBlock, BetaTextEditorCodeExecutionViewResultBlock, \
     BetaTextEditorCodeExecutionStrReplaceResultBlock, \
-    BetaServerToolUseBlock, BetaBashCodeExecutionResultBlock, BetaTextBlock, BetaThinkingBlock
+    BetaServerToolUseBlock, BetaBashCodeExecutionResultBlock, BetaTextBlock, BetaThinkingBlock, \
+    BetaBashCodeExecutionOutputBlock, BetaMessage, FileMetadata
 from anthropic.types.beta.beta_raw_content_block_start_event import ContentBlock
 
 from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
-from llm_bridge.type.chat_response import ChatResponse
-
+from llm_bridge.logic.chat_generate.media_processor import bytes_to_base64
+from llm_bridge.type.chat_response import ChatResponse, File
+
+
+async def download_claude_file(client: AsyncAnthropic, file_id: str) -> File:
+    file_metadata: FileMetadata = await client.beta.files.retrieve_metadata(file_id)
+    file_content: AsyncBinaryAPIResponse = await client.beta.files.download(file_id)
+    data = await file_content.read()
+    return File(
+        name=file_metadata.filename,
+        data=bytes_to_base64(data),
+        type=file_metadata.mime_type,
+    )
 
-def process_content_block(content_block: ContentBlock) -> ChatResponse:
-    text: str = ""
-    thought: str = ""
-    code: str = ""
-    code_output: str = ""
+async def process_content_block(
+        content_block: ContentBlock, client: AsyncAnthropic
+) -> ChatResponse:
+    text: str = ""
+    thought: str = ""
+    code: str = ""
+    code_output: str = ""
+    files: list[File] = []
 
     if content_block.type == "text":
         text_block: BetaTextBlock = content_block
@@ -31,23 +47,72 @@ def process_content_block(content_block: ContentBlock) -> ChatResponse:
     elif content_block.type == "bash_code_execution_tool_result":
         bash_code_execution_tool_result_block: BetaBashCodeExecutionToolResultBlock = content_block
         if bash_code_execution_tool_result_block.content.type == "bash_code_execution_result":
-
-            code_output +=
+            result: BetaBashCodeExecutionResultBlock = content_block.content
+            code_output += result.stdout
+            outputs: list[BetaBashCodeExecutionOutputBlock] = result.content
+            file_ids = [output.file_id for output in outputs]
+            for file_id in file_ids:
+                file = await download_claude_file(client, file_id)
+                files.append(file)
 
     elif content_block.type == "text_editor_code_execution_tool_result":
         text_editor_code_execution_tool_result: BetaTextEditorCodeExecutionToolResultBlock = content_block
         if text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_view_result":
-
-            code_output +=
+            result: BetaTextEditorCodeExecutionViewResultBlock = content_block.content
+            code_output += result.content
         elif text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_str_replace_result":
-
-            code_output +=
+            result: BetaTextEditorCodeExecutionStrReplaceResultBlock = content_block.content
+            code_output += result.lines
+
+    return ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        files=files,
+    )
+
+
+async def process_claude_non_stream_response(
+        message: BetaMessage,
+        input_tokens: int,
+        client: AsyncAnthropic,
+        model: str,
+) -> ChatResponse:
+    text = ""
+    thought = ""
+    code = ""
+    code_output = ""
+    files: list[File] = []
 
+    for content_block in message.content:
+        content_block_chat_response = await process_content_block(content_block, client)
+        text += content_block_chat_response.text
+        thought += content_block_chat_response.thought
+        code += content_block_chat_response.code
+        code_output += content_block_chat_response.code_output
+        files.extend(content_block_chat_response.files)
+
+    chat_response = ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        files=files,
+    )
+    output_tokens = await count_claude_output_tokens(
+        client=client,
+        model=model,
+        chat_response=chat_response,
+    )
     return ChatResponse(
         text=text,
         thought=thought,
         code=code,
         code_output=code_output,
+        files=files,
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
     )
 
 
@@ -61,6 +126,7 @@ async def process_claude_stream_response(
     thought = ""
     code = ""
     code_output = ""
+    files: list[File] = []
 
     if event.type == "content_block_delta":
         event_delta: BetaRawContentBlockDelta = event.delta
@@ -79,17 +145,19 @@ async def process_claude_stream_response(
 
     if event.type == "content_block_start":
         content_block: ContentBlock = event.content_block
-        content_block_chat_response = process_content_block(content_block)
+        content_block_chat_response = await process_content_block(content_block, client)
         text += content_block_chat_response.text
         thought += content_block_chat_response.thought
         code += content_block_chat_response.code
         code_output += content_block_chat_response.code_output
+        files.extend(content_block_chat_response.files)
 
     chat_response = ChatResponse(
         text=text,
         thought=thought,
         code=code,
         code_output=code_output,
+        files=files,
     )
     output_tokens = await count_claude_output_tokens(
         client=client,
@@ -101,6 +169,7 @@ async def process_claude_stream_response(
         thought=thought,
         code=code,
         code_output=code_output,
+        files=files,
         input_tokens=input_tokens,
         output_tokens=output_tokens,
     )
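Note: process_claude_non_stream_response moved into this handler from non_stream_claude_client.py (next file), and every return path now carries a files list. Below is a minimal, self-contained sketch (not package code) of the aggregation pattern the new handler uses; BlockResult is a stand-in for llm_bridge's ChatResponse, not the real type.

# Illustrative sketch only: per-block results folded into one response,
# with `files` now surviving aggregation alongside the text fields.
from dataclasses import dataclass, field

@dataclass
class BlockResult:
    text: str = ""
    thought: str = ""
    code: str = ""
    code_output: str = ""
    files: list[str] = field(default_factory=list)

def aggregate(blocks: list[BlockResult]) -> BlockResult:
    total = BlockResult()
    for block in blocks:
        total.text += block.text
        total.thought += block.thought
        total.code += block.code
        total.code_output += block.code_output
        total.files.extend(block.files)  # new in 1.15.2: file outputs are kept
    return total

print(aggregate([BlockResult(text="a"), BlockResult(text="b", files=["plot.png"])]))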
{llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/claude/non_stream_claude_client.py

@@ -2,57 +2,14 @@ import logging
 import re
 
 import httpx
-from anthropic import AsyncAnthropic
-from anthropic.types.beta import BetaMessage, BetaBashCodeExecutionToolResultBlock, BetaTextBlock, BetaThinkingBlock, \
-    BetaServerToolUseBlock
 from fastapi import HTTPException
 
-from llm_bridge.client.implementations.claude.claude_response_handler import process_content_block
-from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
+from llm_bridge.client.implementations.claude.claude_response_handler import process_claude_non_stream_response
 from llm_bridge.client.model_client.claude_client import ClaudeClient
 from llm_bridge.type.chat_response import ChatResponse
 from llm_bridge.type.serializer import serialize
 
 
-async def process_claude_non_stream_response(
-        message: BetaMessage,
-        input_tokens: int,
-        client: AsyncAnthropic,
-        model: str,
-) -> ChatResponse:
-    text = ""
-    thought = ""
-    code = ""
-    code_output = ""
-
-    for content_block in message.content:
-        content_block_chat_response = process_content_block(content_block)
-        text += content_block_chat_response.text
-        thought += content_block_chat_response.thought
-        code += content_block_chat_response.code
-        code_output += content_block_chat_response.code_output
-
-    chat_response = ChatResponse(
-        text=text,
-        thought=thought,
-        code=code,
-        code_output=code_output,
-    )
-    output_tokens = await count_claude_output_tokens(
-        client=client,
-        model=model,
-        chat_response=chat_response,
-    )
-    return ChatResponse(
-        text=text,
-        thought=thought,
-        code=code,
-        code_output=code_output,
-        input_tokens=input_tokens,
-        output_tokens=output_tokens,
-    )
-
-
 class NonStreamClaudeClient(ClaudeClient):
     async def generate_non_stream_response(self) -> ChatResponse:
         try:
{llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/client/implementations/gemini/gemini_response_handler.py

@@ -7,6 +7,7 @@ from google.genai.types import Part
 
 from llm_bridge.client.implementations.gemini.gemini_token_counter import count_gemini_tokens
 from llm_bridge.client.implementations.printing_status import PrintingStatus
+from llm_bridge.logic.chat_generate.media_processor import bytes_to_base64
 from llm_bridge.type.chat_response import ChatResponse, File
 
 
@@ -57,7 +58,7 @@ class GeminiResponseHandler:
             extension = mimetypes.guess_extension(mime_type) or ""
             file = File(
                 name=f"generated_file{extension}",
-                data=
+                data=bytes_to_base64(part.inline_data.data),
                 type=mime_type,
             )
             files.append(file)
llm_bridge-1.15.2/llm_bridge/logic/chat_generate/media_processor.py (new file)

@@ -0,0 +1,23 @@
+import base64
+
+from llm_bridge.logic.file_fetch import fetch_file_data
+
+
+def bytes_to_base64(data: bytes) -> str:
+    return base64.b64encode(data).decode('utf-8')
+
+
+async def get_bytes_content_from_url(req_url: str) -> tuple[bytes, str]:
+    file_data, media_type = await fetch_file_data(req_url)
+    return file_data, media_type
+
+
+async def get_base64_content_from_url(req_url: str) -> tuple[str, str]:
+    media_data, media_type = await get_bytes_content_from_url(req_url)
+    base64_media = bytes_to_base64(media_data)
+    return base64_media, media_type
+
+
+async def get_openai_image_content_from_url(req_img_url: str) -> str:
+    base64_image, media_type = await get_base64_content_from_url(req_img_url)
+    return f"data:{media_type};base64,{base64_image}"
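For illustration, a self-contained demo of what these helpers produce (not part of the package); the payload and media type below are hard-coded stand-ins for fetch_file_data:

import base64

def bytes_to_base64(data: bytes) -> str:
    return base64.b64encode(data).decode('utf-8')

payload, media_type = b"\x89PNG...", "image/png"  # stand-in for a fetched file
data_url = f"data:{media_type};base64,{bytes_to_base64(payload)}"
print(data_url)  # data:image/png;base64,iVBORy4uLg==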
{llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py

@@ -3,7 +3,8 @@ from typing import Any
 import anthropic
 from anthropic import Omit, transform_schema
 from anthropic.types import ThinkingConfigEnabledParam, AnthropicBetaParam
-from anthropic.types.beta import BetaWebSearchTool20250305Param, BetaToolUnionParam, BetaCodeExecutionTool20250825Param
+from anthropic.types.beta import BetaWebSearchTool20250305Param, BetaToolUnionParam, BetaCodeExecutionTool20250825Param, \
+    BetaToolBash20250124Param
 
 from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_input_tokens
 from llm_bridge.client.implementations.claude.non_stream_claude_client import NonStreamClaudeClient
@@ -61,7 +62,8 @@ async def create_claude_client(
         "context-1m-2025-08-07",
         "output-128k-2025-02-19",
         "code-execution-2025-08-25",
-        "
+        "files-api-2025-04-14",
+        "structured-outputs-2025-11-13",
     ]
     tools: list[BetaToolUnionParam] = []
     tools.append(
@@ -71,6 +73,12 @@ async def create_claude_client(
         )
     )
     if code_execution:
+        tools.append(
+            BetaToolBash20250124Param(
+                type="bash_20250124",
+                name="bash",
+            )
+        )
         tools.append(
             BetaCodeExecutionTool20250825Param(
                 type="code_execution_20250825",
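A hedged sketch (not package code) of the tool list this factory now builds when code_execution is enabled; plain dicts stand in for anthropic's Beta*Param typed dicts, and any field this hunk does not show is omitted:

def build_code_execution_tools() -> list[dict]:
    tools = []
    tools.append({"type": "bash_20250124", "name": "bash"})  # new in 1.15.2
    tools.append({"type": "code_execution_20250825"})        # pre-existing tool
    return tools

print(build_code_execution_tools())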
{llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py

@@ -65,7 +65,7 @@ async def create_openai_client(
     tools = []
     reasoning = None
 
-    if model not in ["gpt-5-pro", "gpt-5.2-pro"]:
+    if model not in ["gpt-5-pro", "gpt-5.2-pro"] and "codex" not in model:
         if code_execution:
             tools.append(
                 CodeInterpreter(
@@ -87,6 +87,7 @@ async def create_openai_client(
                 effort="high",
                 summary="auto",
             )
+    if re.match(r"gpt-5.*", model) and "codex" not in model:
         tools.append(
             ImageGeneration(
                 type="image_generation",
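The effect of the two new guards, restated as a runnable sketch (the helper names here are illustrative, not part of the package):

import re

def allows_builtin_tools(model: str) -> bool:
    return model not in ["gpt-5-pro", "gpt-5.2-pro"] and "codex" not in model

def allows_image_generation(model: str) -> bool:
    return bool(re.match(r"gpt-5.*", model)) and "codex" not in model

assert not allows_builtin_tools("gpt-5.1-codex-max")  # codex models skip built-in tools
assert allows_image_generation("gpt-5")               # non-codex gpt-5 keeps image generation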
{llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py

@@ -25,7 +25,7 @@ async def convert_message_to_claude(message: Message) -> ClaudeMessage:
             file_url = content_item.data
             file_type, sub_type = await get_file_type(file_url)
             if file_type == "image":
-                base64_image, media_type = await media_processor.get_encoded_content_from_url(file_url)
+                base64_image, media_type = await media_processor.get_base64_content_from_url(file_url)
                 image_content = ImageBlockParam(
                     type="image",
                     source=Base64ImageSourceParam(
@@ -36,7 +36,7 @@ async def convert_message_to_claude(message: Message) -> ClaudeMessage:
                 )
                 claude_content.append(image_content)
             elif sub_type == "pdf":
-                file_data, media_type = await media_processor.get_encoded_content_from_url(file_url)
+                file_data, media_type = await media_processor.get_base64_content_from_url(file_url)
                 pdf_content = DocumentBlockParam(
                     type="document",
                     source=Base64PDFSourceParam(
{llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py

@@ -23,7 +23,7 @@ async def convert_message_to_gemini(message: Message) -> GeminiMessage:
             file_url = content_item.data
             file_type, sub_type = await get_file_type(file_url)
             if sub_type == "pdf" or file_type in ("image", "video", "audio"):
-                file_data, media_type = await media_processor.get_raw_content_from_url(file_url)
+                file_data, media_type = await media_processor.get_bytes_content_from_url(file_url)
                 if media_type == 'video/webm':
                     media_type = 'audio/webm'
                 parts.append(types.Part.from_bytes(data=file_data, mime_type=media_type))
{llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py

@@ -28,7 +28,7 @@ async def convert_message_to_openai(message: Message) -> OpenAIMessage:
             )
             content.append(image_content)
         elif file_type == "audio":
-            encoded_string, _ = await media_processor.get_encoded_content_from_url(file_url)
+            encoded_string, _ = await media_processor.get_base64_content_from_url(file_url)
             audio_content = ChatCompletionContentPartInputAudioParam(
                 type="input_audio",
                 input_audio=InputAudio(data=encoded_string, format=sub_type)
{llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py

@@ -30,7 +30,7 @@ async def convert_message_to_openai_responses(message: Message) -> OpenAIRespons
             )
             content.append(image_content)
         elif sub_type == "pdf":
-            file_data, _ = await media_processor.get_encoded_content_from_url(file_url)
+            file_data, _ = await media_processor.get_base64_content_from_url(file_url)
             pdf_content = ResponseInputFileParam(
                 type="input_file",
                 filename=get_file_name(file_url),
{llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/llm_bridge/resources/model_prices.json

@@ -1,51 +1,51 @@
 [
   {
-    "apiType": "Gemini-
+    "apiType": "Gemini-Paid",
     "model": "gemini-3-pro-preview",
     "input": 4,
     "output": 18
   },
   {
-    "apiType": "Gemini-
+    "apiType": "Gemini-Paid",
     "model": "gemini-3-flash-preview",
     "input": 1,
     "output": 3
   },
   {
-    "apiType": "Gemini-
+    "apiType": "Gemini-Paid",
     "model": "gemini-3-pro-image-preview",
     "input": 2,
     "output": 120
   },
-  {
-    "apiType": "Gemini-Free",
-    "model": "gemini-3-flash-preview",
-    "input": 0,
-    "output": 0
-  },
   {
     "apiType": "Gemini-Paid",
-    "model": "gemini-
+    "model": "gemini-flash-latest",
     "input": 1,
-    "output":
+    "output": 2.5
   },
   {
-    "apiType": "Gemini-
+    "apiType": "Gemini-Vertex",
     "model": "gemini-3-pro-preview",
     "input": 4,
     "output": 18
   },
   {
-    "apiType": "Gemini-
+    "apiType": "Gemini-Vertex",
+    "model": "gemini-3-flash-preview",
+    "input": 1,
+    "output": 3
+  },
+  {
+    "apiType": "Gemini-Vertex",
     "model": "gemini-3-pro-image-preview",
     "input": 2,
     "output": 120
   },
   {
-    "apiType": "Gemini-
-    "model": "gemini-flash-
-    "input":
-    "output":
+    "apiType": "Gemini-Free",
+    "model": "gemini-3-flash-preview",
+    "input": 0,
+    "output": 0
   },
   {
     "apiType": "OpenAI",
@@ -59,12 +59,30 @@
     "input": 1.25,
     "output": 10
   },
+  {
+    "apiType": "OpenAI",
+    "model": "gpt-5.1-codex-max",
+    "input": 1.25,
+    "output": 10
+  },
+  {
+    "apiType": "OpenAI",
+    "model": "gpt-5.1-codex",
+    "input": 1.25,
+    "output": 10
+  },
   {
     "apiType": "OpenAI",
     "model": "gpt-5",
     "input": 1.25,
     "output": 10
   },
+  {
+    "apiType": "OpenAI",
+    "model": "gpt-5-codex",
+    "input": 1.25,
+    "output": 10
+  },
   {
     "apiType": "OpenAI",
     "model": "gpt-5-mini",
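This release reshuffles the Gemini price entries and adds the codex-family OpenAI models. A sketch of how such a table can be consumed (not package code): the JSON does not state units, so the per-million-token divisor below is an assumption, and estimate_cost is a hypothetical helper.

import json

PER_TOKENS = 1_000_000  # assumption: rates look like USD per million tokens

prices = json.loads('[{"apiType": "OpenAI", "model": "gpt-5-codex", "input": 1.25, "output": 10}]')

def estimate_cost(api_type: str, model: str, input_tokens: int, output_tokens: int) -> float:
    entry = next(p for p in prices if p["apiType"] == api_type and p["model"] == model)
    return (entry["input"] * input_tokens + entry["output"] * output_tokens) / PER_TOKENS

print(estimate_cost("OpenAI", "gpt-5-codex", 1000, 500))  # 0.00625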
{llm_bridge-1.15.0a0 → llm_bridge-1.15.2}/usage/main.py

@@ -98,11 +98,11 @@ messages = [
     # Content(type=ContentType.Text, data="Explain the concept of Occam's Razor and provide a simple, everyday example."),
 
     # Web Search
-    Content(type=ContentType.Text, data="What's the weather in NYC today?"),
+    # Content(type=ContentType.Text, data="What's the weather in NYC today?"),
 
     # Image Understanding
-    # Content(type=ContentType.File, data="https://www.gstatic.com/webp/gallery3/1.png"),
-    # Content(type=ContentType.Text, data="What is in this image?"),
+    Content(type=ContentType.File, data="https://www.gstatic.com/webp/gallery3/1.png"),
+    Content(type=ContentType.Text, data="What is in this image?"),
 
     # Image Generation
     # Content(type=ContentType.Text, data="Please generate an image of a cat."),
@@ -114,8 +114,7 @@ messages = [
     # Content(type=ContentType.Text, data="What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
 
     # File Output
-    # Content(type=ContentType.
-    # Content(type=ContentType.Text, data="Please implement a minimum example of Neural Network in `script.py`"),
+    # Content(type=ContentType.Text, data="Create a matplotlib visualization and save it as output.png"),
 
     # Structured Output
     # Content(type=ContentType.Text, data="Please generate a product."),
@@ -139,23 +138,24 @@ messages = [
 # model = "gpt-5-pro"
 # model = "gpt-5"
 # model = "gpt-4.1"
-model = "gemini-3-pro-preview"
+model = "gpt-5-codex"
+# model = "gemini-3-pro-preview"
 # model = "gemini-3-pro-image-preview"
 # model = "gemini-3-flash-preview"
 # model = "grok-4-1-fast-reasoning"
 # model = "claude-sonnet-4-5"
 # model = "claude-opus-4-5"
-api_type = "Gemini-Vertex"
+# api_type = "Gemini-Vertex"
 # api_type = "Gemini-Free"
 # api_type = "Gemini-Paid"
-
+api_type = "OpenAI"
 # api_type = "OpenAI-Azure"
 # api_type = "OpenAI-GitHub"
 # api_type = "Claude"
 # api_type = "Grok"
 temperature = 0
-# stream = True
-stream = False
+stream = True
+# stream = False
 thought = True
 # thought = False
 code_execution = True
llm_bridge-1.15.0a0/llm_bridge/logic/chat_generate/media_processor.py (deleted)

@@ -1,20 +0,0 @@
-import base64
-
-from llm_bridge.logic.file_fetch import fetch_file_data
-
-
-async def get_raw_content_from_url(req_url: str) -> tuple[bytes, str]:
-    file_data, media_type = await fetch_file_data(req_url)
-    return file_data, media_type
-
-
-# Base64 Encoded
-async def get_encoded_content_from_url(req_url: str) -> tuple[str, str]:
-    media_data, media_type = await get_raw_content_from_url(req_url)
-    base64_media = base64.b64encode(media_data).decode('utf-8')
-    return base64_media, media_type
-
-
-async def get_openai_image_content_from_url(req_img_url: str) -> str:
-    base64_image, media_type = await get_encoded_content_from_url(req_img_url)
-    return f"data:{media_type};base64,{base64_image}"
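This deleted module lives on as the new llm_bridge/logic/chat_generate/media_processor.py shown earlier in this diff: get_raw_content_from_url became get_bytes_content_from_url, get_encoded_content_from_url became get_base64_content_from_url, the base64-encoding step was factored out into bytes_to_base64, and get_openai_image_content_from_url kept its name.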