LLM-Bridge 1.15.0a0-py3-none-any.whl → 1.15.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_bridge/client/implementations/claude/claude_response_handler.py +84 -15
- llm_bridge/client/implementations/claude/non_stream_claude_client.py +1 -44
- llm_bridge/client/implementations/gemini/gemini_response_handler.py +2 -1
- llm_bridge/logic/chat_generate/media_processor.py +9 -6
- llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +10 -2
- llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +2 -2
- llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +1 -1
- llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +1 -1
- llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +1 -1
- llm_bridge/resources/model_prices.json +17 -17
- llm_bridge/type/chat_response.py +1 -1
- {llm_bridge-1.15.0a0.dist-info → llm_bridge-1.15.1.dist-info}/METADATA +2 -2
- {llm_bridge-1.15.0a0.dist-info → llm_bridge-1.15.1.dist-info}/RECORD +15 -15
- {llm_bridge-1.15.0a0.dist-info → llm_bridge-1.15.1.dist-info}/WHEEL +0 -0
- {llm_bridge-1.15.0a0.dist-info → llm_bridge-1.15.1.dist-info}/licenses/LICENSE +0 -0
llm_bridge/client/implementations/claude/claude_response_handler.py
CHANGED

@@ -1,20 +1,36 @@
 from anthropic import BetaMessageStreamEvent, AsyncAnthropic
+from anthropic._response import AsyncBinaryAPIResponse
 from anthropic.types.beta import BetaRawContentBlockDelta, BetaThinkingDelta, BetaTextDelta, BetaInputJSONDelta, \
     BetaBashCodeExecutionToolResultBlock, \
     BetaTextEditorCodeExecutionToolResultBlock, BetaTextEditorCodeExecutionViewResultBlock, \
     BetaTextEditorCodeExecutionStrReplaceResultBlock, \
-    BetaServerToolUseBlock, BetaBashCodeExecutionResultBlock, BetaTextBlock, BetaThinkingBlock
+    BetaServerToolUseBlock, BetaBashCodeExecutionResultBlock, BetaTextBlock, BetaThinkingBlock, \
+    BetaBashCodeExecutionOutputBlock, BetaMessage, FileMetadata
 from anthropic.types.beta.beta_raw_content_block_start_event import ContentBlock

 from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
-from llm_bridge.
-
+from llm_bridge.logic.chat_generate.media_processor import bytes_to_base64
+from llm_bridge.type.chat_response import ChatResponse, File
+
+
+async def download_claude_file(client: AsyncAnthropic, file_id: str) -> File:
+    file_metadata: FileMetadata = await client.beta.files.retrieve_metadata(file_id)
+    file_content: AsyncBinaryAPIResponse = await client.beta.files.download(file_id)
+    data = await file_content.read()
+    return File(
+        name=file_metadata.filename,
+        data=bytes_to_base64(data),
+        type=file_metadata.mime_type,
+    )

-def process_content_block(content_block: ContentBlock) -> ChatResponse:
-    text = ""
-    thought = ""
-    code = ""
-    code_output = ""
+async def process_content_block(
+        content_block: ContentBlock, client: AsyncAnthropic
+) -> ChatResponse:
+    text: str = ""
+    thought: str = ""
+    code: str = ""
+    code_output: str = ""
+    files: list[File] = []

     if content_block.type == "text":
         text_block: BetaTextBlock = content_block
@@ -31,23 +47,72 @@ def process_content_block(content_block: ContentBlock) -> ChatResponse:
     elif content_block.type == "bash_code_execution_tool_result":
         bash_code_execution_tool_result_block: BetaBashCodeExecutionToolResultBlock = content_block
         if bash_code_execution_tool_result_block.content.type == "bash_code_execution_result":
-
-            code_output +=
+            result: BetaBashCodeExecutionResultBlock = content_block.content
+            code_output += result.stdout
+            outputs: list[BetaBashCodeExecutionOutputBlock] = result.content
+            file_ids = [output.file_id for output in outputs]
+            for file_id in file_ids:
+                file = await download_claude_file(client, file_id)
+                files.append(file)

     elif content_block.type == "text_editor_code_execution_tool_result":
         text_editor_code_execution_tool_result: BetaTextEditorCodeExecutionToolResultBlock = content_block
         if text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_view_result":
-
-            code_output +=
+            result: BetaTextEditorCodeExecutionViewResultBlock = content_block.content
+            code_output += result.content
         elif text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_str_replace_result":
-
-            code_output +=
+            result: BetaTextEditorCodeExecutionStrReplaceResultBlock = content_block.content
+            code_output += result.lines
+
+    return ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        files=files,
+    )
+
+
+async def process_claude_non_stream_response(
+        message: BetaMessage,
+        input_tokens: int,
+        client: AsyncAnthropic,
+        model: str,
+) -> ChatResponse:
+    text = ""
+    thought = ""
+    code = ""
+    code_output = ""
+    files: list[File] = []

+    for content_block in message.content:
+        content_block_chat_response = await process_content_block(content_block, client)
+        text += content_block_chat_response.text
+        thought += content_block_chat_response.thought
+        code += content_block_chat_response.code
+        code_output += content_block_chat_response.code_output
+        files.extend(content_block_chat_response.files)
+
+    chat_response = ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        files=files,
+    )
+    output_tokens = await count_claude_output_tokens(
+        client=client,
+        model=model,
+        chat_response=chat_response,
+    )
     return ChatResponse(
         text=text,
         thought=thought,
         code=code,
         code_output=code_output,
+        files=files,
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
     )


@@ -61,6 +126,7 @@ async def process_claude_stream_response(
     thought = ""
     code = ""
     code_output = ""
+    files: list[File] = []

     if event.type == "content_block_delta":
         event_delta: BetaRawContentBlockDelta = event.delta
@@ -79,17 +145,19 @@ async def process_claude_stream_response(

     if event.type == "content_block_start":
         content_block: ContentBlock = event.content_block
-        content_block_chat_response = process_content_block(content_block)
+        content_block_chat_response = await process_content_block(content_block, client)
         text += content_block_chat_response.text
         thought += content_block_chat_response.thought
         code += content_block_chat_response.code
         code_output += content_block_chat_response.code_output
+        files.extend(content_block_chat_response.files)

     chat_response = ChatResponse(
         text=text,
         thought=thought,
         code=code,
         code_output=code_output,
+        files=files,
     )
     output_tokens = await count_claude_output_tokens(
         client=client,
@@ -101,6 +169,7 @@ async def process_claude_stream_response(
         thought=thought,
         code=code,
         code_output=code_output,
+        files=files,
         input_tokens=input_tokens,
         output_tokens=output_tokens,
     )
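The practical effect of this refactor is that files produced by Claude's bash and code-execution tools now surface on `ChatResponse.files` as base64-encoded `File` objects. A minimal caller-side sketch, not part of the package (`save_files` and the `outputs` directory are illustrative), of how such files could be persisted:

```python
import base64
from pathlib import Path

from llm_bridge.type.chat_response import ChatResponse


def save_files(chat_response: ChatResponse, out_dir: str = "outputs") -> None:
    # File.data holds the base64 string produced by bytes_to_base64,
    # so decode it back to raw bytes before writing to disk.
    out = Path(out_dir)
    out.mkdir(exist_ok=True)
    for file in chat_response.files:
        (out / file.name).write_bytes(base64.b64decode(file.data))
```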
llm_bridge/client/implementations/claude/non_stream_claude_client.py
CHANGED

@@ -2,57 +2,14 @@ import logging
 import re

 import httpx
-from anthropic import AsyncAnthropic
-from anthropic.types.beta import BetaMessage, BetaBashCodeExecutionToolResultBlock, BetaTextBlock, BetaThinkingBlock, \
-    BetaServerToolUseBlock
 from fastapi import HTTPException

-from llm_bridge.client.implementations.claude.claude_response_handler import
-from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
+from llm_bridge.client.implementations.claude.claude_response_handler import process_claude_non_stream_response
 from llm_bridge.client.model_client.claude_client import ClaudeClient
 from llm_bridge.type.chat_response import ChatResponse
 from llm_bridge.type.serializer import serialize


-async def process_claude_non_stream_response(
-        message: BetaMessage,
-        input_tokens: int,
-        client: AsyncAnthropic,
-        model: str,
-) -> ChatResponse:
-    text = ""
-    thought = ""
-    code = ""
-    code_output = ""
-
-    for content_block in message.content:
-        content_block_chat_response = process_content_block(content_block)
-        text += content_block_chat_response.text
-        thought += content_block_chat_response.thought
-        code += content_block_chat_response.code
-        code_output += content_block_chat_response.code_output
-
-    chat_response = ChatResponse(
-        text=text,
-        thought=thought,
-        code=code,
-        code_output=code_output,
-    )
-    output_tokens = await count_claude_output_tokens(
-        client=client,
-        model=model,
-        chat_response=chat_response,
-    )
-    return ChatResponse(
-        text=text,
-        thought=thought,
-        code=code,
-        code_output=code_output,
-        input_tokens=input_tokens,
-        output_tokens=output_tokens,
-    )
-
-
 class NonStreamClaudeClient(ClaudeClient):
     async def generate_non_stream_response(self) -> ChatResponse:
         try:
llm_bridge/client/implementations/gemini/gemini_response_handler.py
CHANGED

@@ -7,6 +7,7 @@ from google.genai.types import Part

 from llm_bridge.client.implementations.gemini.gemini_token_counter import count_gemini_tokens
 from llm_bridge.client.implementations.printing_status import PrintingStatus
+from llm_bridge.logic.chat_generate.media_processor import bytes_to_base64
 from llm_bridge.type.chat_response import ChatResponse, File


@@ -57,7 +58,7 @@ class GeminiResponseHandler:
             extension = mimetypes.guess_extension(mime_type) or ""
             file = File(
                 name=f"generated_file{extension}",
-                data=
+                data=bytes_to_base64(part.inline_data.data),
                 type=mime_type,
             )
             files.append(file)
llm_bridge/logic/chat_generate/media_processor.py
CHANGED

@@ -3,18 +3,21 @@ import base64
 from llm_bridge.logic.file_fetch import fetch_file_data


-
+def bytes_to_base64(data: bytes) -> str:
+    return base64.b64encode(data).decode('utf-8')
+
+
+async def get_bytes_content_from_url(req_url: str) -> tuple[bytes, str]:
     file_data, media_type = await fetch_file_data(req_url)
     return file_data, media_type


-
-
-
-    base64_media = base64.b64encode(media_data).decode('utf-8')
+async def get_base64_content_from_url(req_url: str) -> tuple[str, str]:
+    media_data, media_type = await get_bytes_content_from_url(req_url)
+    base64_media = bytes_to_base64(media_data)
     return base64_media, media_type


 async def get_openai_image_content_from_url(req_img_url: str) -> str:
-    base64_image, media_type = await
+    base64_image, media_type = await get_base64_content_from_url(req_img_url)
    return f"data:{media_type};base64,{base64_image}"
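`bytes_to_base64` is now the single encoding point shared by the Claude and Gemini response handlers and the message converters. An offline round-trip check (the sample payload is illustrative):

```python
import base64


def bytes_to_base64(data: bytes) -> str:
    return base64.b64encode(data).decode('utf-8')


payload = b"\x89PNG\r\n\x1a\n"  # the 8-byte PNG signature, used as sample data
encoded = bytes_to_base64(payload)
assert base64.b64decode(encoded) == payload
# Shape produced by get_openai_image_content_from_url for an image URL:
print(f"data:image/png;base64,{encoded}")
```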
llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py
CHANGED

@@ -3,7 +3,8 @@ from typing import Any
 import anthropic
 from anthropic import Omit, transform_schema
 from anthropic.types import ThinkingConfigEnabledParam, AnthropicBetaParam
-from anthropic.types.beta import BetaWebSearchTool20250305Param, BetaToolUnionParam, BetaCodeExecutionTool20250825Param
+from anthropic.types.beta import BetaWebSearchTool20250305Param, BetaToolUnionParam, BetaCodeExecutionTool20250825Param, \
+    BetaToolBash20250124Param

 from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_input_tokens
 from llm_bridge.client.implementations.claude.non_stream_claude_client import NonStreamClaudeClient
@@ -61,7 +62,8 @@ async def create_claude_client(
         "context-1m-2025-08-07",
         "output-128k-2025-02-19",
         "code-execution-2025-08-25",
-        "
+        "files-api-2025-04-14",
+        "structured-outputs-2025-11-13",
     ]
     tools: list[BetaToolUnionParam] = []
     tools.append(
@@ -71,6 +73,12 @@ async def create_claude_client(
         )
     )
     if code_execution:
+        tools.append(
+            BetaToolBash20250124Param(
+                type="bash_20250124",
+                name="bash",
+            )
+        )
         tools.append(
             BetaCodeExecutionTool20250825Param(
                 type="code_execution_20250825",
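With `code_execution` enabled, the factory now registers two tools: the new bash tool and the existing code-execution tool. Since the `Beta*Param` types are TypedDicts, the request payload reduces to plain dicts; a sketch of the resulting list (the `name` of the code-execution tool is truncated in the diff and assumed here to be `"code_execution"`):

```python
code_execution = True
tools: list[dict] = []
if code_execution:
    # New in 1.15.1: the bash tool added by this change.
    tools.append({"type": "bash_20250124", "name": "bash"})
    # Pre-existing code-execution tool; its name is an assumption.
    tools.append({"type": "code_execution_20250825", "name": "code_execution"})
print(tools)
```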
llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py
CHANGED

@@ -25,7 +25,7 @@ async def convert_message_to_claude(message: Message) -> ClaudeMessage:
             file_url = content_item.data
             file_type, sub_type = await get_file_type(file_url)
             if file_type == "image":
-                base64_image, media_type = await media_processor.
+                base64_image, media_type = await media_processor.get_base64_content_from_url(file_url)
                 image_content = ImageBlockParam(
                     type="image",
                     source=Base64ImageSourceParam(
@@ -36,7 +36,7 @@ async def convert_message_to_claude(message: Message) -> ClaudeMessage:
                 )
                 claude_content.append(image_content)
             elif sub_type == "pdf":
-                file_data, media_type = await media_processor.
+                file_data, media_type = await media_processor.get_base64_content_from_url(file_url)
                 pdf_content = DocumentBlockParam(
                     type="document",
                     source=Base64PDFSourceParam(
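For reference, the image branch feeds the helper's output into a Claude `ImageBlockParam`; as a plain dict (the TypedDict runtime form), the assembled block looks roughly like this (sample values, shortened base64):

```python
base64_image, media_type = "iVBORw0KGgo=", "image/png"  # sample values
image_content = {
    "type": "image",
    "source": {
        "type": "base64",
        "media_type": media_type,
        "data": base64_image,
    },
}
```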
llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py
CHANGED

@@ -23,7 +23,7 @@ async def convert_message_to_gemini(message: Message) -> GeminiMessage:
             file_url = content_item.data
             file_type, sub_type = await get_file_type(file_url)
             if sub_type == "pdf" or file_type in ("image", "video", "audio"):
-                file_data, media_type = await media_processor.
+                file_data, media_type = await media_processor.get_bytes_content_from_url(file_url)
                 if media_type == 'video/webm':
                     media_type = 'audio/webm'
                 parts.append(types.Part.from_bytes(data=file_data, mime_type=media_type))
llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py
CHANGED

@@ -28,7 +28,7 @@ async def convert_message_to_openai(message: Message) -> OpenAIMessage:
                 )
                 content.append(image_content)
             elif file_type == "audio":
-                encoded_string, _ = await media_processor.
+                encoded_string, _ = await media_processor.get_base64_content_from_url(file_url)
                 audio_content = ChatCompletionContentPartInputAudioParam(
                     type="input_audio",
                     input_audio=InputAudio(data=encoded_string, format=sub_type)
llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py
CHANGED

@@ -30,7 +30,7 @@ async def convert_message_to_openai_responses(message: Message) -> OpenAIResponsesMessage:
                 )
                 content.append(image_content)
             elif sub_type == "pdf":
-                file_data, _ = await media_processor.
+                file_data, _ = await media_processor.get_base64_content_from_url(file_url)
                 pdf_content = ResponseInputFileParam(
                     type="input_file",
                     filename=get_file_name(file_url),
llm_bridge/resources/model_prices.json
CHANGED

@@ -1,51 +1,51 @@
 [
   {
-    "apiType": "Gemini-
+    "apiType": "Gemini-Paid",
     "model": "gemini-3-pro-preview",
     "input": 4,
     "output": 18
   },
   {
-    "apiType": "Gemini-
+    "apiType": "Gemini-Paid",
     "model": "gemini-3-flash-preview",
     "input": 1,
     "output": 3
   },
   {
-    "apiType": "Gemini-
+    "apiType": "Gemini-Paid",
     "model": "gemini-3-pro-image-preview",
     "input": 2,
     "output": 120
   },
-  {
-    "apiType": "Gemini-Free",
-    "model": "gemini-3-flash-preview",
-    "input": 0,
-    "output": 0
-  },
   {
     "apiType": "Gemini-Paid",
-    "model": "gemini-
+    "model": "gemini-flash-latest",
     "input": 1,
-    "output":
+    "output": 2.5
   },
   {
-    "apiType": "Gemini-
+    "apiType": "Gemini-Vertex",
     "model": "gemini-3-pro-preview",
     "input": 4,
     "output": 18
   },
   {
-    "apiType": "Gemini-
+    "apiType": "Gemini-Vertex",
+    "model": "gemini-3-flash-preview",
+    "input": 1,
+    "output": 3
+  },
+  {
+    "apiType": "Gemini-Vertex",
     "model": "gemini-3-pro-image-preview",
     "input": 2,
     "output": 120
   },
   {
-    "apiType": "Gemini-
-    "model": "gemini-flash-
-    "input":
-    "output":
+    "apiType": "Gemini-Free",
+    "model": "gemini-3-flash-preview",
+    "input": 0,
+    "output": 0
   },
   {
     "apiType": "OpenAI",
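The diff does not state the price units; assuming the common convention of USD per million tokens, a cost lookup over this table might look like the following (the `cost` helper is illustrative, not part of the package):

```python
import json

# Two entries from the table above, kept inline so the sketch runs standalone.
prices = json.loads("""[
  {"apiType": "Gemini-Paid", "model": "gemini-3-flash-preview", "input": 1, "output": 3},
  {"apiType": "Gemini-Paid", "model": "gemini-flash-latest", "input": 1, "output": 2.5}
]""")


def cost(api_type: str, model: str, input_tokens: int, output_tokens: int) -> float:
    entry = next(p for p in prices if p["apiType"] == api_type and p["model"] == model)
    return (entry["input"] * input_tokens + entry["output"] * output_tokens) / 1_000_000


print(cost("Gemini-Paid", "gemini-3-flash-preview", 10_000, 2_000))  # 0.016
```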
llm_bridge/type/chat_response.py
CHANGED

{llm_bridge-1.15.0a0.dist-info → llm_bridge-1.15.1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.15.0a0
+Version: 1.15.1
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
@@ -48,7 +48,7 @@ The features listed represent the maximum capabilities of each API type supported

 | API Type | Input Format | Capabilities | Output Format |
 |----------|--------------------------------|---------------------------------------------------------|-------------------|
-| OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution | Text
+| OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution | Text, Image |
 | Gemini | Text, Image, Video, Audio, PDF | Thinking, Web Search, Code Execution, Structured Output | Text, Image, File |
 | Claude | Text, Image, PDF | Thinking, Web Search, Code Execution | Text |
 | Grok | Text, Image | | Text |
{llm_bridge-1.15.0a0.dist-info → llm_bridge-1.15.1.dist-info}/RECORD
CHANGED

@@ -4,12 +4,12 @@ llm_bridge/client/chat_client.py,sha256=XISF2BM-WkZJNbnvcLfMcbSzlrE0XMDulyE_VG9z
 llm_bridge/client/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/printing_status.py,sha256=ok3ihBRIEan3qMbc62HeXTThDx1L6Zbs_IT0HPLPspI,102
 llm_bridge/client/implementations/claude/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/client/implementations/claude/claude_response_handler.py,sha256=
+llm_bridge/client/implementations/claude/claude_response_handler.py,sha256=qSbSraOSLFYunverWpBliWh3pDiw7ZSYntg0d11K78o,6477
 llm_bridge/client/implementations/claude/claude_token_counter.py,sha256=m_aoLJkFPJqSBA3Thzv5vg3GnaucZh41SAgT28sLeBA,1324
-llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=
+llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=XKUNIGGetkE2qvwXc3xS3mreD1SuYynbhKLz8NAThpM,1715
 llm_bridge/client/implementations/claude/stream_claude_client.py,sha256=8jzD9fVptQnSkRVz0oQ3QnQ22NyMm6hjsmEtoDvl8g8,2059
 llm_bridge/client/implementations/gemini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/client/implementations/gemini/gemini_response_handler.py,sha256=
+llm_bridge/client/implementations/gemini/gemini_response_handler.py,sha256=C5XZaFH1AlxSt4C-xrtRx04-cqs0lOaous6gHyu8-JI,3636
 llm_bridge/client/implementations/gemini/gemini_token_counter.py,sha256=GdnwJWPhGZMB_xC0fz88zQRparIHzTemkQoqfDcxVEA,687
 llm_bridge/client/implementations/gemini/non_stream_gemini_client.py,sha256=JGNNpeln42SoXg2vGIC9xG5GGlBh6dIhz4BzYIkgraA,1302
 llm_bridge/client/implementations/gemini/stream_gemini_client.py,sha256=vqPhQdr-jaHXzn-_1PSZfpo96zM-_89XOEXIx7UBBIw,1545
@@ -29,26 +29,26 @@ llm_bridge/logic/model_prices.py,sha256=hiXVbki3004Rrm5LQrmVfdm0lLABeygxtFB-Qn9_
 llm_bridge/logic/chat_generate/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/logic/chat_generate/chat_client_factory.py,sha256=g34iodMfCqopsPu2aDYp9hsEBY2ap7I_io620y1wy-Q,4385
 llm_bridge/logic/chat_generate/chat_message_converter.py,sha256=40VTBOPXg_ocrEZMdt1ObYlm-mhRL35zWzzxv8m2xRc,1538
-llm_bridge/logic/chat_generate/media_processor.py,sha256=
+llm_bridge/logic/chat_generate/media_processor.py,sha256=icl2kX-2SBFt09fjQ5tpmznmuu3WgnZAOiISp0NU8lw,763
 llm_bridge/logic/chat_generate/model_client_factory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py,sha256=
+llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py,sha256=mlGcCPnSqu8KBzD-Yl4Qtsw5L_2TE2ezuL9NcD-iYIY,3815
 llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py,sha256=6CuacST1bLUGhY-rGH0bm5tu3r9iQQhIweN32TgqLCc,3692
 llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py,sha256=zQyDC3beuUeC8czU22U_Xg8VGxvuQxxuaxWgghCknWg,4889
 llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py,sha256=kQ3RGyg_9vbe7oYzbl11Dzu-tHPY1z2-SBBSgHHwPfM,143
 llm_bridge/logic/chat_generate/model_message_converter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py,sha256=
-llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py,sha256=
-llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py,sha256=
-llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py,sha256=
+llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py,sha256=YiPqMkybCXrsAJOFcUfPOHXdMkn3mZxq7gft_W449dA,2439
+llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py,sha256=m6IeeQ_-yKcyBwLcEO_1HOoQAXDR5nl0mz_DNSsjieo,1529
+llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py,sha256=lmc-lUVZ_LgHcJZVB-l989TgrB4FtbCyGlRDp4eXycE,2179
+llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py,sha256=R1cl0rhkbG_PditrVGqrP_CiDi_KsmibqciVgPbuFxc,2977
 llm_bridge/logic/message_preprocess/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/logic/message_preprocess/code_file_extensions.py,sha256=5bsnSKC9PGbl6ZMy80sXfagAbz77pGjt6Z2-qwzUw48,9306
 llm_bridge/logic/message_preprocess/document_processor.py,sha256=IsVqoFgWNa9i8cRsDAfmCynJMdlvBqiCKIT9kbx96kg,2861
 llm_bridge/logic/message_preprocess/file_type_checker.py,sha256=nkrVki1a2udCeVqUnfIVi7Wxx8OMKbBuHw3FOlm17uo,1603
 llm_bridge/logic/message_preprocess/message_preprocessor.py,sha256=VR4__ip4ytAo62DHn9HeeYdbcx5lWItBnKsm9l3gmY4,1924
 llm_bridge/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/resources/model_prices.json,sha256=
+llm_bridge/resources/model_prices.json,sha256=tv9tUsMxukdw91-itj4Ui_bPm5QQeOzSz0V9GtyRQT0,2372
 llm_bridge/type/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/type/chat_response.py,sha256=
+llm_bridge/type/chat_response.py,sha256=6sz6g4Ns720Q-k6-7YYqrT2mANArKng4Svc8WxCP6I8,483
 llm_bridge/type/message.py,sha256=NyWmSSrciFfvF81aBwAH8qFpo5IpRhh8QXMselbYen8,370
 llm_bridge/type/serializer.py,sha256=moCL9y_HTO2CFg2w_jc5MljDxKgHiCo_qiz-o4l2jYU,515
 llm_bridge/type/model_message/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -56,7 +56,7 @@ llm_bridge/type/model_message/claude_message.py,sha256=gYJUTbLUeifQMva3Axarc-VFe
 llm_bridge/type/model_message/gemini_message.py,sha256=mh8pf929g7_NkBzSOwnLXyrwSzTT4yt2FmyX7NZn0sM,4302
 llm_bridge/type/model_message/openai_message.py,sha256=xFaLY-cZoSwNd7E9BSWQjBNcRfCVH11X9s2yxXlctR0,453
 llm_bridge/type/model_message/openai_responses_message.py,sha256=be1q2euA0ybjj4NO6NxOGIRB9eJuXSb4ssUm_bM4Ocs,1529
-llm_bridge-1.15.0a0.dist-info/METADATA,sha256=
-llm_bridge-1.15.0a0.dist-info/WHEEL,sha256=
-llm_bridge-1.15.0a0.dist-info/licenses/LICENSE,sha256=
-llm_bridge-1.15.0a0.dist-info/RECORD,,
+llm_bridge-1.15.1.dist-info/METADATA,sha256=tX1qT3BA1MmmxXmlrk_jgD_mdsbXkkXt_n_Oc6wI7cI,3417
+llm_bridge-1.15.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+llm_bridge-1.15.1.dist-info/licenses/LICENSE,sha256=m6uon-6P_CaiqcBfApMfjG9YRtDxcr40Z52JcqUCEAE,1069
+llm_bridge-1.15.1.dist-info/RECORD,,

{llm_bridge-1.15.0a0.dist-info → llm_bridge-1.15.1.dist-info}/WHEEL
File without changes

{llm_bridge-1.15.0a0.dist-info → llm_bridge-1.15.1.dist-info}/licenses/LICENSE
File without changes