LLM-Bridge 1.15.1__py3-none-any.whl → 1.15.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_bridge/client/implementations/claude/claude_response_handler.py +37 -26
- llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -1
- llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +7 -47
- llm_bridge/client/implementations/openai/openai_responses_response_handler.py +129 -0
- llm_bridge/client/implementations/openai/openai_token_couter.py +4 -0
- llm_bridge/client/implementations/openai/steam_openai_responses_client.py +8 -30
- llm_bridge/client/model_client/openai_client.py +3 -1
- llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +12 -5
- llm_bridge/resources/model_prices.json +18 -0
- {llm_bridge-1.15.1.dist-info → llm_bridge-1.15.3.dist-info}/METADATA +2 -2
- {llm_bridge-1.15.1.dist-info → llm_bridge-1.15.3.dist-info}/RECORD +13 -12
- {llm_bridge-1.15.1.dist-info → llm_bridge-1.15.3.dist-info}/WHEEL +0 -0
- {llm_bridge-1.15.1.dist-info → llm_bridge-1.15.3.dist-info}/licenses/LICENSE +0 -0
llm_bridge/client/implementations/claude/claude_response_handler.py

@@ -73,6 +73,39 @@ async def process_content_block(
     )
 
 
+async def build_chat_response_with_tokens(
+        text: str,
+        thought: str,
+        code: str,
+        code_output: str,
+        files: list[File],
+        input_tokens: int,
+        client: AsyncAnthropic,
+        model: str,
+) -> ChatResponse:
+    chat_response = ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        files=files,
+    )
+    output_tokens = await count_claude_output_tokens(
+        client=client,
+        model=model,
+        chat_response=chat_response,
+    )
+    return ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        files=files,
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
+    )
+
+
 async def process_claude_non_stream_response(
         message: BetaMessage,
         input_tokens: int,

@@ -93,26 +126,15 @@ async def process_claude_non_stream_response(
         code_output += content_block_chat_response.code_output
         files.extend(content_block_chat_response.files)
 
-    chat_response = ChatResponse(
+    return await build_chat_response_with_tokens(
         text=text,
         thought=thought,
         code=code,
         code_output=code_output,
         files=files,
-    )
-    output_tokens = await count_claude_output_tokens(
+        input_tokens=input_tokens,
         client=client,
         model=model,
-        chat_response=chat_response,
-    )
-    return ChatResponse(
-        text=text,
-        thought=thought,
-        code=code,
-        code_output=code_output,
-        files=files,
-        input_tokens=input_tokens,
-        output_tokens=output_tokens,
     )
 
 

@@ -152,24 +174,13 @@ async def process_claude_stream_response(
         code_output += content_block_chat_response.code_output
         files.extend(content_block_chat_response.files)
 
-    chat_response = ChatResponse(
+    return await build_chat_response_with_tokens(
         text=text,
         thought=thought,
         code=code,
         code_output=code_output,
         files=files,
-    )
-    output_tokens = await count_claude_output_tokens(
+        input_tokens=input_tokens,
         client=client,
         model=model,
-        chat_response=chat_response,
-    )
-    return ChatResponse(
-        text=text,
-        thought=thought,
-        code=code,
-        code_output=code_output,
-        files=files,
-        input_tokens=input_tokens,
-        output_tokens=output_tokens,
     )
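
The three hunks above consolidate a repeated pattern: both the non-stream and stream Claude handlers used to build an interim ChatResponse, price it with count_claude_output_tokens, and then rebuild the final response by hand. Both now delegate to the new build_chat_response_with_tokens helper. A minimal caller-side sketch (assumed usage with placeholder fragment values, not code from the package):

```python
from anthropic import AsyncAnthropic

from llm_bridge.client.implementations.claude.claude_response_handler import build_chat_response_with_tokens
from llm_bridge.type.chat_response import ChatResponse


async def finish(client: AsyncAnthropic, model: str, input_tokens: int) -> ChatResponse:
    # Fragments a handler would have accumulated from the message's content blocks.
    text, thought, code, code_output, files = "Hi!", "", "", "", []
    # One call replaces the former ChatResponse -> count -> ChatResponse sequence.
    return await build_chat_response_with_tokens(
        text=text,
        thought=thought,
        code=code,
        code_output=code_output,
        files=files,
        input_tokens=input_tokens,
        client=client,
        model=model,
    )
```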
llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py

@@ -5,57 +5,16 @@ import httpx
 import openai
 from fastapi import HTTPException
 from openai import APIStatusError
-from openai.types.responses import Response, ResponseOutputItem, ResponseOutputMessage, \
-    ResponseOutputText, ResponseReasoningItem
+from openai.types.responses import Response
 
-from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens, \
-    count_openai_output_tokens
+from llm_bridge.client.implementations.openai.openai_responses_response_handler import \
+    process_openai_responses_non_stream_response
+from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens
 from llm_bridge.client.model_client.openai_client import OpenAIClient
-from llm_bridge.type.chat_response import ChatResponse, File
+from llm_bridge.type.chat_response import ChatResponse
 from llm_bridge.type.serializer import serialize
 
 
-def process_openai_responses_non_stream_response(
-        response: Response,
-        input_tokens: int,
-) -> ChatResponse:
-
-    output_list: list[ResponseOutputItem] = response.output
-
-    text: str = ""
-    thought: str = ""
-    files: list[File] = []
-
-    for output in output_list:
-        if output.type == "message":
-            output_message: ResponseOutputMessage = output
-            for content in output_message.content:
-                if content.type == "output_text":
-                    output_text: ResponseOutputText = content
-                    text += output_text.text
-        elif output.type == "reasoning":
-            reasoning_item: ResponseReasoningItem = output
-            for summary_delta in reasoning_item.summary:
-                thought += summary_delta.text
-        if output.type == "image_generation_call":
-            file = File(
-                name="generated_image.png",
-                data=output.result,
-                type="image/png",
-            )
-            files.append(file)
-
-    chat_response = ChatResponse(text=text, files=files)
-    output_tokens = count_openai_output_tokens(chat_response)
-    return ChatResponse(
-        text=text,
-        thought=thought,
-        files=files,
-        input_tokens=input_tokens,
-        output_tokens=output_tokens,
-    )
-
-
 class NonStreamOpenAIResponsesClient(OpenAIClient):
     async def generate_non_stream_response(self) -> ChatResponse:
         try:

@@ -72,10 +31,11 @@ class NonStreamOpenAIResponsesClient(OpenAIClient):
                 temperature=self.temperature,
                 stream=False,
                 tools=self.tools,
+                include=self.include,
                 # text_format=self.structured_output_base_model, # Async OpenAPI Responses Client does not support structured output
             )
 
-            return process_openai_responses_non_stream_response(
+            return await process_openai_responses_non_stream_response(
                 response=response,
                 input_tokens=input_tokens,
             )
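
Two behavioral changes in this client: the parsing helper moved into the shared handler module and became a coroutine (hence the added await), and the request now forwards self.include. As a sketch of what that include list requests at the API level (a hypothetical direct SDK call, not package code; assumes OPENAI_API_KEY is configured):

```python
import openai


async def demo() -> None:
    client = openai.AsyncOpenAI()
    response = await client.responses.create(
        model="gpt-5",
        input="Run 6 * 7 in Python and show the result.",
        tools=[{"type": "code_interpreter", "container": {"type": "auto"}}],
        # Without this, code_interpreter_call items come back without their outputs.
        include=["code_interpreter_call.outputs"],
        stream=False,
    )
    for item in response.output:
        print(item.type)  # e.g. "reasoning", "code_interpreter_call", "message"
```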
llm_bridge/client/implementations/openai/openai_responses_response_handler.py (new file)

@@ -0,0 +1,129 @@
+from openai.types.responses import Response, ResponseOutputItem, ResponseOutputMessage, \
+    ResponseOutputText, ResponseReasoningItem
+from openai.types.responses import ResponseStreamEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseTextDeltaEvent, \
+    ResponseCodeInterpreterCallCodeDeltaEvent, ResponseImageGenCallPartialImageEvent, ResponseOutputItemDoneEvent, \
+    ResponseCodeInterpreterToolCall
+from openai.types.responses.response_code_interpreter_tool_call import Output, OutputLogs, OutputImage
+from openai.types.responses.response_output_item import ImageGenerationCall
+
+from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_output_tokens
+from llm_bridge.logic.chat_generate.media_processor import get_base64_content_from_url
+from llm_bridge.type.chat_response import ChatResponse, File
+
+
+async def process_code_interpreter_outputs(interpreter_outputs: list[Output]) -> tuple[str, list[File]]:
+    code_output: str = ""
+    files: list[File] = []
+
+    for interpreter_output in interpreter_outputs:
+        if interpreter_output.type == "logs":
+            output_logs: OutputLogs = interpreter_output
+            code_output += output_logs.logs
+        if interpreter_output.type == "image":
+            output_image: OutputImage = interpreter_output
+            data, _ = await get_base64_content_from_url(output_image.url)
+            file = File(
+                name="code_interpreter_call_output.png",
+                data=data,
+                type="image/png",
+            )
+            files.append(file)
+
+    return code_output, files
+
+
+async def process_openai_responses_non_stream_response(
+        response: Response,
+        input_tokens: int,
+) -> ChatResponse:
+
+    output_list: list[ResponseOutputItem] = response.output
+
+    text: str = ""
+    thought: str = ""
+    code: str = ""
+    code_output: str = ""
+    files: list[File] = []
+
+    for output in output_list:
+        if output.type == "message":
+            output_message: ResponseOutputMessage = output
+            for content in output_message.content:
+                if content.type == "output_text":
+                    output_text: ResponseOutputText = content
+                    text += output_text.text
+        elif output.type == "reasoning":
+            reasoning_item: ResponseReasoningItem = output
+            for summary_delta in reasoning_item.summary:
+                thought += summary_delta.text
+        elif output.type == "code_interpreter_call":
+            code_interpreter_tool_call: ResponseCodeInterpreterToolCall = output
+            if interpreter_code := code_interpreter_tool_call.code:
+                code += interpreter_code
+            if interpreter_outputs := code_interpreter_tool_call.outputs:
+                interpreter_code_output, interpreter_files = await process_code_interpreter_outputs(interpreter_outputs)
+                code_output += interpreter_code_output
+                files.extend(interpreter_files)
+        elif output.type == "image_generation_call":
+            image_generation_call: ImageGenerationCall = output
+            file = File(
+                name="image_generation_call_output.png",
+                data=image_generation_call.result,
+                type="image/png",
+            )
+            files.append(file)
+
+    chat_response = ChatResponse(text=text, files=files)
+    output_tokens = count_openai_output_tokens(chat_response)
+    return ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        files=files,
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
+    )
+
+
+async def process_openai_responses_stream_response(event: ResponseStreamEvent) -> ChatResponse:
+    text: str = ""
+    thought: str = ""
+    code: str = ""
+    code_output: str = ""
+    files: list[File] = []
+
+    if event.type == "response.output_text.delta":
+        text_delta_event: ResponseTextDeltaEvent = event
+        text = text_delta_event.delta
+    elif event.type == "response.reasoning_summary_text.delta":
+        reasoning_summary_text_delta_event: ResponseReasoningSummaryTextDeltaEvent = event
+        thought = reasoning_summary_text_delta_event.delta
+    elif event.type == "response.code_interpreter_call_code.delta":
+        code_interpreter_call_code_delta_event: ResponseCodeInterpreterCallCodeDeltaEvent = event
+        code = code_interpreter_call_code_delta_event.delta
+    elif event.type == "response.output_item.done":
+        output_item_done_event: ResponseOutputItemDoneEvent = event
+        if output_item_done_event.item.type == "code_interpreter_call":
+            code_interpreter_tool_call: ResponseCodeInterpreterToolCall = output_item_done_event.item
+            if interpreter_outputs := code_interpreter_tool_call.outputs:
+                interpreter_code_output, interpreter_files = await process_code_interpreter_outputs(interpreter_outputs)
+                code_output += interpreter_code_output
+                files.extend(interpreter_files)
+    elif event.type == "response.image_generation_call.partial_image":
+        image_gen_call_partial_image_event: ResponseImageGenCallPartialImageEvent = event
+        file = File(
+            name="generated_image.png",
+            data=image_gen_call_partial_image_event.partial_image_b64,
+            type="image/png",
+        )
+        files.append(file)
+
+    chat_response = ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        files=files,
+    )
+    return chat_response
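
The new module centralizes Responses API parsing for both OpenAI clients and adds code-interpreter support: tool-call code accumulates into code, "logs" outputs into code_output, and "image" outputs are fetched and attached as Files. A small sketch of the logs path (assumed usage; OutputLogs is the openai SDK model this module itself imports):

```python
import asyncio

from openai.types.responses.response_code_interpreter_tool_call import OutputLogs

from llm_bridge.client.implementations.openai.openai_responses_response_handler import (
    process_code_interpreter_outputs,
)


async def main() -> None:
    outputs = [OutputLogs(type="logs", logs="42\n")]
    code_output, files = await process_code_interpreter_outputs(outputs)
    print(repr(code_output))  # '42\n'
    print(files)              # [] since only logs, no images, were produced


asyncio.run(main())
```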
llm_bridge/client/implementations/openai/openai_token_couter.py

@@ -35,6 +35,10 @@ def count_openai_responses_input_tokens(messages: list[OpenAIResponsesMessage])
 
 def count_openai_output_tokens(chat_response: ChatResponse) -> int:
     text = chat_response.text
+    if chat_response.thought:
+        text += chat_response.thought
+    if chat_response.code:
+        text += chat_response.code
     file_count = len(chat_response.files) if chat_response.files else 0
 
     return num_tokens_from_text(text) + file_count * 1000
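
With this change, reasoning summaries and generated code count toward the output-token estimate rather than only the visible text. A sketch (assuming ChatResponse fields default to empty, as the partial constructions elsewhere in this diff suggest):

```python
from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_output_tokens
from llm_bridge.type.chat_response import ChatResponse

verbose = ChatResponse(text="42", thought="6 * 7 = 42", code="print(6 * 7)")
terse = ChatResponse(text="42")

# thought and code now inflate the estimate, so verbose prices higher than terse.
assert count_openai_output_tokens(verbose) > count_openai_output_tokens(terse)
```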
llm_bridge/client/implementations/openai/steam_openai_responses_client.py

@@ -6,53 +6,30 @@ import httpx
 import openai
 from fastapi import HTTPException
 from openai import APIStatusError, AsyncStream
-from openai.types.responses import ResponseStreamEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseTextDeltaEvent
+from openai.types.responses import ResponseStreamEvent
 
+from llm_bridge.client.implementations.openai.openai_responses_response_handler import \
+    process_openai_responses_stream_response
 from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens, \
     count_openai_output_tokens
 from llm_bridge.client.model_client.openai_client import OpenAIClient
-from llm_bridge.type.chat_response import ChatResponse, File
+from llm_bridge.type.chat_response import ChatResponse
 from llm_bridge.type.serializer import serialize
 
 
-def process_delta(event: ResponseStreamEvent) -> ChatResponse:
-    text: str = ""
-    thought: str = ""
-    files: list[File] = []
-
-    if event.type == "response.output_text.delta":
-        text_delta_event: ResponseTextDeltaEvent = event
-        text = text_delta_event.delta
-    elif event.type == "response.reasoning_summary_text.delta":
-        reasoning_summary_text_delta_event: ResponseReasoningSummaryTextDeltaEvent = event
-        thought = reasoning_summary_text_delta_event.delta
-    if event.type == "response.image_generation_call.partial_image":
-        file = File(
-            name="generated_image.png",
-            data=event.partial_image_b64,
-            type="image/png",
-        )
-        files.append(file)
-
-    chat_response = ChatResponse(
-        text=text,
-        thought=thought,
-        files=files,
-    )
-    return chat_response
-
-
 async def generate_chunk(
         stream: AsyncStream[ResponseStreamEvent],
         input_tokens: int,
 ) -> AsyncGenerator[ChatResponse, None]:
     try:
         async for event in stream:
-            chat_response = process_delta(event)
+            chat_response = await process_openai_responses_stream_response(event)
             output_tokens = count_openai_output_tokens(chat_response)
             yield ChatResponse(
                 text=chat_response.text,
                 thought=chat_response.thought,
+                code=chat_response.code,
+                code_output=chat_response.code_output,
                 files=chat_response.files,
                 input_tokens=input_tokens,
                 output_tokens=output_tokens,

@@ -78,6 +55,7 @@ class StreamOpenAIResponsesClient(OpenAIClient):
                 temperature=self.temperature,
                 stream=True,
                 tools=self.tools,
+                include=self.include,
                 # text_format=self.structured_output_base_model, # Async OpenAPI Responses Client does not support structured output
             )
 
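
Each yielded ChatResponse is a per-event delta (one text fragment, one code fragment, or one partial image), so consumers are expected to accumulate them; the new code and code_output fields ride along on every chunk. A consumer-side sketch (assumed usage, not package code):

```python
async def collect(chunks) -> str:
    """chunks: the async generator returned by generate_chunk(stream, input_tokens)."""
    text_parts: list[str] = []
    code_parts: list[str] = []
    async for chunk in chunks:  # each chunk is a ChatResponse carrying delta fields
        text_parts.append(chunk.text or "")
        code_parts.append(chunk.code or "")
    return "".join(text_parts)
```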
llm_bridge/client/model_client/openai_client.py

@@ -3,7 +3,7 @@ from pydantic import BaseModel
 
 import openai.lib.azure
 from openai.types import Reasoning
-from openai.types.responses import ToolParam
+from openai.types.responses import ToolParam, ResponseIncludable
 
 from llm_bridge.client.chat_client import ChatClient
 from llm_bridge.type.chat_response import ChatResponse

@@ -21,6 +21,7 @@ class OpenAIClient(ChatClient):
             client: openai.AsyncOpenAI | openai.lib.azure.AsyncAzureOpenAI,
             tools: Iterable[ToolParam],
             reasoning: Reasoning,
+            include: list[ResponseIncludable],
             structured_output_base_model: Type[BaseModel] | None = None,
     ):
         self.model = model

@@ -30,6 +31,7 @@ class OpenAIClient(ChatClient):
         self.client = client
         self.tools = tools
         self.reasoning = reasoning
+        self.include = include
         self.structured_output_base_model = structured_output_base_model
 
     async def generate_non_stream_response(self) -> ChatResponse:
llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py

@@ -3,10 +3,11 @@ from typing import Any
 
 import openai
 from fastapi import HTTPException
+from openai import Omit
 from openai.types import Reasoning
-from openai.types.responses import WebSearchToolParam
+from openai.types.responses import WebSearchToolParam, ResponseIncludable
 from openai.types.responses.tool_param import CodeInterpreter, CodeInterpreterContainerCodeInterpreterToolAuto, \
-    ImageGeneration
+    ImageGeneration, ToolParam
 
 from llm_bridge.client.implementations.openai.non_stream_openai_client import NonStreamOpenAIClient
 from llm_bridge.client.implementations.openai.non_stream_openai_responses_client import NonStreamOpenAIResponsesClient

@@ -29,6 +30,8 @@ async def create_openai_client(
         code_execution: bool,
         structured_output_schema: dict[str, Any] | None,
 ):
+    omit = Omit()
+
     if api_type == "OpenAI":
         client = openai.AsyncOpenAI(
             api_key=api_keys["OPENAI_API_KEY"],

@@ -62,10 +65,11 @@ async def create_openai_client(
     else:
         openai_messages = await convert_messages_to_openai(messages)
 
-    tools = []
-    reasoning =
+    tools: list[ToolParam] = []
+    reasoning: Reasoning | Omit = omit
+    include: list[ResponseIncludable] = ["code_interpreter_call.outputs"]
 
-    if model not in ["gpt-5-pro", "gpt-5.2-pro"]:
+    if model not in ["gpt-5-pro", "gpt-5.2-pro"] and "codex" not in model:
         if code_execution:
             tools.append(
                 CodeInterpreter(

@@ -87,6 +91,7 @@ async def create_openai_client(
             effort="high",
             summary="auto",
         )
+    if re.match(r"gpt-5.*", model) and "codex" not in model:
         tools.append(
             ImageGeneration(
                 type="image_generation",

@@ -108,6 +113,7 @@ async def create_openai_client(
             client=client,
             tools=tools,
             reasoning=reasoning,
+            include=include,
             structured_output_base_model=structured_output_base_model,
         )
     else:

@@ -119,6 +125,7 @@ async def create_openai_client(
             client=client,
             tools=tools,
             reasoning=reasoning,
+            include=include,
             structured_output_base_model=structured_output_base_model,
         )
     else:
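
The factory now types its defaults explicitly and gates tools by model: codex models and the pro tier get neither the code-interpreter nor the image-generation tool, and reasoning stays an Omit() sentinel unless it is set. In the openai SDK, Omit means "do not send this field at all", unlike None, which would serialize as an explicit null. A sketch of that distinction (model_supports_reasoning is a hypothetical flag for illustration):

```python
from openai import Omit
from openai.types import Reasoning

model_supports_reasoning = False  # hypothetical flag for illustration

reasoning: Reasoning | Omit = Omit()
if model_supports_reasoning:
    reasoning = Reasoning(effort="high", summary="auto")

# Passed as client.responses.create(..., reasoning=reasoning), the Omit value
# drops the parameter entirely for models that would reject it.
```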
llm_bridge/resources/model_prices.json

@@ -59,12 +59,30 @@
     "input": 1.25,
     "output": 10
   },
+  {
+    "apiType": "OpenAI",
+    "model": "gpt-5.1-codex-max",
+    "input": 1.25,
+    "output": 10
+  },
+  {
+    "apiType": "OpenAI",
+    "model": "gpt-5.1-codex",
+    "input": 1.25,
+    "output": 10
+  },
   {
     "apiType": "OpenAI",
     "model": "gpt-5",
     "input": 1.25,
     "output": 10
   },
+  {
+    "apiType": "OpenAI",
+    "model": "gpt-5-codex",
+    "input": 1.25,
+    "output": 10
+  },
   {
     "apiType": "OpenAI",
     "model": "gpt-5-mini",
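
The three new entries price the codex variants identically to their gpt-5 baselines. Assuming, as is conventional for such tables, that input and output are USD per million tokens, the lookup arithmetic works out as in this sketch (hypothetical helper, not package code):

```python
PRICES = {"gpt-5.1-codex": {"input": 1.25, "output": 10}}


def cost_usd(model: str, input_tokens: int, output_tokens: int) -> float:
    price = PRICES[model]
    return (input_tokens * price["input"] + output_tokens * price["output"]) / 1_000_000


# 10,000 prompt tokens + 2,000 completion tokens on gpt-5.1-codex:
# (10_000 * 1.25 + 2_000 * 10) / 1_000_000 = 0.0325 USD
print(cost_usd("gpt-5.1-codex", 10_000, 2_000))  # 0.0325
```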
{llm_bridge-1.15.1.dist-info → llm_bridge-1.15.3.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.15.1
+Version: 1.15.3
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT

@@ -50,7 +50,7 @@ The features listed represent the maximum capabilities of each API type supporte
 |----------|--------------------------------|---------------------------------------------------------|-------------------|
 | OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution | Text, Image |
 | Gemini | Text, Image, Video, Audio, PDF | Thinking, Web Search, Code Execution, Structured Output | Text, Image, File |
-| Claude | Text, Image, PDF | Thinking, Web Search, Code Execution | Text |
+| Claude | Text, Image, PDF | Thinking, Web Search, Code Execution | Text, File |
 | Grok | Text, Image | | Text |
 
 #### Planned Features
{llm_bridge-1.15.1.dist-info → llm_bridge-1.15.3.dist-info}/RECORD

@@ -4,25 +4,26 @@ llm_bridge/client/chat_client.py,sha256=XISF2BM-WkZJNbnvcLfMcbSzlrE0XMDulyE_VG9z
 llm_bridge/client/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/printing_status.py,sha256=ok3ihBRIEan3qMbc62HeXTThDx1L6Zbs_IT0HPLPspI,102
 llm_bridge/client/implementations/claude/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/client/implementations/claude/claude_response_handler.py,sha256=
+llm_bridge/client/implementations/claude/claude_response_handler.py,sha256=DeYDCORvS3MAqRAoGR_KkYs_cG41ul4Kv-f8jRCOM7I,6719
 llm_bridge/client/implementations/claude/claude_token_counter.py,sha256=m_aoLJkFPJqSBA3Thzv5vg3GnaucZh41SAgT28sLeBA,1324
 llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=XKUNIGGetkE2qvwXc3xS3mreD1SuYynbhKLz8NAThpM,1715
 llm_bridge/client/implementations/claude/stream_claude_client.py,sha256=8jzD9fVptQnSkRVz0oQ3QnQ22NyMm6hjsmEtoDvl8g8,2059
 llm_bridge/client/implementations/gemini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/client/implementations/gemini/gemini_response_handler.py,sha256=
+llm_bridge/client/implementations/gemini/gemini_response_handler.py,sha256=jND1OtQi1YmyVpewYiGunN5wE6oHfkublWzdlgn8glY,3622
 llm_bridge/client/implementations/gemini/gemini_token_counter.py,sha256=GdnwJWPhGZMB_xC0fz88zQRparIHzTemkQoqfDcxVEA,687
 llm_bridge/client/implementations/gemini/non_stream_gemini_client.py,sha256=JGNNpeln42SoXg2vGIC9xG5GGlBh6dIhz4BzYIkgraA,1302
 llm_bridge/client/implementations/gemini/stream_gemini_client.py,sha256=vqPhQdr-jaHXzn-_1PSZfpo96zM-_89XOEXIx7UBBIw,1545
 llm_bridge/client/implementations/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/openai/non_stream_openai_client.py,sha256=aceJm6FF6VdzVRECzJyTY8-aQjCekhhbrMPEcUN24fo,2171
-llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py,sha256=
-llm_bridge/client/implementations/openai/openai_token_couter.py,sha256=
-llm_bridge/client/implementations/openai/steam_openai_responses_client.py,sha256=
+llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py,sha256=Qjc1afGlHY_8SQoWZPvUSqtAB4IQ6ioOP1-BbFcpNSk,2406
+llm_bridge/client/implementations/openai/openai_responses_response_handler.py,sha256=Q8v7Y3KNdrcCxj2VuKzzHkayoCofsHGgpJz5T4IGBTQ,5586
+llm_bridge/client/implementations/openai/openai_token_couter.py,sha256=ah4rMteMYYTAVREpVEsbhkJIIdDfpBopnalthi67eaY,1602
+llm_bridge/client/implementations/openai/steam_openai_responses_client.py,sha256=sVAEeF1vkZiynOt817ifodulusbuVs_R4pjfuAhxIKs,3258
 llm_bridge/client/implementations/openai/stream_openai_client.py,sha256=Izq4xH9EuLjUCBJsuSr6U4Kj6FN5c7w_oHf9wmQatXE,2988
 llm_bridge/client/model_client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/model_client/claude_client.py,sha256=xLRXYD9t5E3QBVIMe-GdD7eESC752cM9_3FCcp6MFIg,1446
 llm_bridge/client/model_client/gemini_client.py,sha256=4dcueIbpLFqkT98WxmeVmW9Vbq7Z5jbYbifAem-NL1E,906
-llm_bridge/client/model_client/openai_client.py,sha256=
+llm_bridge/client/model_client/openai_client.py,sha256=tRb5-T5J5swwEwQW4ryz1L1KBoWF_VGwkQQpeVVy854,1526
 llm_bridge/logic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/logic/file_fetch.py,sha256=Q8PGNj76E25sKD70TmlnSIdPgAxpNlb89syk87DbAGg,1341
 llm_bridge/logic/model_prices.py,sha256=hiXVbki3004Rrm5LQrmVfdm0lLABeygxtFB-Qn9_mm0,1219

@@ -33,7 +34,7 @@ llm_bridge/logic/chat_generate/media_processor.py,sha256=icl2kX-2SBFt09fjQ5tpmzn
 llm_bridge/logic/chat_generate/model_client_factory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py,sha256=mlGcCPnSqu8KBzD-Yl4Qtsw5L_2TE2ezuL9NcD-iYIY,3815
 llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py,sha256=6CuacST1bLUGhY-rGH0bm5tu3r9iQQhIweN32TgqLCc,3692
-llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py,sha256=
+llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py,sha256=CL4NqI4wx_oeUCBvrXhDz3W8_OAS9-kvjY9KcYDD3KM,5224
 llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py,sha256=kQ3RGyg_9vbe7oYzbl11Dzu-tHPY1z2-SBBSgHHwPfM,143
 llm_bridge/logic/chat_generate/model_message_converter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py,sha256=YiPqMkybCXrsAJOFcUfPOHXdMkn3mZxq7gft_W449dA,2439

@@ -46,7 +47,7 @@ llm_bridge/logic/message_preprocess/document_processor.py,sha256=IsVqoFgWNa9i8cR
 llm_bridge/logic/message_preprocess/file_type_checker.py,sha256=nkrVki1a2udCeVqUnfIVi7Wxx8OMKbBuHw3FOlm17uo,1603
 llm_bridge/logic/message_preprocess/message_preprocessor.py,sha256=VR4__ip4ytAo62DHn9HeeYdbcx5lWItBnKsm9l3gmY4,1924
 llm_bridge/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/resources/model_prices.json,sha256=
+llm_bridge/resources/model_prices.json,sha256=WSTvAlTJm_gzGHysE1ma1pXtVt4ppJrtPdb2R2ZbHxA,2674
 llm_bridge/type/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/type/chat_response.py,sha256=6sz6g4Ns720Q-k6-7YYqrT2mANArKng4Svc8WxCP6I8,483
 llm_bridge/type/message.py,sha256=NyWmSSrciFfvF81aBwAH8qFpo5IpRhh8QXMselbYen8,370

@@ -56,7 +57,7 @@ llm_bridge/type/model_message/claude_message.py,sha256=gYJUTbLUeifQMva3Axarc-VFe
 llm_bridge/type/model_message/gemini_message.py,sha256=mh8pf929g7_NkBzSOwnLXyrwSzTT4yt2FmyX7NZn0sM,4302
 llm_bridge/type/model_message/openai_message.py,sha256=xFaLY-cZoSwNd7E9BSWQjBNcRfCVH11X9s2yxXlctR0,453
 llm_bridge/type/model_message/openai_responses_message.py,sha256=be1q2euA0ybjj4NO6NxOGIRB9eJuXSb4ssUm_bM4Ocs,1529
-llm_bridge-1.15.1.dist-info/METADATA,sha256=
-llm_bridge-1.15.1.dist-info/WHEEL,sha256=
-llm_bridge-1.15.1.dist-info/licenses/LICENSE,sha256=
-llm_bridge-1.15.1.dist-info/RECORD,,
+llm_bridge-1.15.3.dist-info/METADATA,sha256=620d7VMkdOWX33-9w9nFGbhKLpAmcmVjgBxCA7p0VGU,3417
+llm_bridge-1.15.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+llm_bridge-1.15.3.dist-info/licenses/LICENSE,sha256=m6uon-6P_CaiqcBfApMfjG9YRtDxcr40Z52JcqUCEAE,1069
+llm_bridge-1.15.3.dist-info/RECORD,,
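
RECORD hashes follow the wheel convention (PEP 376 as used by PEP 427): urlsafe base64 of the raw SHA-256 digest with padding stripped, followed by the file size in bytes. A sketch for verifying one locally:

```python
import base64
import hashlib


def record_hash(payload: bytes) -> str:
    """Return the 'sha256=...' token RECORD uses for a file's contents."""
    digest = hashlib.sha256(payload).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()
```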
{llm_bridge-1.15.1.dist-info → llm_bridge-1.15.3.dist-info}/WHEEL: file without changes
{llm_bridge-1.15.1.dist-info → llm_bridge-1.15.3.dist-info}/licenses/LICENSE: file without changes