LLM-Bridge 1.15.2.tar.gz → 1.15.3.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/PKG-INFO +2 -2
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/README.md +1 -1
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/claude/claude_response_handler.py +37 -26
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -1
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +7 -47
- llm_bridge-1.15.3/llm_bridge/client/implementations/openai/openai_responses_response_handler.py +129 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/openai/openai_token_couter.py +4 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +8 -30
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/model_client/openai_client.py +3 -1
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +10 -4
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/pyproject.toml +1 -1
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/usage/main.py +6 -6
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/uv.lock +1 -1
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/.gitattributes +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/.github/workflows/python-publish.yml +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/.gitignore +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/LICENSE +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/MANIFEST.in +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/chat_client.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/claude/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/openai/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/printing_status.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/model_client/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/model_client/claude_client.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/model_client/gemini_client.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/file_fetch.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/model_prices.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/resources/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/resources/model_prices.json +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/type/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/type/chat_response.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/type/message.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/type/model_message/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/type/model_message/claude_message.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/type/model_message/gemini_message.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/type/model_message/openai_message.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/type/serializer.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/tests/__init__.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/tests/chat_client_factory_test.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/tests/message_preprocessor_test.py +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/usage/.env.example +0 -0
- {llm_bridge-1.15.2 → llm_bridge-1.15.3}/usage/workflow.py +0 -0
{llm_bridge-1.15.2 → llm_bridge-1.15.3}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.15.2
+Version: 1.15.3
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
@@ -50,7 +50,7 @@ The features listed represent the maximum capabilities of each API type supported
 |----------|--------------------------------|---------------------------------------------------------|-------------------|
 | OpenAI   | Text, Image, PDF               | Thinking, Web Search, Code Execution                    | Text, Image       |
 | Gemini   | Text, Image, Video, Audio, PDF | Thinking, Web Search, Code Execution, Structured Output | Text, Image, File |
-| Claude   | Text, Image, PDF               | Thinking, Web Search, Code Execution                    | Text              |
+| Claude   | Text, Image, PDF               | Thinking, Web Search, Code Execution                    | Text, File        |
 | Grok     | Text, Image                    |                                                         | Text              |
 
 #### Planned Features
{llm_bridge-1.15.2 → llm_bridge-1.15.3}/README.md

@@ -26,7 +26,7 @@ The features listed represent the maximum capabilities of each API type supported
 |----------|--------------------------------|---------------------------------------------------------|-------------------|
 | OpenAI   | Text, Image, PDF               | Thinking, Web Search, Code Execution                    | Text, Image       |
 | Gemini   | Text, Image, Video, Audio, PDF | Thinking, Web Search, Code Execution, Structured Output | Text, Image, File |
-| Claude   | Text, Image, PDF               | Thinking, Web Search, Code Execution                    | Text              |
+| Claude   | Text, Image, PDF               | Thinking, Web Search, Code Execution                    | Text, File        |
 | Grok     | Text, Image                    |                                                         | Text              |
 
 #### Planned Features
{llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/claude/claude_response_handler.py

@@ -73,6 +73,39 @@ async def process_content_block(
     )
 
 
+async def build_chat_response_with_tokens(
+        text: str,
+        thought: str,
+        code: str,
+        code_output: str,
+        files: list[File],
+        input_tokens: int,
+        client: AsyncAnthropic,
+        model: str,
+) -> ChatResponse:
+    chat_response = ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        files=files,
+    )
+    output_tokens = await count_claude_output_tokens(
+        client=client,
+        model=model,
+        chat_response=chat_response,
+    )
+    return ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        files=files,
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
+    )
+
+
 async def process_claude_non_stream_response(
         message: BetaMessage,
         input_tokens: int,
@@ -93,26 +126,15 @@ async def process_claude_non_stream_response(
         code_output += content_block_chat_response.code_output
         files.extend(content_block_chat_response.files)
 
-    chat_response = ChatResponse(
+    return await build_chat_response_with_tokens(
         text=text,
         thought=thought,
         code=code,
         code_output=code_output,
         files=files,
-    )
-    output_tokens = await count_claude_output_tokens(
+        input_tokens=input_tokens,
         client=client,
         model=model,
-        chat_response=chat_response,
-    )
-    return ChatResponse(
-        text=text,
-        thought=thought,
-        code=code,
-        code_output=code_output,
-        files=files,
-        input_tokens=input_tokens,
-        output_tokens=output_tokens,
     )
 
 
@@ -152,24 +174,13 @@ async def process_claude_stream_response(
         code_output += content_block_chat_response.code_output
         files.extend(content_block_chat_response.files)
 
-    chat_response = ChatResponse(
+    return await build_chat_response_with_tokens(
         text=text,
         thought=thought,
         code=code,
         code_output=code_output,
         files=files,
-    )
-    output_tokens = await count_claude_output_tokens(
+        input_tokens=input_tokens,
         client=client,
         model=model,
-        chat_response=chat_response,
-    )
-    return ChatResponse(
-        text=text,
-        thought=thought,
-        code=code,
-        code_output=code_output,
-        files=files,
-        input_tokens=input_tokens,
-        output_tokens=output_tokens,
     )
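Both Claude response paths now end in the same call: assemble a draft `ChatResponse`, count its output tokens, then return the final response carrying both token counts. A minimal, self-contained sketch of that shared-epilogue pattern; the dataclass and token counter below are stand-ins, not the package's real `ChatResponse` or `count_claude_output_tokens` (which calls the Anthropic token-counting API):

```python
import asyncio
from dataclasses import dataclass, field


@dataclass
class ChatResponse:
    # Reduced stand-in for llm_bridge.type.chat_response.ChatResponse.
    text: str = ""
    thought: str = ""
    code: str = ""
    code_output: str = ""
    files: list = field(default_factory=list)
    input_tokens: int = 0
    output_tokens: int = 0


async def count_output_tokens(chat_response: ChatResponse) -> int:
    # Stand-in for count_claude_output_tokens; the real helper asks the
    # Anthropic token-counting endpoint how large the response is.
    return len(chat_response.text.split())


async def build_chat_response_with_tokens(input_tokens: int, **parts) -> ChatResponse:
    # Same shape as the new helper: build a draft to count output tokens,
    # then return the final response with both counts attached.
    draft = ChatResponse(**parts)
    output_tokens = await count_output_tokens(draft)
    return ChatResponse(**parts, input_tokens=input_tokens, output_tokens=output_tokens)


async def main() -> None:
    response = await build_chat_response_with_tokens(input_tokens=12, text="2 + 2 = 4")
    print(response.input_tokens, response.output_tokens)  # 12 5


asyncio.run(main())
```

Funneling both handlers through one helper keeps the non-stream and stream token accounting from drifting apart.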
{llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py

@@ -5,57 +5,16 @@ import httpx
 import openai
 from fastapi import HTTPException
 from openai import APIStatusError
-from openai.types.responses import Response, ResponseOutputItem, ResponseOutputMessage, \
-    ResponseOutputText, ResponseReasoningItem
+from openai.types.responses import Response
 
-from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens, \
-    count_openai_output_tokens
+from llm_bridge.client.implementations.openai.openai_responses_response_handler import \
+    process_openai_responses_non_stream_response
+from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens
 from llm_bridge.client.model_client.openai_client import OpenAIClient
-from llm_bridge.type.chat_response import ChatResponse, File
+from llm_bridge.type.chat_response import ChatResponse
 from llm_bridge.type.serializer import serialize
 
 
-def process_openai_responses_non_stream_response(
-        response: Response,
-        input_tokens: int,
-) -> ChatResponse:
-
-    output_list: list[ResponseOutputItem] = response.output
-
-    text: str = ""
-    thought: str = ""
-    files: list[File] = []
-
-    for output in output_list:
-        if output.type == "message":
-            output_message: ResponseOutputMessage = output
-            for content in output_message.content:
-                if content.type == "output_text":
-                    output_text: ResponseOutputText = content
-                    text += output_text.text
-        elif output.type == "reasoning":
-            reasoning_item: ResponseReasoningItem = output
-            for summary_delta in reasoning_item.summary:
-                thought += summary_delta.text
-        if output.type == "image_generation_call":
-            file = File(
-                name="generated_image.png",
-                data=output.result,
-                type="image/png",
-            )
-            files.append(file)
-
-    chat_response = ChatResponse(text=text, files=files)
-    output_tokens = count_openai_output_tokens(chat_response)
-    return ChatResponse(
-        text=text,
-        thought=thought,
-        files=files,
-        input_tokens=input_tokens,
-        output_tokens=output_tokens,
-    )
-
-
 class NonStreamOpenAIResponsesClient(OpenAIClient):
     async def generate_non_stream_response(self) -> ChatResponse:
         try:
@@ -72,10 +31,11 @@ class NonStreamOpenAIResponsesClient(OpenAIClient):
                 temperature=self.temperature,
                 stream=False,
                 tools=self.tools,
+                include=self.include,
                 # text_format=self.structured_output_base_model, # Async OpenAPI Responses Client does not support structured output
             )
 
-            return process_openai_responses_non_stream_response(
+            return await process_openai_responses_non_stream_response(
                 response=response,
                 input_tokens=input_tokens,
             )
llm_bridge-1.15.3/llm_bridge/client/implementations/openai/openai_responses_response_handler.py (ADDED)

@@ -0,0 +1,129 @@
+from openai.types.responses import Response, ResponseOutputItem, ResponseOutputMessage, \
+    ResponseOutputText, ResponseReasoningItem
+from openai.types.responses import ResponseStreamEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseTextDeltaEvent, \
+    ResponseCodeInterpreterCallCodeDeltaEvent, ResponseImageGenCallPartialImageEvent, ResponseOutputItemDoneEvent, \
+    ResponseCodeInterpreterToolCall
+from openai.types.responses.response_code_interpreter_tool_call import Output, OutputLogs, OutputImage
+from openai.types.responses.response_output_item import ImageGenerationCall
+
+from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_output_tokens
+from llm_bridge.logic.chat_generate.media_processor import get_base64_content_from_url
+from llm_bridge.type.chat_response import ChatResponse, File
+
+
+async def process_code_interpreter_outputs(interpreter_outputs: list[Output]) -> tuple[str, list[File]]:
+    code_output: str = ""
+    files: list[File] = []
+
+    for interpreter_output in interpreter_outputs:
+        if interpreter_output.type == "logs":
+            output_logs: OutputLogs = interpreter_output
+            code_output += output_logs.logs
+        if interpreter_output.type == "image":
+            output_image: OutputImage = interpreter_output
+            data, _ = await get_base64_content_from_url(output_image.url)
+            file = File(
+                name="code_interpreter_call_output.png",
+                data=data,
+                type="image/png",
+            )
+            files.append(file)
+
+    return code_output, files
+
+
+async def process_openai_responses_non_stream_response(
+        response: Response,
+        input_tokens: int,
+) -> ChatResponse:
+
+    output_list: list[ResponseOutputItem] = response.output
+
+    text: str = ""
+    thought: str = ""
+    code: str = ""
+    code_output: str = ""
+    files: list[File] = []
+
+    for output in output_list:
+        if output.type == "message":
+            output_message: ResponseOutputMessage = output
+            for content in output_message.content:
+                if content.type == "output_text":
+                    output_text: ResponseOutputText = content
+                    text += output_text.text
+        elif output.type == "reasoning":
+            reasoning_item: ResponseReasoningItem = output
+            for summary_delta in reasoning_item.summary:
+                thought += summary_delta.text
+        elif output.type == "code_interpreter_call":
+            code_interpreter_tool_call: ResponseCodeInterpreterToolCall = output
+            if interpreter_code := code_interpreter_tool_call.code:
+                code += interpreter_code
+            if interpreter_outputs := code_interpreter_tool_call.outputs:
+                interpreter_code_output, interpreter_files = await process_code_interpreter_outputs(interpreter_outputs)
+                code_output += interpreter_code_output
+                files.extend(interpreter_files)
+        elif output.type == "image_generation_call":
+            image_generation_call: ImageGenerationCall = output
+            file = File(
+                name="image_generation_call_output.png",
+                data=image_generation_call.result,
+                type="image/png",
+            )
+            files.append(file)
+
+    chat_response = ChatResponse(text=text, files=files)
+    output_tokens = count_openai_output_tokens(chat_response)
+    return ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        files=files,
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
+    )
+
+
+async def process_openai_responses_stream_response(event: ResponseStreamEvent) -> ChatResponse:
+    text: str = ""
+    thought: str = ""
+    code: str = ""
+    code_output: str = ""
+    files: list[File] = []
+
+    if event.type == "response.output_text.delta":
+        text_delta_event: ResponseTextDeltaEvent = event
+        text = text_delta_event.delta
+    elif event.type == "response.reasoning_summary_text.delta":
+        reasoning_summary_text_delta_event: ResponseReasoningSummaryTextDeltaEvent = event
+        thought = reasoning_summary_text_delta_event.delta
+    elif event.type == "response.code_interpreter_call_code.delta":
+        code_interpreter_call_code_delta_event: ResponseCodeInterpreterCallCodeDeltaEvent = event
+        code = code_interpreter_call_code_delta_event.delta
+    elif event.type == "response.output_item.done":
+        output_item_done_event: ResponseOutputItemDoneEvent = event
+        if output_item_done_event.item.type == "code_interpreter_call":
+            code_interpreter_tool_call: ResponseCodeInterpreterToolCall = output_item_done_event.item
+            if interpreter_outputs := code_interpreter_tool_call.outputs:
+                interpreter_code_output, interpreter_files = await process_code_interpreter_outputs(interpreter_outputs)
+                code_output += interpreter_code_output
+                files.extend(interpreter_files)
+    elif event.type == "response.image_generation_call.partial_image":
+        image_gen_call_partial_image_event: ResponseImageGenCallPartialImageEvent = event
+        file = File(
+            name="generated_image.png",
+            data=image_gen_call_partial_image_event.partial_image_b64,
+            type="image/png",
+        )
+        files.append(file)
+
+    chat_response = ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        files=files,
+    )
+    return chat_response
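The stream handler above is a per-event dispatcher: each Responses stream event carries a discriminating `type` string, and only the matching delta field is copied into an otherwise empty `ChatResponse`. A self-contained sketch of that dispatch shape; the event class here is a hypothetical stand-in for the distinct `openai.types.responses` event models:

```python
from dataclasses import dataclass


@dataclass
class FakeEvent:
    # Hypothetical stand-in for the openai.types.responses event models,
    # which are distinct classes discriminated by their `type` field.
    type: str
    delta: str = ""


def dispatch(event: FakeEvent) -> dict:
    # Mirrors the handler's shape: one branch per event type, everything
    # else left empty so the caller can accumulate deltas field by field.
    chunk = {"text": "", "thought": "", "code": ""}
    if event.type == "response.output_text.delta":
        chunk["text"] = event.delta
    elif event.type == "response.reasoning_summary_text.delta":
        chunk["thought"] = event.delta
    elif event.type == "response.code_interpreter_call_code.delta":
        chunk["code"] = event.delta
    return chunk


events = [
    FakeEvent("response.reasoning_summary_text.delta", "Plan: greet."),
    FakeEvent("response.output_text.delta", "Hello"),
    FakeEvent("response.output_text.delta", ", world"),
]
print("".join(dispatch(e)["text"] for e in events))  # Hello, world
```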
{llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/openai/openai_token_couter.py

@@ -35,6 +35,10 @@ def count_openai_responses_input_tokens(messages: list[OpenAIResponsesMessage])
 
 def count_openai_output_tokens(chat_response: ChatResponse) -> int:
     text = chat_response.text
+    if chat_response.thought:
+        text += chat_response.thought
+    if chat_response.code:
+        text += chat_response.code
     file_count = len(chat_response.files) if chat_response.files else 0
 
     return num_tokens_from_text(text) + file_count * 1000
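With this change the output-token estimate covers visible text, the reasoning summary, and generated code, plus a flat 1000 tokens per returned file. A quick sketch of the arithmetic, with the tokenizer stubbed as a whitespace split (the real `num_tokens_from_text` presumably uses an actual tokenizer):

```python
def num_tokens_from_text_stub(text: str) -> int:
    # Stand-in for num_tokens_from_text; a real count would use a tokenizer.
    return len(text.split())


def estimate_output_tokens(text: str, thought: str, code: str, file_count: int) -> int:
    # Concatenate every textual channel, then add a flat cost per file.
    combined = text
    if thought:
        combined += thought
    if code:
        combined += code
    return num_tokens_from_text_stub(combined) + file_count * 1000


# e.g. 5 words of text, 3 of reasoning, 2 of code, one generated image:
print(estimate_output_tokens("a b c d e", " f g h", " i j", 1))  # 10 + 1000 = 1010
```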
{llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py

@@ -6,53 +6,30 @@ import httpx
 import openai
 from fastapi import HTTPException
 from openai import APIStatusError, AsyncStream
-from openai.types.responses import ResponseStreamEvent, ResponseTextDeltaEvent, ResponseReasoningSummaryTextDeltaEvent
+from openai.types.responses import ResponseStreamEvent
 
+from llm_bridge.client.implementations.openai.openai_responses_response_handler import \
+    process_openai_responses_stream_response
 from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens, \
     count_openai_output_tokens
 from llm_bridge.client.model_client.openai_client import OpenAIClient
-from llm_bridge.type.chat_response import ChatResponse, File
+from llm_bridge.type.chat_response import ChatResponse
 from llm_bridge.type.serializer import serialize
 
 
-def process_delta(event: ResponseStreamEvent) -> ChatResponse:
-    text: str = ""
-    thought: str = ""
-    files: list[File] = []
-
-    if event.type == "response.output_text.delta":
-        text_delta_event: ResponseTextDeltaEvent = event
-        text = text_delta_event.delta
-    elif event.type == "response.reasoning_summary_text.delta":
-        reasoning_summary_text_delta_event: ResponseReasoningSummaryTextDeltaEvent = event
-        thought = reasoning_summary_text_delta_event.delta
-    if event.type == "response.image_generation_call.partial_image":
-        file = File(
-            name="generated_image.png",
-            data=event.partial_image_b64,
-            type="image/png",
-        )
-        files.append(file)
-
-    chat_response = ChatResponse(
-        text=text,
-        thought=thought,
-        files=files,
-    )
-    return chat_response
-
-
 async def generate_chunk(
         stream: AsyncStream[ResponseStreamEvent],
         input_tokens: int,
 ) -> AsyncGenerator[ChatResponse, None]:
     try:
         async for event in stream:
-            chat_response = process_delta(event)
+            chat_response = await process_openai_responses_stream_response(event)
             output_tokens = count_openai_output_tokens(chat_response)
             yield ChatResponse(
                 text=chat_response.text,
                 thought=chat_response.thought,
+                code=chat_response.code,
+                code_output=chat_response.code_output,
                 files=chat_response.files,
                 input_tokens=input_tokens,
                 output_tokens=output_tokens,
@@ -78,6 +55,7 @@ class StreamOpenAIResponsesClient(OpenAIClient):
                 temperature=self.temperature,
                 stream=True,
                 tools=self.tools,
+                include=self.include,
                 # text_format=self.structured_output_base_model, # Async OpenAPI Responses Client does not support structured output
             )
 
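`generate_chunk` yields one `ChatResponse` per stream event, so the caller accumulates the delta fields itself. A minimal consumption sketch with the stream faked as a plain list; the real generator wraps an `AsyncStream[ResponseStreamEvent]`:

```python
import asyncio
from dataclasses import dataclass


@dataclass
class Chunk:
    # Stand-in for the per-event ChatResponse yielded by generate_chunk.
    text: str = ""
    code: str = ""
    code_output: str = ""


async def fake_generate_chunk():
    # Hypothetical replacement for the real generator, which iterates
    # Responses stream events and dispatches each one.
    for chunk in [Chunk(code="print(1 + 1)"), Chunk(code_output="2\n"), Chunk(text="The sum is 2.")]:
        yield chunk


async def main() -> None:
    text, code, code_output = "", "", ""
    async for chunk in fake_generate_chunk():
        # Accumulate each channel separately, as a UI layer would.
        text += chunk.text
        code += chunk.code
        code_output += chunk.code_output
    print(code, "|", code_output.strip(), "|", text)


asyncio.run(main())
```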
{llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/client/model_client/openai_client.py

@@ -3,7 +3,7 @@ from pydantic import BaseModel
 
 import openai.lib.azure
 from openai.types import Reasoning
-from openai.types.responses import ToolParam
+from openai.types.responses import ToolParam, ResponseIncludable
 
 from llm_bridge.client.chat_client import ChatClient
 from llm_bridge.type.chat_response import ChatResponse
@@ -21,6 +21,7 @@ class OpenAIClient(ChatClient):
             client: openai.AsyncOpenAI | openai.lib.azure.AsyncAzureOpenAI,
             tools: Iterable[ToolParam],
             reasoning: Reasoning,
+            include: list[ResponseIncludable],
             structured_output_base_model: Type[BaseModel] | None = None,
     ):
         self.model = model
@@ -30,6 +31,7 @@ class OpenAIClient(ChatClient):
         self.client = client
         self.tools = tools
         self.reasoning = reasoning
+        self.include = include
         self.structured_output_base_model = structured_output_base_model
 
     async def generate_non_stream_response(self) -> ChatResponse:
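The includable is threaded through the constructor so both Responses clients can forward it on every `responses.create` call. A tiny stand-in showing the plumbing; the class is an abridged hypothetical, not the package's real `OpenAIClient`:

```python
class OpenAIClientSketch:
    # Abridged stand-in for llm_bridge's OpenAIClient: store the
    # includables once, forward them on every create() call.
    def __init__(self, model: str, include: list[str]):
        self.model = model
        self.include = include

    def create_kwargs(self) -> dict:
        # Both the stream and non-stream subclasses pass include= through.
        return {"model": self.model, "include": self.include}


client = OpenAIClientSketch("gpt-5.2", ["code_interpreter_call.outputs"])
print(client.create_kwargs())
```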
{llm_bridge-1.15.2 → llm_bridge-1.15.3}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py

@@ -3,10 +3,11 @@ from typing import Any
 
 import openai
 from fastapi import HTTPException
+from openai import Omit
 from openai.types import Reasoning
-from openai.types.responses import WebSearchToolParam
+from openai.types.responses import WebSearchToolParam, ResponseIncludable
 from openai.types.responses.tool_param import CodeInterpreter, CodeInterpreterContainerCodeInterpreterToolAuto, \
-    ImageGeneration
+    ImageGeneration, ToolParam
 
 from llm_bridge.client.implementations.openai.non_stream_openai_client import NonStreamOpenAIClient
 from llm_bridge.client.implementations.openai.non_stream_openai_responses_client import NonStreamOpenAIResponsesClient
@@ -29,6 +30,8 @@ async def create_openai_client(
         code_execution: bool,
         structured_output_schema: dict[str, Any] | None,
 ):
+    omit = Omit()
+
     if api_type == "OpenAI":
         client = openai.AsyncOpenAI(
             api_key=api_keys["OPENAI_API_KEY"],
@@ -62,8 +65,9 @@ async def create_openai_client(
     else:
         openai_messages = await convert_messages_to_openai(messages)
 
-    tools = []
-    reasoning = None
+    tools: list[ToolParam] = []
+    reasoning: Reasoning | Omit = omit
+    include: list[ResponseIncludable] = ["code_interpreter_call.outputs"]
 
     if model not in ["gpt-5-pro", "gpt-5.2-pro"] and "codex" not in model:
         if code_execution:
@@ -109,6 +113,7 @@ async def create_openai_client(
             client=client,
             tools=tools,
             reasoning=reasoning,
+            include=include,
             structured_output_base_model=structured_output_base_model,
         )
     else:
@@ -120,6 +125,7 @@ async def create_openai_client(
             client=client,
             tools=tools,
             reasoning=reasoning,
+            include=include,
             structured_output_base_model=structured_output_base_model,
         )
     else:
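`Omit` is the OpenAI SDK's sentinel for "leave this field out of the request entirely", which is not the same as sending an explicit `null`; typing the local as `Reasoning | Omit` lets one variable serve both the models that take a reasoning config and those that must not receive the field. A hedged sketch of the sentinel pattern, with the serializer as a stand-in (only the `from openai import Omit` line is taken from the diff):

```python
from openai import Omit  # sentinel type exported by the openai SDK

omit = Omit()


def build_request_body(reasoning: object) -> dict:
    # Stand-in serializer: an omitted field is dropped from the payload
    # rather than serialized as null.
    body = {"model": "gpt-5.2"}
    if not isinstance(reasoning, Omit):
        body["reasoning"] = reasoning
    return body


print(build_request_body(omit))                  # {'model': 'gpt-5.2'}
print(build_request_body({"effort": "medium"}))  # reasoning included
```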
{llm_bridge-1.15.2 → llm_bridge-1.15.3}/usage/main.py

@@ -70,7 +70,7 @@ structured_output_schema = {
         "price"
     ]
 }
-
+structured_output_schema = None
 
 messages = [
     Message(
@@ -101,8 +101,8 @@ messages = [
             # Content(type=ContentType.Text, data="What's the weather in NYC today?"),
 
             # Image Understanding
-            Content(type=ContentType.File, data="https://www.gstatic.com/webp/gallery3/1.png"),
-            Content(type=ContentType.Text, data="What is in this image?"),
+            # Content(type=ContentType.File, data="https://www.gstatic.com/webp/gallery3/1.png"),
+            # Content(type=ContentType.Text, data="What is in this image?"),
 
             # Image Generation
             # Content(type=ContentType.Text, data="Please generate an image of a cat."),
@@ -114,7 +114,7 @@ messages = [
             # Content(type=ContentType.Text, data="What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
 
             # File Output
-            # Content(type=ContentType.Text, data="Create a matplotlib visualization and save it as output.png"),
+            Content(type=ContentType.Text, data="Create a matplotlib visualization and save it as output.png"),
 
             # Structured Output
             # Content(type=ContentType.Text, data="Please generate a product."),
@@ -133,12 +133,12 @@ messages = [
     # ),
 ]
 # See /llm_bridge/resources/model_prices.json for available models
-# model = "gpt-5.2"
+model = "gpt-5.2"
 # model = "gpt-5.1"
 # model = "gpt-5-pro"
 # model = "gpt-5"
 # model = "gpt-4.1"
-model = "gpt-5-codex"
+# model = "gpt-5-codex"
 # model = "gemini-3-pro-preview"
 # model = "gemini-3-pro-image-preview"
 # model = "gemini-3-flash-preview"