LLM-Bridge 1.15.6__py3-none-any.whl → 1.15.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +13 -7
- llm_bridge/logic/message_preprocess/file_type_checker.py +3 -3
- {llm_bridge-1.15.6.dist-info → llm_bridge-1.15.8.dist-info}/METADATA +2 -8
- {llm_bridge-1.15.6.dist-info → llm_bridge-1.15.8.dist-info}/RECORD +6 -6
- {llm_bridge-1.15.6.dist-info → llm_bridge-1.15.8.dist-info}/WHEEL +0 -0
- {llm_bridge-1.15.6.dist-info → llm_bridge-1.15.8.dist-info}/licenses/LICENSE +0 -0
llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py
CHANGED
|
@@ -3,18 +3,19 @@ from openai.types.responses import ResponseInputTextParam, ResponseInputImagePar
|
|
|
3
3
|
|
|
4
4
|
from llm_bridge.logic.chat_generate import media_processor
|
|
5
5
|
from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type, get_filename_without_timestamp
|
|
6
|
-
from llm_bridge.type.message import Message, ContentType
|
|
6
|
+
from llm_bridge.type.message import Message, ContentType, Role
|
|
7
7
|
from llm_bridge.type.model_message.openai_responses_message import OpenAIResponsesMessage
|
|
8
8
|
|
|
9
9
|
|
|
10
10
|
async def convert_message_to_openai_responses(message: Message) -> OpenAIResponsesMessage:
|
|
11
|
-
role = message.role
|
|
11
|
+
role = message.role
|
|
12
12
|
content: list[ResponseInputContentParam | ResponseOutputTextParam] = []
|
|
13
|
+
contains_pdf = False
|
|
13
14
|
|
|
14
15
|
for content_item in message.contents:
|
|
15
16
|
if content_item.type == ContentType.Text:
|
|
16
|
-
if role == "assistant":  # (truncated in diff — old code compared the raw string; Role enum was introduced in 1.15.8)
|
|
17
|
-
text_content = ResponseOutputTextParam(type="output_text", text=content_item.data)
|
|
17
|
+
if role == Role.Assistant:
|
|
18
|
+
text_content = ResponseOutputTextParam(type="output_text", text=content_item.data, annotations=[])
|
|
18
19
|
else:
|
|
19
20
|
text_content = ResponseInputTextParam(type="input_text", text=content_item.data)
|
|
20
21
|
content.append(text_content)
|
|
@@ -30,6 +31,7 @@ async def convert_message_to_openai_responses(message: Message) -> OpenAIRespons
|
|
|
30
31
|
)
|
|
31
32
|
content.append(image_content)
|
|
32
33
|
elif sub_type == "pdf":
|
|
34
|
+
contains_pdf = True
|
|
33
35
|
file_data, _ = await media_processor.get_base64_content_from_url(file_url)
|
|
34
36
|
pdf_content = ResponseInputFileParam(
|
|
35
37
|
type="input_file",
|
|
@@ -52,7 +54,11 @@ async def convert_message_to_openai_responses(message: Message) -> OpenAIRespons
|
|
|
52
54
|
)
|
|
53
55
|
content.append(text_content)
|
|
54
56
|
|
|
55
|
-
|
|
56
|
-
|
|
57
|
+
# Force system role to user if the message contains a PDF
|
|
58
|
+
if role == Role.System and contains_pdf:
|
|
59
|
+
role = Role.User
|
|
60
|
+
|
|
61
|
+
if role in (Role.User, Role.System):
|
|
62
|
+
return EasyInputMessageParam(role=role.value, content=content)
|
|
57
63
|
else:
|
|
58
|
-
return ResponseOutputMessageParam(role=role, content=content)
|
|
64
|
+
return ResponseOutputMessageParam(role=role.value, content=content)
|
|
@@ -15,11 +15,11 @@ def is_file_type_supported(file_name: str) -> bool:
|
|
|
15
15
|
|
|
16
16
|
|
|
17
17
|
async def get_file_type(file_url: str) -> tuple[str, str]:
|
|
18
|
-
file_name = get_filename_without_timestamp(file_url)
|
|
18
|
+
file_name: str = get_filename_without_timestamp(file_url)
|
|
19
19
|
|
|
20
20
|
# Treat filenames without an extension as their own extension
|
|
21
|
-
suffix = PurePosixPath(file_name).suffix.lower()
|
|
22
|
-
file_extension = suffix if suffix else '.' + file_name.lower()
|
|
21
|
+
suffix: str = PurePosixPath(file_name).suffix.lower()
|
|
22
|
+
file_extension: str = suffix if suffix else '.' + file_name.lower()
|
|
23
23
|
|
|
24
24
|
if file_extension in code_file_extensions:
|
|
25
25
|
return 'text', 'code'
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: LLM-Bridge
|
|
3
|
-
Version: 1.15.6
|
|
3
|
+
Version: 1.15.8
|
|
4
4
|
Summary: A Bridge for LLMs
|
|
5
5
|
Author-email: windsnow1025 <windsnow1025@gmail.com>
|
|
6
6
|
License-Expression: MIT
|
|
@@ -24,7 +24,7 @@ Description-Content-Type: text/markdown
|
|
|
24
24
|
|
|
25
25
|
# LLM Bridge
|
|
26
26
|
|
|
27
|
-
LLM Bridge is a unified
|
|
27
|
+
LLM Bridge is a unified API wrapper for native interactions with various LLM providers.
|
|
28
28
|
|
|
29
29
|
GitHub: [https://github.com/windsnow1025/LLM-Bridge](https://github.com/windsnow1025/LLM-Bridge)
|
|
30
30
|
|
|
@@ -81,12 +81,6 @@ The features listed represent the maximum capabilities of each API type supporte
|
|
|
81
81
|
|
|
82
82
|
Copy `./usage/.env.example` and rename it to `./usage/.env`, then fill in the environment variables.
|
|
83
83
|
|
|
84
|
-
### Test
|
|
85
|
-
|
|
86
|
-
```bash
|
|
87
|
-
uv run pytest
|
|
88
|
-
```
|
|
89
|
-
|
|
90
84
|
### Build
|
|
91
85
|
|
|
92
86
|
```bash
|
|
@@ -40,11 +40,11 @@ llm_bridge/logic/chat_generate/model_message_converter/__init__.py,sha256=47DEQp
|
|
|
40
40
|
llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py,sha256=YiPqMkybCXrsAJOFcUfPOHXdMkn3mZxq7gft_W449dA,2439
|
|
41
41
|
llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py,sha256=m6IeeQ_-yKcyBwLcEO_1HOoQAXDR5nl0mz_DNSsjieo,1529
|
|
42
42
|
llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py,sha256=lmc-lUVZ_LgHcJZVB-l989TgrB4FtbCyGlRDp4eXycE,2179
|
|
43
|
-
llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py,sha256=
|
|
43
|
+
llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py,sha256=YscYmB9B5JRQFgrw3Mxk5Y9BkGnJtMt3tHjQzCHib3A,3242
|
|
44
44
|
llm_bridge/logic/message_preprocess/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
45
45
|
llm_bridge/logic/message_preprocess/code_file_extensions.py,sha256=5bsnSKC9PGbl6ZMy80sXfagAbz77pGjt6Z2-qwzUw48,9306
|
|
46
46
|
llm_bridge/logic/message_preprocess/document_processor.py,sha256=IsVqoFgWNa9i8cRsDAfmCynJMdlvBqiCKIT9kbx96kg,2861
|
|
47
|
-
llm_bridge/logic/message_preprocess/file_type_checker.py,sha256=
|
|
47
|
+
llm_bridge/logic/message_preprocess/file_type_checker.py,sha256=k4q9KGdaOV_TFDmZLeiBYBhRD8QeY4bqtaXZGbk_bzM,1733
|
|
48
48
|
llm_bridge/logic/message_preprocess/message_preprocessor.py,sha256=ECKHF6BHsucs7QZ8b4WgPDX9vKseziHSK7jaIBB9NgM,1979
|
|
49
49
|
llm_bridge/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
50
50
|
llm_bridge/resources/model_prices.json,sha256=dgjdMXcm0JxTZgcfVsJ8dbYnwtVmMBu84aUkp4sDyGw,2674
|
|
@@ -57,7 +57,7 @@ llm_bridge/type/model_message/claude_message.py,sha256=gYJUTbLUeifQMva3Axarc-VFe
|
|
|
57
57
|
llm_bridge/type/model_message/gemini_message.py,sha256=mh8pf929g7_NkBzSOwnLXyrwSzTT4yt2FmyX7NZn0sM,4302
|
|
58
58
|
llm_bridge/type/model_message/openai_message.py,sha256=xFaLY-cZoSwNd7E9BSWQjBNcRfCVH11X9s2yxXlctR0,453
|
|
59
59
|
llm_bridge/type/model_message/openai_responses_message.py,sha256=be1q2euA0ybjj4NO6NxOGIRB9eJuXSb4ssUm_bM4Ocs,1529
|
|
60
|
-
llm_bridge-1.15.6.dist-info/METADATA,sha256=
|
|
61
|
-
llm_bridge-1.15.6.dist-info/WHEEL,sha256=
|
|
62
|
-
llm_bridge-1.15.6.dist-info/licenses/LICENSE,sha256=
|
|
63
|
-
llm_bridge-1.15.6.dist-info/RECORD,,
|
|
60
|
+
llm_bridge-1.15.8.dist-info/METADATA,sha256=UGiRIxJ13auRHJJjEzu0zFCP4w44wx-u41iiEyjRAC8,3309
|
|
61
|
+
llm_bridge-1.15.8.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
62
|
+
llm_bridge-1.15.8.dist-info/licenses/LICENSE,sha256=m6uon-6P_CaiqcBfApMfjG9YRtDxcr40Z52JcqUCEAE,1069
|
|
63
|
+
llm_bridge-1.15.8.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|