LLM-Bridge 1.15.3.tar.gz → 1.15.6.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/.github/workflows/python-publish.yml +2 -0
- llm_bridge-1.15.6/.github/workflows/python-test.yaml +31 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/.gitignore +71 -15
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/PKG-INFO +1 -1
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +2 -2
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/message_preprocess/file_type_checker.py +8 -5
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/message_preprocess/message_preprocessor.py +3 -3
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/model_prices.py +9 -5
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/pyproject.toml +1 -1
- llm_bridge-1.15.6/tests/file_type_checker_test.py +17 -0
- llm_bridge-1.15.6/tests/model_prices_test.py +10 -0
- llm_bridge-1.15.6/usage/config.py +129 -0
- llm_bridge-1.15.6/usage/main.py +100 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/uv.lock +1 -1
- llm_bridge-1.15.3/tests/chat_client_factory_test.py +0 -20
- llm_bridge-1.15.3/tests/message_preprocessor_test.py +0 -26
- llm_bridge-1.15.3/usage/main.py +0 -226
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/.gitattributes +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/LICENSE +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/MANIFEST.in +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/README.md +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/chat_client.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/claude/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/openai/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/openai/openai_responses_response_handler.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/implementations/printing_status.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/model_client/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/model_client/claude_client.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/model_client/gemini_client.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/client/model_client/openai_client.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/file_fetch.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/resources/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/resources/model_prices.json +11 -11
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/type/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/type/chat_response.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/type/message.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/type/model_message/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/type/model_message/claude_message.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/type/model_message/gemini_message.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/type/model_message/openai_message.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/type/serializer.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/tests/__init__.py +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/usage/.env.example +0 -0
- {llm_bridge-1.15.3 → llm_bridge-1.15.6}/usage/workflow.py +0 -0

llm_bridge-1.15.6/.github/workflows/python-test.yaml
ADDED
@@ -0,0 +1,31 @@
+# https://docs.astral.sh/uv/guides/integration/github/
+
+name: Python test
+
+permissions:
+  contents: read
+
+on:
+  push:
+    branches: [ "main" ]
+  pull_request:
+    branches: [ "main" ]
+
+jobs:
+  uv:
+    name: python
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v5
+
+      - name: Enable caching
+        uses: astral-sh/setup-uv@v7
+        with:
+          enable-cache: true
+
+      - name: Install the project
+        run: uv sync --locked --all-extras --dev
+
+      - name: Run tests
+        run: uv run pytest tests

{llm_bridge-1.15.3 → llm_bridge-1.15.6}/.gitignore
RENAMED
@@ -1,6 +1,6 @@
 # Byte-compiled / optimized / DLL files
 __pycache__/
-*.py[cod]
+*.py[codz]
 *$py.class

 # C extensions
@@ -27,8 +27,8 @@ share/python-wheels/
 MANIFEST

 # PyInstaller
-#  Usually these files are written by a python script from a template
-#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
 *.manifest
 *.spec

@@ -46,7 +46,7 @@ htmlcov/
 nosetests.xml
 coverage.xml
 *.cover
-*.py,cover
+*.py.cover
 .hypothesis/
 .pytest_cache/
 cover/
@@ -92,22 +92,37 @@ ipython_config.py
 # However, in case of collaboration, if having platform-specific dependencies or dependencies
 # having no cross-platform support, pipenv may install dependencies that don't work, or not
 # install all needed dependencies.
-#Pipfile.lock
+# Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# uv.lock

 # poetry
 # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
 # This is especially recommended for binary packages to ensure reproducibility, and is more
 # commonly ignored for libraries.
 # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-#poetry.lock
+# poetry.lock
+# poetry.toml

 # pdm
 # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
-#pdm.lock
-#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
-#   in version control.
-#   https://pdm.fming.dev/latest/usage/project/#working-with-version-control
-.pdm.toml
+# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+# pdm.lock
+# pdm.toml
+.pdm-python
+.pdm-build/
+
+# pixi
+# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+# pixi.lock
+# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+# in the .venv directory. It is recommended not to include this directory in version control.
+.pixi

 # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
 __pypackages__/
@@ -116,11 +131,25 @@ __pypackages__/
 celerybeat-schedule
 celerybeat.pid

+# Redis
+*.rdb
+*.aof
+*.pid
+
+# RabbitMQ
+mnesia/
+rabbitmq/
+rabbitmq-data/
+
+# ActiveMQ
+activemq-data/
+
 # SageMath parsed files
 *.sage.py

 # Environments
 .env
+.envrc
 .venv
 env/
 venv/
@@ -153,8 +182,35 @@ dmypy.json
 cython_debug/

 # PyCharm
-#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
-#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
-#  and can be added to the global gitignore or merged into this file.  For a more nuclear
-#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
 .idea/
+
+# Abstra
+# Abstra is an AI-powered process automation framework.
+# Ignore directories containing user credentials, local state, and settings.
+# Learn more at https://abstra.io/docs
+.abstra/
+
+# Visual Studio Code
+# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+# and can be added to the global gitignore or merged into this file. However, if you prefer,
+# you could uncomment the following to ignore the entire vscode folder
+# .vscode/
+
+# Ruff stuff:
+.ruff_cache/
+
+# PyPI configuration file
+.pypirc
+
+# Marimo
+marimo/_static/
+marimo/_lsp/
+__marimo__/
+
+# Streamlit
+.streamlit/secrets.toml

{llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py
RENAMED
@@ -2,7 +2,7 @@ from openai.types.responses import ResponseInputTextParam, ResponseInputImagePar
     ResponseInputContentParam, EasyInputMessageParam, ResponseOutputMessageParam, ResponseInputFileParam

 from llm_bridge.logic.chat_generate import media_processor
-from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type,
+from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type, get_filename_without_timestamp
 from llm_bridge.type.message import Message, ContentType
 from llm_bridge.type.model_message.openai_responses_message import OpenAIResponsesMessage

@@ -33,7 +33,7 @@ async def convert_message_to_openai_responses(message: Message) -> OpenAIRespons
             file_data, _ = await media_processor.get_base64_content_from_url(file_url)
             pdf_content = ResponseInputFileParam(
                 type="input_file",
-                filename=
+                filename=get_filename_without_timestamp(file_url),
                 file_data=f"data:application/pdf;base64,{file_data}",
             )
             content.append(pdf_content)
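
The converter change above pairs with the new get_filename_without_timestamp helper: instead of leaving the PDF part's name tied to the raw upload basename (which carries a millisecond-timestamp prefix), the part now gets the original filename. A minimal sketch of the resulting part, with hypothetical stand-in values for the URL and the base64 payload:

from openai.types.responses import ResponseInputFileParam

# Hypothetical inputs; in the real code file_data comes from
# media_processor.get_base64_content_from_url(file_url).
file_url = "https://example.com/uploads/1767243600000-report.pdf"
file_data = "JVBERi0xLjQ="  # base64 of the fetched PDF bytes

pdf_content = ResponseInputFileParam(
    type="input_file",
    filename="report.pdf",  # i.e. get_filename_without_timestamp(file_url)
    file_data=f"data:application/pdf;base64,{file_data}",
)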
{llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/message_preprocess/file_type_checker.py
RENAMED
@@ -1,6 +1,7 @@
 import mimetypes
 import os
 import re
+from pathlib import PurePosixPath

 from llm_bridge.logic.file_fetch import fetch_file_data
 from llm_bridge.logic.message_preprocess.code_file_extensions import code_file_extensions
@@ -14,9 +15,12 @@ def is_file_type_supported(file_name: str) -> bool:


 async def get_file_type(file_url: str) -> tuple[str, str]:
-    file_name =
+    file_name = get_filename_without_timestamp(file_url)
+
+    # Treat filenames without an extension as their own extension
+    suffix = PurePosixPath(file_name).suffix.lower()
+    file_extension = suffix if suffix else '.' + file_name.lower()

-    file_extension = '.' + file_name.split('.')[-1].lower()  # Treat filenames without an extension as their own extension
     if file_extension in code_file_extensions:
         return 'text', 'code'
     if file_extension == '.pdf':
@@ -41,9 +45,8 @@ async def get_file_type(file_url: str) -> tuple[str, str]:
     return 'unknown', 'unknown'


-
-
-    base_name = os.path.basename(file_url)
+def get_filename_without_timestamp(file_url: str) -> str:
+    base_name = PurePosixPath(file_url).name
     match = re.search(r'-(.+)', base_name)
     if match:
         return match.group(1)
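
Two behaviors of the rewritten module are easy to miss in the diff: PurePosixPath keeps URL handling consistent across operating systems (os.path.basename would split on "\" on Windows), and an extensionless name is treated as its own extension so names like Dockerfile can match code_file_extensions. A standalone sketch of both rules, assuming upload names carry a "<timestamp>-" prefix as in the new tests; the helper names here are illustrative, not the package's API:

import re
from pathlib import PurePosixPath

def filename_without_timestamp(file_url: str) -> str:
    # Same approach as the new helper: take the URL's basename and drop
    # everything up to and including the first "-".
    base_name = PurePosixPath(file_url).name
    match = re.search(r'-(.+)', base_name)
    return match.group(1) if match else base_name

def extension_of(file_name: str) -> str:
    # A real suffix wins; otherwise the whole lowercased name becomes the
    # extension, so "Dockerfile" -> ".dockerfile".
    suffix = PurePosixPath(file_name).suffix.lower()
    return suffix if suffix else '.' + file_name.lower()

assert filename_without_timestamp("https://example.com/1767243600000-Dockerfile") == "Dockerfile"
assert extension_of("Dockerfile") == ".dockerfile"
assert extension_of("document.PDF") == ".pdf"

The no-dash fallback above is an assumption; the diff only shows the matching branch of the real helper.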
{llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/message_preprocess/message_preprocessor.py
RENAMED
@@ -1,5 +1,5 @@
 from llm_bridge.logic.message_preprocess import document_processor
-from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type,
+from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type, get_filename_without_timestamp
 from llm_bridge.type.message import Message, Role, Content, ContentType


@@ -24,12 +24,12 @@ async def extract_text_files_to_message(message: Message, api_type: str) -> None
         if sub_type == "pdf" and api_type in ("OpenAI", "OpenAI-Azure", "Gemini-Vertex", "Gemini-Free", "Gemini-Paid", "Claude"):
             continue

-        filename =
+        filename = get_filename_without_timestamp(file_url)
         file_text = await document_processor.extract_text_from_file(file_url)

         message.contents[i] = Content(
             type=ContentType.Text,
-            data=f"{filename}
+            data=f"<file name=\"{filename}\">\n{file_text}\n</file>"
         )


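
The preprocessor now wraps extracted file text in an explicit tag rather than interpolating the bare filename, giving the model an unambiguous boundary between document name and document body. What the new Content.data looks like for a hypothetical extracted document:

filename = "Calculus.docx"  # hypothetical upload, timestamp already stripped
file_text = "Chapter 1: Limits ..."
data = f"<file name=\"{filename}\">\n{file_text}\n</file>"
print(data)
# <file name="Calculus.docx">
# Chapter 1: Limits ...
# </file>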

{llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/logic/model_prices.py
RENAMED
@@ -1,5 +1,5 @@
-import importlib.resources
 import json
+from importlib.resources import files
 from typing import TypedDict

 from fastapi import HTTPException
@@ -12,13 +12,17 @@ class ModelPrice(TypedDict):
     output: float


-def load_json_file(package, filename):
-
-
+def load_json_file(package: str, filename: str):
+    content = files(package).joinpath(filename).read_text(encoding="utf-8")
+    return json.loads(content)


 def get_model_prices() -> list[ModelPrice]:
-
+    prices = load_json_file("llm_bridge.resources", "model_prices.json")
+    for price in prices:
+        price["input"] = float(price["input"])
+        price["output"] = float(price["output"])
+    return prices


 def find_model_prices(api_type: str, model: str) -> ModelPrice | None:
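
load_json_file now goes through importlib.resources.files, which resolves packaged data whether the distribution is installed as a plain directory or zipped, and get_model_prices coerces prices to float so integer JSON values like "input": 4 come back as 4.0 (exactly what the new model_prices_test.py asserts). A minimal standalone reading of the same resource, assuming llm_bridge is installed:

import json
from importlib.resources import files

# files() returns a Traversable rooted at the package's data.
content = files("llm_bridge.resources").joinpath("model_prices.json").read_text(encoding="utf-8")
prices = json.loads(content)
print(prices[0]["model"], float(prices[0]["input"]))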

llm_bridge-1.15.6/tests/file_type_checker_test.py
ADDED
@@ -0,0 +1,17 @@
+import pytest
+
+from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type
+
+
+@pytest.mark.asyncio
+async def test_get_file_type_with_extension():
+    file_type, sub_type = await get_file_type("https://example.com/1767243600000-document.pdf")
+    assert file_type == "text"
+    assert sub_type == "pdf"
+
+
+@pytest.mark.asyncio
+async def test_get_file_type_without_extension():
+    file_type, sub_type = await get_file_type("https://example.com/1767243600000-Dockerfile")
+    assert file_type == "text"
+    assert sub_type == "code"

llm_bridge-1.15.6/tests/model_prices_test.py
ADDED
@@ -0,0 +1,10 @@
+from llm_bridge.logic.model_prices import get_model_prices
+
+
+def test_get_model_prices_types():
+    result = get_model_prices()
+    for model_price in result:
+        assert isinstance(model_price["apiType"], str)
+        assert isinstance(model_price["model"], str)
+        assert isinstance(model_price["input"], float)
+        assert isinstance(model_price["output"], float)

llm_bridge-1.15.6/usage/config.py
ADDED
@@ -0,0 +1,129 @@
+from llm_bridge import *
+
+structured_output_schema = {
+    "$schema": "https://json-schema.org/draft/2020-12/schema",
+    "$id": "https://example.com/product.schema.json",
+    "title": "Product",
+    "description": "A product from Acme's catalog",
+    "type": "object",
+    "properties": {
+        "productId": {
+            "description": "The unique identifier for a product",
+            "type": "integer"
+        },
+        "productName": {
+            "description": "Name of the product",
+            "type": "string"
+        },
+        "price": {
+            "description": "The price of the product",
+            "type": "number",
+            "exclusiveMinimum": 0
+        },
+        "tags": {
+            "description": "Tags for the product",
+            "type": "array",
+            "items": {
+                "type": "string"
+            },
+            "minItems": 1,
+            "uniqueItems": True
+        }
+    },
+    "required": [
+        "productId",
+        "productName",
+        "price"
+    ]
+}
+structured_output_schema = None
+
+messages = [
+    Message(
+        role=Role.System,
+        contents=[
+            Content(type=ContentType.Text, data="You are a helpful assistant.")
+        ]
+    ),
+    Message(
+        role=Role.User,
+        contents=[
+            Content(type=ContentType.Text, data="Hello")
+        ]
+    ),
+    Message(
+        role=Role.Assistant,
+        contents=[
+            Content(type=ContentType.Text, data="Hello! How can I assist you today?")
+        ]
+    ),
+    # Message(
+    #     role=Role.User,
+    #     contents=[
+    #         # Thinking
+    #         # Content(type=ContentType.Text, data="Explain the concept of Occam's Razor and provide a simple, everyday example."),
+    #
+    #         # Web Search
+    #         # Content(type=ContentType.Text, data="What's the weather in NYC today?"),
+    #
+    #         # Image Understanding
+    #         # Content(type=ContentType.File, data="https://www.gstatic.com/webp/gallery3/1.png"),
+    #         # Content(type=ContentType.Text, data="What is in this image?"),
+    #
+    #         # Image Generation
+    #         # Content(type=ContentType.Text, data="Please generate an image of a cat."),
+    #
+    #         # URL Context
+    #         # Content(type=ContentType.Text, data="What is in https://www.windsnow1025.com/"),
+    #
+    #         # Code Execution
+    #         # Content(type=ContentType.Text, data="What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
+    #
+    #         # File Output
+    #         # Content(type=ContentType.Text, data="Create a matplotlib visualization and save it as output.png"),
+    #
+    #         # Structured Output
+    #         # Content(type=ContentType.Text, data="Please generate a product."),
+    #     ]
+    # ),
+    Message(
+        role=Role.User,
+        contents=[
+            Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1769429581512-Test.txt"),
+            # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746208707489-image.png"),
+            # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746209841847-A%20Tutorial%20on%20Spectral%20Clustering.pdf"),
+            # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212253473-file_example_MP3_700KB.mp3"),
+            # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212980820-file_example_MP4_480_1_5MG.mp4"),
+            # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1753804900037-Calculus.docx"),
+            Content(type=ContentType.Text, data="What's this?"),
+        ]
+    ),
+]
+# See /llm_bridge/resources/model_prices.json for available models
+# model = "gpt-5.2"
+# model = "gpt-5.1"
+# model = "gpt-5-pro"
+# model = "gpt-5"
+# model = "gpt-4.1"
+# model = "gpt-5-codex"
+# model = "gemini-3-pro-preview"
+# model = "gemini-3-pro-image-preview"
+model = "gemini-3-flash-preview"
+# model = "grok-4-1-fast-reasoning"
+# model = "claude-sonnet-4-5"
+# model = "claude-opus-4-5"
+api_type = "Gemini-Vertex"
+# api_type = "Gemini-Free"
+# api_type = "Gemini-Paid"
+# api_type = "OpenAI"
+# api_type = "OpenAI-Azure"
+# api_type = "OpenAI-GitHub"
+# api_type = "Claude"
+# api_type = "Grok"
+temperature = 0
+stream = True
+# stream = False
+thought = True
+# thought = False
+code_execution = True
+# code_execution = False

llm_bridge-1.15.6/usage/main.py
ADDED
@@ -0,0 +1,100 @@
+import asyncio
+import logging
+import os
+import sys
+from pathlib import Path
+from pprint import pprint
+
+from dotenv import load_dotenv
+
+from usage.config import *
+from usage.workflow import workflow
+
+script_dir = Path(__file__).parent.resolve()
+
+# Env
+load_dotenv(script_dir / ".env")
+
+# Logging Output File
+output_path = script_dir / "output.log"
+output_path.parent.mkdir(parents=True, exist_ok=True)
+output_file = output_path.open("w", encoding="utf-8")
+sys.stdout = output_file
+
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(levelname)s - %(message)s',
+    stream=output_file
+)
+
+api_keys = {
+    "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY"),
+    "AZURE_API_KEY": os.environ.get("AZURE_API_KEY"),
+    "AZURE_API_BASE": os.environ.get("AZURE_API_BASE"),
+    "GITHUB_API_KEY": os.environ.get("GITHUB_API_KEY"),
+    "GEMINI_FREE_API_KEY": os.environ.get("GEMINI_FREE_API_KEY"),
+    "GEMINI_PAID_API_KEY": os.environ.get("GEMINI_PAID_API_KEY"),
+    "GEMINI_VERTEX_API_KEY": os.environ.get("GEMINI_VERTEX_API_KEY"),
+    "ANTHROPIC_API_KEY": os.environ.get("ANTHROPIC_API_KEY"),
+    "XAI_API_KEY": os.environ.get("XAI_API_KEY"),
+}
+
+
+async def main():
+    input_tokens = 0
+    output_tokens = 0
+    response = await workflow(
+        api_keys,
+        messages,
+        model,
+        api_type,
+        temperature,
+        stream,
+        thought,
+        code_execution,
+        structured_output_schema,
+    )
+    text = ""
+    thought_text = ""
+    code_text = ""
+    code_output_text = ""
+    files = []
+
+    if stream:
+        async for chunk in response:
+            pprint(chunk)
+            if chunk.text:
+                text += chunk.text
+            if chunk.thought:
+                thought_text += chunk.thought
+            if chunk.input_tokens:
+                input_tokens = chunk.input_tokens
+            if chunk.output_tokens:
+                output_tokens += chunk.output_tokens
+            if chunk.code:
+                code_text += chunk.code
+            if chunk.code_output:
+                code_output_text += chunk.code_output
+            if chunk.files:
+                files.extend(chunk.files)
+    else:
+        pprint(response)
+        text = response.text
+        thought_text = response.thought
+        code_text = response.code
+        code_output_text = response.code_output
+        input_tokens = response.input_tokens
+        output_tokens = response.output_tokens
+        files = response.files
+    total_cost = calculate_chat_cost(api_type, model, input_tokens, output_tokens)
+    print(f"Thought:\n{thought_text}\n")
+    print(f"Code:\n{code_text}\n")
+    print(f"Code Output:\n{code_output_text}\n")
+    print(f"Text:\n{text}\n")
+    print(f"Files:\n{files}\n")
+    print(f'Input tokens: {input_tokens}, Output tokens: {output_tokens}, Total cost: ${total_cost}')
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+    output_file.close()

llm_bridge-1.15.3/tests/chat_client_factory_test.py
DELETED
@@ -1,20 +0,0 @@
-import pytest
-
-from llm_bridge.type.message import Message, Role, Content, ContentType
-
-
-@pytest.fixture
-def sample_messages():
-    return [
-        Message(role=Role.System, contents=[
-            Content(type=ContentType.Text, data="You are a helpful assistant.")
-        ]),
-        Message(role=Role.User, contents=[
-            Content(type=ContentType.Text, data="Hello")
-        ])
-    ]
-
-
-@pytest.mark.asyncio
-async def test_placeholder():
-    assert True

llm_bridge-1.15.3/tests/message_preprocessor_test.py
DELETED
@@ -1,26 +0,0 @@
-import pytest
-
-from llm_bridge.logic.message_preprocess.message_preprocessor import extract_system_messages
-from llm_bridge.type.message import Message, Role, Content, ContentType
-
-
-@pytest.fixture
-def sample_messages():
-    return [
-        Message(role=Role.System, contents=[
-            Content(type=ContentType.Text, data="You are a helpful assistant.")
-        ]),
-        Message(role=Role.User, contents=[
-            Content(type=ContentType.Text, data="Hello")
-        ])
-    ]
-
-def test_extract_system_messages(sample_messages):
-    extracted_text = extract_system_messages(sample_messages)
-
-    assert extracted_text == "You are a helpful assistant.\n"
-
-    assert len(sample_messages) == 1
-    assert sample_messages[0].role == Role.User
-    assert sample_messages[0].contents[0].type == ContentType.Text
-    assert sample_messages[0].contents[0].data == "Hello"

llm_bridge-1.15.3/usage/main.py
DELETED
@@ -1,226 +0,0 @@
-import asyncio
-import logging
-import os
-import sys
-from pprint import pprint
-
-from dotenv import load_dotenv
-
-from llm_bridge import *
-from usage.workflow import workflow
-
-output_file = open("./usage/output.log", "w", encoding="utf-8")
-sys.stdout = output_file
-
-logging.basicConfig(
-    level=logging.INFO,
-    format='%(asctime)s - %(levelname)s - %(message)s',
-    stream=output_file
-)
-
-script_dir = os.path.dirname(os.path.abspath(__file__))
-env_path = os.path.join(script_dir, ".env")
-load_dotenv(env_path)
-
-api_keys = {
-    "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY"),
-    "AZURE_API_KEY": os.environ.get("AZURE_API_KEY"),
-    "AZURE_API_BASE": os.environ.get("AZURE_API_BASE"),
-    "GITHUB_API_KEY": os.environ.get("GITHUB_API_KEY"),
-    "GEMINI_FREE_API_KEY": os.environ.get("GEMINI_FREE_API_KEY"),
-    "GEMINI_PAID_API_KEY": os.environ.get("GEMINI_PAID_API_KEY"),
-    "GEMINI_VERTEX_API_KEY": os.environ.get("GEMINI_VERTEX_API_KEY"),
-    "ANTHROPIC_API_KEY": os.environ.get("ANTHROPIC_API_KEY"),
-    "XAI_API_KEY": os.environ.get("XAI_API_KEY"),
-}
-
-structured_output_schema = {
-    "$schema": "https://json-schema.org/draft/2020-12/schema",
-    "$id": "https://example.com/product.schema.json",
-    "title": "Product",
-    "description": "A product from Acme's catalog",
-    "type": "object",
-    "properties": {
-        "productId": {
-            "description": "The unique identifier for a product",
-            "type": "integer"
-        },
-        "productName": {
-            "description": "Name of the product",
-            "type": "string"
-        },
-        "price": {
-            "description": "The price of the product",
-            "type": "number",
-            "exclusiveMinimum": 0
-        },
-        "tags": {
-            "description": "Tags for the product",
-            "type": "array",
-            "items": {
-                "type": "string"
-            },
-            "minItems": 1,
-            "uniqueItems": True
-        }
-    },
-    "required": [
-        "productId",
-        "productName",
-        "price"
-    ]
-}
-structured_output_schema = None
-
-messages = [
-    Message(
-        role=Role.System,
-        contents=[
-            Content(type=ContentType.Text, data="You are a helpful assistant.")
-        ]
-    ),
-    Message(
-        role=Role.User,
-        contents=[
-            Content(type=ContentType.Text, data="Hello")
-        ]
-    ),
-    Message(
-        role=Role.Assistant,
-        contents=[
-            Content(type=ContentType.Text, data="Hello! How can I assist you today?")
-        ]
-    ),
-    Message(
-        role=Role.User,
-        contents=[
-            # Thinking
-            # Content(type=ContentType.Text, data="Explain the concept of Occam's Razor and provide a simple, everyday example."),
-
-            # Web Search
-            # Content(type=ContentType.Text, data="What's the weather in NYC today?"),
-
-            # Image Understanding
-            # Content(type=ContentType.File, data="https://www.gstatic.com/webp/gallery3/1.png"),
-            # Content(type=ContentType.Text, data="What is in this image?"),
-
-            # Image Generation
-            # Content(type=ContentType.Text, data="Please generate an image of a cat."),
-
-            # URL Context
-            # Content(type=ContentType.Text, data="What is in https://www.windsnow1025.com/"),
-
-            # Code Execution
-            # Content(type=ContentType.Text, data="What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
-
-            # File Output
-            Content(type=ContentType.Text, data="Create a matplotlib visualization and save it as output.png"),
-
-            # Structured Output
-            # Content(type=ContentType.Text, data="Please generate a product."),
-        ]
-    ),
-    # Message(
-    #     role=Role.User,
-    #     contents=[
-    #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746208707489-image.png"),
-    #         Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746209841847-A%20Tutorial%20on%20Spectral%20Clustering.pdf"),
-    #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212253473-file_example_MP3_700KB.mp3"),
-    #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212980820-file_example_MP4_480_1_5MG.mp4"),
-    #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1753804900037-Calculus.docx"),
-    #         Content(type=ContentType.Text, data="What's this?"),
-    #     ]
-    # ),
-]
-# See /llm_bridge/resources/model_prices.json for available models
-model = "gpt-5.2"
-# model = "gpt-5.1"
-# model = "gpt-5-pro"
-# model = "gpt-5"
-# model = "gpt-4.1"
-# model = "gpt-5-codex"
-# model = "gemini-3-pro-preview"
-# model = "gemini-3-pro-image-preview"
-# model = "gemini-3-flash-preview"
-# model = "grok-4-1-fast-reasoning"
-# model = "claude-sonnet-4-5"
-# model = "claude-opus-4-5"
-# api_type = "Gemini-Vertex"
-# api_type = "Gemini-Free"
-# api_type = "Gemini-Paid"
-api_type = "OpenAI"
-# api_type = "OpenAI-Azure"
-# api_type = "OpenAI-GitHub"
-# api_type = "Claude"
-# api_type = "Grok"
-temperature = 0
-stream = True
-# stream = False
-thought = True
-# thought = False
-code_execution = True
-# code_execution = False
-
-
-async def main():
-    model_prices = get_model_prices()
-    pprint(model_prices)
-    print(structured_output_schema)
-
-    input_tokens = 0
-    output_tokens = 0
-    response = await workflow(
-        api_keys,
-        messages,
-        model,
-        api_type,
-        temperature,
-        stream,
-        thought,
-        code_execution,
-        structured_output_schema,
-    )
-    text = ""
-    thought_text = ""
-    code_text = ""
-    code_output_text = ""
-    files = []
-
-    if stream:
-        async for chunk in response:
-            pprint(chunk)
-            if chunk.text:
-                text += chunk.text
-            if chunk.thought:
-                thought_text += chunk.thought
-            if chunk.input_tokens:
-                input_tokens = chunk.input_tokens
-            if chunk.output_tokens:
-                output_tokens += chunk.output_tokens
-            if chunk.code:
-                code_text += chunk.code
-            if chunk.code_output:
-                code_output_text += chunk.code_output
-            if chunk.files:
-                files.extend(chunk.files)
-    else:
-        pprint(response)
-        text = response.text
-        thought_text = response.thought
-        code_text = response.code
-        code_output_text = response.code_output
-        input_tokens = response.input_tokens
-        output_tokens = response.output_tokens
-        files = response.files
-    total_cost = calculate_chat_cost(api_type, model, input_tokens, output_tokens)
-    print(f"Thought:\n{thought_text}\n")
-    print(f"Code:\n{code_text}\n")
-    print(f"Code Output:\n{code_output_text}\n")
-    print(f"Text:\n{text}\n")
-    print(f"Files:\n{files}\n")
-    print(f'Input tokens: {input_tokens}, Output tokens: {output_tokens}, Total cost: ${total_cost}')
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
-    output_file.close()

{llm_bridge-1.15.3 → llm_bridge-1.15.6}/llm_bridge/resources/model_prices.json
RENAMED
@@ -1,46 +1,46 @@
 [
   {
-    "apiType": "Gemini-Paid",
+    "apiType": "Gemini-Vertex",
     "model": "gemini-3-pro-preview",
     "input": 4,
     "output": 18
   },
   {
-    "apiType": "Gemini-Paid",
+    "apiType": "Gemini-Vertex",
     "model": "gemini-3-flash-preview",
     "input": 1,
     "output": 3
   },
   {
-    "apiType": "Gemini-Paid",
+    "apiType": "Gemini-Vertex",
     "model": "gemini-3-pro-image-preview",
     "input": 2,
     "output": 120
   },
   {
     "apiType": "Gemini-Paid",
-    "model": "gemini-flash-latest",
-    "input": 1,
-    "output": 2.5
-  },
-  {
-    "apiType": "Gemini-Vertex",
     "model": "gemini-3-pro-preview",
     "input": 4,
     "output": 18
   },
   {
-    "apiType": "Gemini-Vertex",
+    "apiType": "Gemini-Paid",
     "model": "gemini-3-flash-preview",
     "input": 1,
     "output": 3
   },
   {
-    "apiType": "Gemini-Vertex",
+    "apiType": "Gemini-Paid",
     "model": "gemini-3-pro-image-preview",
     "input": 2,
     "output": 120
   },
+  {
+    "apiType": "Gemini-Paid",
+    "model": "gemini-flash-latest",
+    "input": 1,
+    "output": 2.5
+  },
   {
     "apiType": "Gemini-Free",
     "model": "gemini-3-flash-preview",
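
The reorder above moves the Gemini-Vertex rows to the front and shifts gemini-flash-latest to the end of the Gemini-Paid group; since lookups match on both apiType and model, the ordering is cosmetic. A hypothetical lookup mirroring what find_model_prices presumably does:

from llm_bridge.logic.model_prices import get_model_prices

def lookup(api_type: str, model: str):
    # First entry matching both keys wins; None when nothing matches.
    for price in get_model_prices():
        if price["apiType"] == api_type and price["model"] == model:
            return price
    return None

print(lookup("Gemini-Vertex", "gemini-3-pro-preview"))  # input 4.0, output 18.0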