LLM-Bridge 1.13.0__tar.gz → 1.14.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_bridge-1.14.0/.gitattributes +2 -0
- llm_bridge-1.14.0/.github/workflows/python-publish.yml +32 -0
- llm_bridge-1.14.0/.gitignore +160 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/PKG-INFO +38 -26
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/README.md +28 -10
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +3 -1
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +8 -9
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/resources/model_prices.json +12 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/pyproject.toml +22 -27
- llm_bridge-1.14.0/tests/__init__.py +0 -0
- llm_bridge-1.14.0/tests/chat_client_factory_test.py +20 -0
- llm_bridge-1.14.0/tests/message_preprocessor_test.py +26 -0
- llm_bridge-1.14.0/usage/.env.example +9 -0
- llm_bridge-1.14.0/usage/main.py +226 -0
- llm_bridge-1.14.0/usage/workflow.py +34 -0
- llm_bridge-1.14.0/uv.lock +1025 -0
- llm_bridge-1.13.0/LLM_Bridge.egg-info/PKG-INFO +0 -88
- llm_bridge-1.13.0/LLM_Bridge.egg-info/SOURCES.txt +0 -67
- llm_bridge-1.13.0/LLM_Bridge.egg-info/dependency_links.txt +0 -1
- llm_bridge-1.13.0/LLM_Bridge.egg-info/requires.txt +0 -17
- llm_bridge-1.13.0/LLM_Bridge.egg-info/top_level.txt +0 -1
- llm_bridge-1.13.0/setup.cfg +0 -4
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/LICENSE +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/MANIFEST.in +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/chat_client.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/claude/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/openai/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/implementations/printing_status.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/model_client/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/model_client/claude_client.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/model_client/gemini_client.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/client/model_client/openai_client.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/file_fetch.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/model_prices.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/resources/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/type/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/type/chat_response.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/type/message.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/type/model_message/__init__.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/type/model_message/claude_message.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/type/model_message/gemini_message.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/type/model_message/openai_message.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
- {llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/type/serializer.py +0 -0
llm_bridge-1.14.0/.github/workflows/python-publish.yml
@@ -0,0 +1,32 @@
+name: "Publish"
+
+on:
+  push:
+    tags:
+      # Publish on any tag starting with a `v`, e.g., v0.1.0
+      - v*
+
+jobs:
+  run:
+    runs-on: ubuntu-latest
+    environment:
+      name: pypi
+    permissions:
+      id-token: write
+      contents: read
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v5
+      - name: Install uv
+        uses: astral-sh/setup-uv@v7
+      - name: Install Python 3.12
+        run: uv python install 3.12
+      - name: Build
+        run: uv build
+      # Check that basic features work and we didn't miss to include crucial files
+      # - name: Smoke test (wheel)
+      #   run: uv run --isolated --no-project --with dist/*.whl tests/smoke_test.py
+      # - name: Smoke test (source distribution)
+      #   run: uv run --isolated --no-project --with dist/*.tar.gz tests/smoke_test.py
+      - name: Publish
+        run: uv publish
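The two commented-out smoke-test steps reference a `tests/smoke_test.py` that does not appear in this release's file list. A minimal sketch of what such a script could look like, assuming that importing the package and finding its bundled price table is a sufficient check:

```python
# Hypothetical tests/smoke_test.py -- not part of the package; a sketch only.
# Importing the package catches missing modules in the built artifact, and
# reading a bundled data file catches packaging omissions.
from importlib import resources

import llm_bridge  # noqa: F401

prices = resources.files("llm_bridge.resources").joinpath("model_prices.json").read_text()
assert prices.strip(), "model_prices.json missing from the distribution"
print("smoke test passed")
```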
llm_bridge-1.14.0/.gitignore
@@ -0,0 +1,160 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+.idea/
{llm_bridge-1.13.0 → llm_bridge-1.14.0}/PKG-INFO
@@ -1,32 +1,26 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.13.0
+Version: 1.14.0
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
-
+License-File: LICENSE
+Keywords: ai,llm
 Classifier: Framework :: FastAPI
 Classifier: Programming Language :: Python :: 3
 Requires-Python: >=3.12
-
-
+Requires-Dist: anthropic==0.75.0
+Requires-Dist: docxlatex>=1.1.1
 Requires-Dist: fastapi
+Requires-Dist: google-genai==1.46.0
 Requires-Dist: httpx
-Requires-Dist: tenacity
 Requires-Dist: openai==2.9.0
-Requires-Dist: tiktoken==0.11.0
-Requires-Dist: google-genai==1.46.0
-Requires-Dist: anthropic==0.75.0
-Requires-Dist: PyMuPDF
-Requires-Dist: docxlatex>=1.1.1
 Requires-Dist: openpyxl
+Requires-Dist: pymupdf
 Requires-Dist: python-pptx
-
-Requires-Dist: 
-
-Requires-Dist: python-dotenv; extra == "test"
-Requires-Dist: protobuf; extra == "test"
-Dynamic: license-file
+Requires-Dist: tenacity
+Requires-Dist: tiktoken==0.11.0
+Description-Content-Type: text/markdown
 
 # LLM Bridge
 
@@ -71,18 +65,36 @@ The features listed represent the maximum capabilities of each API type supporte
 pip install --upgrade llm_bridge
 ```
 
-## 
+## Development
+
+### Python uv
+
+1. Install uv: `powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"`
+2. Install Python in uv: `uv python install 3.12`; upgrade Python in uv: `uv python install 3.12`
+3. Configure requirements:
+    ```bash
+    uv sync
+    ```
+
+### Pycharm Professional
+
+1. Add New Interpreter >> Add Local Interpreter
+    - Environment: Select existing
+    - Type: uv
+2. Add New Configuration >> uv run >> script: `./usage/main.py`
+
+### Usage
+
+Copy `./usage/.env.example` and rename it to `./usage/.env`, then fill in the environment variables.
+
+### Test
 
 ```bash
-pytest
+uv run pytest
 ```
 
-
+### Build
 
-
-
-
-2. Install requirements: `pip install -r requirements.txt`
-3. In PyCharm, add a new Python configuration:
-   - script: `./usage/main.py`
-   - Paths to ".env" files: `./usage/.env`
+```bash
+uv build
+```
{llm_bridge-1.13.0 → llm_bridge-1.14.0}/README.md
@@ -41,18 +41,36 @@ The features listed represent the maximum capabilities of each API type supporte
 pip install --upgrade llm_bridge
 ```
 
-## 
+## Development
+
+### Python uv
+
+1. Install uv: `powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"`
+2. Install Python in uv: `uv python install 3.12`; upgrade Python in uv: `uv python install 3.12`
+3. Configure requirements:
+    ```bash
+    uv sync
+    ```
+
+### Pycharm Professional
+
+1. Add New Interpreter >> Add Local Interpreter
+    - Environment: Select existing
+    - Type: uv
+2. Add New Configuration >> uv run >> script: `./usage/main.py`
+
+### Usage
+
+Copy `./usage/.env.example` and rename it to `./usage/.env`, then fill in the environment variables.
+
+### Test
 
 ```bash
-pytest
+uv run pytest
 ```
 
-
-
-### Setup
+### Build
 
-
-
-
-   - script: `./usage/main.py`
-   - Paths to ".env" files: `./usage/.env`
+```bash
+uv build
+```
{llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py
@@ -61,7 +61,6 @@ async def create_gemini_client(
     config = types.GenerateContentConfig(
         system_instruction=system_instruction,
         temperature=temperature,
-        media_resolution=MediaResolution.MEDIA_RESOLUTION_HIGH,
         safety_settings=[
             types.SafetySetting(
                 category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
@@ -89,6 +88,9 @@ async def create_gemini_client(
         response_modalities=response_modalities,
     )
 
+    if vertexai:
+        config.media_resolution = MediaResolution.MEDIA_RESOLUTION_HIGH
+
     if structured_output_schema:
         config.response_mime_type = "application/json"
         config.response_json_schema = structured_output_schema
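Net effect of these two hunks: `media_resolution` is no longer passed unconditionally to `GenerateContentConfig`; it is assigned afterwards, and only when the factory targets Vertex AI. A minimal sketch of the resulting behavior (the `vertexai` flag mirrors the factory's parameter; the other values are placeholders):

```python
from google.genai import types

def build_config(vertexai: bool) -> types.GenerateContentConfig:
    # Sketch, not the factory itself: the config starts without a
    # media_resolution setting...
    config = types.GenerateContentConfig(
        system_instruction="placeholder system prompt",
        temperature=0.7,
    )
    # ...and high media resolution is applied only for Vertex AI clients.
    if vertexai:
        config.media_resolution = types.MediaResolution.MEDIA_RESOLUTION_HIGH
    return config
```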
{llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py
@@ -65,7 +65,7 @@ async def create_openai_client(
     tools = []
     reasoning = None
 
-    if model not in ["gpt-5-
+    if model not in ["gpt-5-pro", "gpt-5.2-pro"]:
         if code_execution:
             tools.append(
                 CodeInterpreter(
@@ -73,16 +73,15 @@ async def create_openai_client(
                     container=CodeInterpreterContainerCodeInterpreterToolAuto(type="auto")
                 )
             )
-
-
-
-
-                search_context_size="high",
-            )
+        tools.append(
+            WebSearchToolParam(
+                type="web_search",
+                search_context_size="high",
             )
-
+        )
+    if re.match(r"gpt-5.*", model):
         temperature = 1
-    if re.match(r"gpt-5.*", model)
+    if re.match(r"gpt-5.*", model):
         if thought:
             reasoning = Reasoning(
                 effort="high",
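Read together, the two hunks gate tool registration on the model name and pin the sampling temperature for the gpt-5 family. A condensed sketch of that logic (the model list and regex are copied from the diff; the `web_search` flag and surrounding structure are assumptions, since the original condition is clipped in this view):

```python
import re

from openai.types.responses import WebSearchToolParam

def select_tools_and_temperature(model: str, web_search: bool) -> tuple[list, float]:
    tools: list = []
    # The pro-tier models are excluded from tool use entirely.
    if model not in ["gpt-5-pro", "gpt-5.2-pro"]:
        if web_search:  # assumed guard; clipped in the diff view
            tools.append(
                WebSearchToolParam(type="web_search", search_context_size="high")
            )
    # Any gpt-5* model is forced to temperature 1.
    temperature = 1.0 if re.match(r"gpt-5.*", model) else 0.7
    return tools, temperature
```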
{llm_bridge-1.13.0 → llm_bridge-1.14.0}/llm_bridge/resources/model_prices.json
@@ -71,6 +71,12 @@
     "input": 2.5,
     "output": 15
   },
+  {
+    "apiType": "OpenAI",
+    "model": "gpt-5.2",
+    "input": 1.75,
+    "output": 14
+  },
   {
     "apiType": "OpenAI",
     "model": "gpt-5.1",
@@ -89,6 +95,12 @@
     "input": 0.25,
     "output": 2
   },
+  {
+    "apiType": "OpenAI",
+    "model": "gpt-5.2-pro",
+    "input": 21,
+    "output": 168
+  },
   {
     "apiType": "OpenAI",
     "model": "gpt-5-pro",
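For scale, the new gpt-5.2-pro entry is priced at exactly 12x gpt-5.2 on both input (21 vs 1.75) and output (168 vs 14). A small sketch of how the table can drive a cost estimate, assuming (as the surrounding entries suggest) that prices are USD per million tokens and the file is a flat JSON array of these records:

```python
import json
from importlib import resources

# Load the bundled price table (structure assumed from the hunk above).
raw = resources.files("llm_bridge.resources").joinpath("model_prices.json").read_text()
prices = {(p["apiType"], p["model"]): p for p in json.loads(raw)}

entry = prices[("OpenAI", "gpt-5.2")]
# Hypothetical request: 10,000 input tokens, 2,000 output tokens.
cost = (10_000 * entry["input"] + 2_000 * entry["output"]) / 1_000_000
print(f"~${cost:.4f}")  # 10000*1.75/1e6 + 2000*14/1e6 = 0.0455
```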
{llm_bridge-1.13.0 → llm_bridge-1.14.0}/pyproject.toml
@@ -1,47 +1,42 @@
 [build-system]
-requires = ["
-build-backend = "
+requires = ["hatchling"]
+build-backend = "hatchling.build"
 
 [project]
 name = "LLM-Bridge"
-version = "1.13.0"
-authors = [
-    {name = "windsnow1025", email = "windsnow1025@gmail.com"}
-]
-description = "A Bridge for LLMs"
-readme = "README.md"
-requires-python = ">=3.12"
-keywords = ["llm", "ai"]
-license = "MIT"
-classifiers = [
-    "Framework :: FastAPI",
-    "Programming Language :: Python :: 3",
-]
+version = "1.14.0"
 dependencies = [
     "fastapi",
     "httpx",
     "tenacity",
     "openai==2.9.0",
     "tiktoken==0.11.0",
-    "google-genai==1.46.0",
+    "google-genai==1.46.0", # google.genai
     "anthropic==0.75.0",
-    "PyMuPDF",
+    "PyMuPDF", # fitz
     "docxlatex>=1.1.1",
     "openpyxl",
-    "python-pptx",
+    "python-pptx", # pptx
+]
+requires-python = ">=3.12"
+authors = [
+    {name = "windsnow1025", email = "windsnow1025@gmail.com"}
+]
+description = "A Bridge for LLMs"
+readme = "README.md"
+license = "MIT"
+keywords = ["llm", "ai"]
+classifiers = [
+    "Framework :: FastAPI",
+    "Programming Language :: Python :: 3",
 ]
 
-[
-
+[dependency-groups]
+dev = [
     "pytest",
     "pytest-asyncio",
-    "python-dotenv",
-    "protobuf"
+    "python-dotenv", #dotenv
 ]
 
-[tool.setuptools.packages.find]
-where = ["."]
-include = ["llm_bridge*"]
-
 [tool.pytest.ini_options]
-asyncio_default_fixture_loop_scope = "function"
+asyncio_default_fixture_loop_scope = "function"
llm_bridge-1.14.0/tests/__init__.py
File without changes
llm_bridge-1.14.0/tests/chat_client_factory_test.py
@@ -0,0 +1,20 @@
+import pytest
+
+from llm_bridge.type.message import Message, Role, Content, ContentType
+
+
+@pytest.fixture
+def sample_messages():
+    return [
+        Message(role=Role.System, contents=[
+            Content(type=ContentType.Text, data="You are a helpful assistant.")
+        ]),
+        Message(role=Role.User, contents=[
+            Content(type=ContentType.Text, data="Hello")
+        ])
+    ]
+
+
+@pytest.mark.asyncio
+async def test_placeholder():
+    assert True
llm_bridge-1.14.0/tests/message_preprocessor_test.py
@@ -0,0 +1,26 @@
+import pytest
+
+from llm_bridge.logic.message_preprocess.message_preprocessor import extract_system_messages
+from llm_bridge.type.message import Message, Role, Content, ContentType
+
+
+@pytest.fixture
+def sample_messages():
+    return [
+        Message(role=Role.System, contents=[
+            Content(type=ContentType.Text, data="You are a helpful assistant.")
+        ]),
+        Message(role=Role.User, contents=[
+            Content(type=ContentType.Text, data="Hello")
+        ])
+    ]
+
+def test_extract_system_messages(sample_messages):
+    extracted_text = extract_system_messages(sample_messages)
+
+    assert extracted_text == "You are a helpful assistant.\n"
+
+    assert len(sample_messages) == 1
+    assert sample_messages[0].role == Role.User
+    assert sample_messages[0].contents[0].type == ContentType.Text
+    assert sample_messages[0].contents[0].data == "Hello"