LLM-Bridge 1.14.0a0__tar.gz → 1.14.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79) hide show
  1. llm_bridge-1.14.1/.gitattributes +2 -0
  2. llm_bridge-1.14.1/.github/workflows/python-publish.yml +32 -0
  3. llm_bridge-1.14.1/.gitignore +160 -0
  4. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/PKG-INFO +16 -24
  5. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/README.md +6 -13
  6. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +8 -9
  7. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/resources/model_prices.json +20 -32
  8. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/pyproject.toml +16 -20
  9. llm_bridge-1.14.1/tests/__init__.py +0 -0
  10. llm_bridge-1.14.1/tests/chat_client_factory_test.py +20 -0
  11. llm_bridge-1.14.1/tests/message_preprocessor_test.py +26 -0
  12. llm_bridge-1.14.1/usage/.env.example +9 -0
  13. llm_bridge-1.14.1/usage/main.py +226 -0
  14. llm_bridge-1.14.1/usage/workflow.py +34 -0
  15. llm_bridge-1.14.1/uv.lock +1027 -0
  16. llm_bridge-1.14.0a0/LLM_Bridge.egg-info/PKG-INFO +0 -102
  17. llm_bridge-1.14.0a0/LLM_Bridge.egg-info/SOURCES.txt +0 -67
  18. llm_bridge-1.14.0a0/LLM_Bridge.egg-info/dependency_links.txt +0 -1
  19. llm_bridge-1.14.0a0/LLM_Bridge.egg-info/requires.txt +0 -11
  20. llm_bridge-1.14.0a0/LLM_Bridge.egg-info/top_level.txt +0 -1
  21. llm_bridge-1.14.0a0/setup.cfg +0 -4
  22. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/LICENSE +0 -0
  23. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/MANIFEST.in +0 -0
  24. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/__init__.py +0 -0
  25. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/__init__.py +0 -0
  26. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/chat_client.py +0 -0
  27. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/__init__.py +0 -0
  28. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  29. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
  30. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  31. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  32. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  33. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  34. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
  35. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  36. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  37. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  38. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  39. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  40. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
  41. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  42. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
  43. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  44. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/implementations/printing_status.py +0 -0
  45. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/model_client/__init__.py +0 -0
  46. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/model_client/claude_client.py +0 -0
  47. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/model_client/gemini_client.py +0 -0
  48. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/client/model_client/openai_client.py +0 -0
  49. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/__init__.py +0 -0
  50. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  51. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  52. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  53. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  54. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  55. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  56. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  57. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +0 -0
  58. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  59. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  60. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  61. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  62. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  63. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/file_fetch.py +0 -0
  64. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  65. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  66. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  67. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  68. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  69. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/logic/model_prices.py +0 -0
  70. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/resources/__init__.py +0 -0
  71. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/type/__init__.py +0 -0
  72. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/type/chat_response.py +0 -0
  73. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/type/message.py +0 -0
  74. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/type/model_message/__init__.py +0 -0
  75. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/type/model_message/claude_message.py +0 -0
  76. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/type/model_message/gemini_message.py +0 -0
  77. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/type/model_message/openai_message.py +0 -0
  78. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  79. {llm_bridge-1.14.0a0 → llm_bridge-1.14.1}/llm_bridge/type/serializer.py +0 -0
@@ -0,0 +1,2 @@
1
+ # Auto detect text files and perform LF normalization
2
+ * text=auto
@@ -0,0 +1,32 @@
1
+ name: "Publish"
2
+
3
+ on:
4
+ push:
5
+ tags:
6
+ # Publish on any tag starting with a `v`, e.g., v0.1.0
7
+ - v*
8
+
9
+ jobs:
10
+ run:
11
+ runs-on: ubuntu-latest
12
+ environment:
13
+ name: pypi
14
+ permissions:
15
+ id-token: write
16
+ contents: read
17
+ steps:
18
+ - name: Checkout
19
+ uses: actions/checkout@v5
20
+ - name: Install uv
21
+ uses: astral-sh/setup-uv@v7
22
+ - name: Install Python 3.12
23
+ run: uv python install 3.12
24
+ - name: Build
25
+ run: uv build
26
+ # Check that basic features work and we didn't forget to include crucial files
27
+ # - name: Smoke test (wheel)
28
+ # run: uv run --isolated --no-project --with dist/*.whl tests/smoke_test.py
29
+ # - name: Smoke test (source distribution)
30
+ # run: uv run --isolated --no-project --with dist/*.tar.gz tests/smoke_test.py
31
+ - name: Publish
32
+ run: uv publish
@@ -0,0 +1,160 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # pdm
105
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
+ #pdm.lock
107
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
+ # in version control.
109
+ # https://pdm.fming.dev/#use-with-ide
110
+ .pdm.toml
111
+
112
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113
+ __pypackages__/
114
+
115
+ # Celery stuff
116
+ celerybeat-schedule
117
+ celerybeat.pid
118
+
119
+ # SageMath parsed files
120
+ *.sage.py
121
+
122
+ # Environments
123
+ .env
124
+ .venv
125
+ env/
126
+ venv/
127
+ ENV/
128
+ env.bak/
129
+ venv.bak/
130
+
131
+ # Spyder project settings
132
+ .spyderproject
133
+ .spyproject
134
+
135
+ # Rope project settings
136
+ .ropeproject
137
+
138
+ # mkdocs documentation
139
+ /site
140
+
141
+ # mypy
142
+ .mypy_cache/
143
+ .dmypy.json
144
+ dmypy.json
145
+
146
+ # Pyre type checker
147
+ .pyre/
148
+
149
+ # pytype static type analyzer
150
+ .pytype/
151
+
152
+ # Cython debug symbols
153
+ cython_debug/
154
+
155
+ # PyCharm
156
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
159
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160
+ .idea/
@@ -1,27 +1,26 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: LLM-Bridge
3
- Version: 1.14.0a0
3
+ Version: 1.14.1
4
4
  Summary: A Bridge for LLMs
5
5
  Author-email: windsnow1025 <windsnow1025@gmail.com>
6
6
  License-Expression: MIT
7
- Keywords: llm,ai
7
+ License-File: LICENSE
8
+ Keywords: ai,llm
8
9
  Classifier: Framework :: FastAPI
9
10
  Classifier: Programming Language :: Python :: 3
10
11
  Requires-Python: >=3.12
11
- Description-Content-Type: text/markdown
12
- License-File: LICENSE
12
+ Requires-Dist: anthropic==0.75.0
13
+ Requires-Dist: docxlatex>=1.1.1
13
14
  Requires-Dist: fastapi
15
+ Requires-Dist: google-genai==1.46.0
14
16
  Requires-Dist: httpx
15
- Requires-Dist: tenacity
16
17
  Requires-Dist: openai==2.9.0
17
- Requires-Dist: tiktoken==0.11.0
18
- Requires-Dist: google-genai==1.46.0
19
- Requires-Dist: anthropic==0.75.0
20
- Requires-Dist: PyMuPDF
21
- Requires-Dist: docxlatex>=1.1.1
22
18
  Requires-Dist: openpyxl
19
+ Requires-Dist: pymupdf
23
20
  Requires-Dist: python-pptx
24
- Dynamic: license-file
21
+ Requires-Dist: tenacity
22
+ Requires-Dist: tiktoken==0.11.0
23
+ Description-Content-Type: text/markdown
25
24
 
26
25
  # LLM Bridge
27
26
 
@@ -60,12 +59,6 @@ The features listed represent the maximum capabilities of each API type supporte
60
59
  - More features for API Types
61
60
  - Native support for Grok
62
61
 
63
- ## Installation
64
-
65
- ```bash
66
- pip install --upgrade llm_bridge
67
- ```
68
-
69
62
  ## Development
70
63
 
71
64
  ### Python uv
@@ -74,16 +67,15 @@ pip install --upgrade llm_bridge
74
67
  2. Install Python in uv: `uv python install 3.12`; upgrade Python in uv: `uv python upgrade 3.12`
75
68
  3. Configure requirements:
76
69
  ```bash
77
- uv sync
70
+ uv sync --refresh
78
71
  ```
79
72
 
80
- ### Pycharm
81
-
82
- Add New Configuration >> uv run
83
- - script: `./usage/main.py`
84
- - Paths to ".env" files: `./usage/.env`
73
+ ### Pycharm Professional
85
74
 
86
- If uv interpreter is not found, create a new project with uv.
75
+ 1. Add New Interpreter >> Add Local Interpreter
76
+ - Environment: Select existing
77
+ - Type: uv
78
+ 2. Add New Configuration >> uv run >> script: `./usage/main.py`
87
79
 
88
80
  ### Usage
89
81
 
@@ -35,12 +35,6 @@ The features listed represent the maximum capabilities of each API type supporte
35
35
  - More features for API Types
36
36
  - Native support for Grok
37
37
 
38
- ## Installation
39
-
40
- ```bash
41
- pip install --upgrade llm_bridge
42
- ```
43
-
44
38
  ## Development
45
39
 
46
40
  ### Python uv
@@ -49,16 +43,15 @@ pip install --upgrade llm_bridge
49
43
  2. Install Python in uv: `uv python install 3.12`; upgrade Python in uv: `uv python upgrade 3.12`
50
44
  3. Configure requirements:
51
45
  ```bash
52
- uv sync
46
+ uv sync --refresh
53
47
  ```
54
48
 
55
- ### Pycharm
56
-
57
- Add New Configuration >> uv run
58
- - script: `./usage/main.py`
59
- - Paths to ".env" files: `./usage/.env`
49
+ ### Pycharm Professional
60
50
 
61
- If uv interpreter is not found, create a new project with uv.
51
+ 1. Add New Interpreter >> Add Local Interpreter
52
+ - Environment: Select existing
53
+ - Type: uv
54
+ 2. Add New Configuration >> uv run >> script: `./usage/main.py`
62
55
 
63
56
  ### Usage
64
57
 
@@ -65,7 +65,7 @@ async def create_openai_client(
65
65
  tools = []
66
66
  reasoning = None
67
67
 
68
- if model not in ["gpt-5-chat-latest", "gpt-5-pro"]:
68
+ if model not in ["gpt-5-pro", "gpt-5.2-pro"]:
69
69
  if code_execution:
70
70
  tools.append(
71
71
  CodeInterpreter(
@@ -73,16 +73,15 @@ async def create_openai_client(
73
73
  container=CodeInterpreterContainerCodeInterpreterToolAuto(type="auto")
74
74
  )
75
75
  )
76
- if model not in ["gpt-5-chat-latest"]:
77
- tools.append(
78
- WebSearchToolParam(
79
- type="web_search",
80
- search_context_size="high",
81
- )
76
+ tools.append(
77
+ WebSearchToolParam(
78
+ type="web_search",
79
+ search_context_size="high",
82
80
  )
83
- if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
81
+ )
82
+ if re.match(r"gpt-5.*", model):
84
83
  temperature = 1
85
- if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
84
+ if re.match(r"gpt-5.*", model):
86
85
  if thought:
87
86
  reasoning = Reasoning(
88
87
  effort="high",
@@ -7,39 +7,27 @@
7
7
  },
8
8
  {
9
9
  "apiType": "Gemini-Vertex",
10
- "model": "gemini-3-pro-image-preview",
11
- "input": 2,
12
- "output": 120
13
- },
14
- {
15
- "apiType": "Gemini-Vertex",
16
- "model": "gemini-2.5-flash",
10
+ "model": "gemini-3-flash-preview",
17
11
  "input": 1,
18
- "output": 2.5
12
+ "output": 3
19
13
  },
20
14
  {
21
15
  "apiType": "Gemini-Vertex",
22
- "model": "gemini-2.5-pro",
23
- "input": 2.5,
24
- "output": 15
25
- },
26
- {
27
- "apiType": "Gemini-Free",
28
- "model": "gemini-flash-latest",
29
- "input": 0,
30
- "output": 0
16
+ "model": "gemini-3-pro-image-preview",
17
+ "input": 2,
18
+ "output": 120
31
19
  },
32
20
  {
33
21
  "apiType": "Gemini-Free",
34
- "model": "gemini-2.5-flash",
22
+ "model": "gemini-3-flash-preview",
35
23
  "input": 0,
36
24
  "output": 0
37
25
  },
38
26
  {
39
- "apiType": "Gemini-Free",
40
- "model": "gemini-2.5-pro",
41
- "input": 0,
42
- "output": 0
27
+ "apiType": "Gemini-Paid",
28
+ "model": "gemini-3-flash-preview",
29
+ "input": 1,
30
+ "output": 3
43
31
  },
44
32
  {
45
33
  "apiType": "Gemini-Paid",
@@ -60,16 +48,10 @@
60
48
  "output": 2.5
61
49
  },
62
50
  {
63
- "apiType": "Gemini-Paid",
64
- "model": "gemini-2.5-flash",
65
- "input": 1,
66
- "output": 2.5
67
- },
68
- {
69
- "apiType": "Gemini-Paid",
70
- "model": "gemini-2.5-pro",
71
- "input": 2.5,
72
- "output": 15
51
+ "apiType": "OpenAI",
52
+ "model": "gpt-5.2",
53
+ "input": 1.75,
54
+ "output": 14
73
55
  },
74
56
  {
75
57
  "apiType": "OpenAI",
@@ -89,6 +71,12 @@
89
71
  "input": 0.25,
90
72
  "output": 2
91
73
  },
74
+ {
75
+ "apiType": "OpenAI",
76
+ "model": "gpt-5.2-pro",
77
+ "input": 21,
78
+ "output": 168
79
+ },
92
80
  {
93
81
  "apiType": "OpenAI",
94
82
  "model": "gpt-5-pro",
@@ -1,22 +1,10 @@
1
1
  [build-system]
2
- requires = ["setuptools"]
3
- build-backend = "setuptools.build_meta"
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "LLM-Bridge"
7
- version = "1.14.0-alpha.0"
8
- authors = [
9
- {name = "windsnow1025", email = "windsnow1025@gmail.com"}
10
- ]
11
- description = "A Bridge for LLMs"
12
- readme = "README.md"
13
- requires-python = ">=3.12"
14
- keywords = ["llm", "ai"]
15
- license = "MIT"
16
- classifiers = [
17
- "Framework :: FastAPI",
18
- "Programming Language :: Python :: 3",
19
- ]
7
+ version = "1.14.1"
20
8
  dependencies = [
21
9
  "fastapi",
22
10
  "httpx",
@@ -30,6 +18,18 @@ dependencies = [
30
18
  "openpyxl",
31
19
  "python-pptx", # pptx
32
20
  ]
21
+ requires-python = ">=3.12"
22
+ authors = [
23
+ {name = "windsnow1025", email = "windsnow1025@gmail.com"}
24
+ ]
25
+ description = "A Bridge for LLMs"
26
+ readme = "README.md"
27
+ license = "MIT"
28
+ keywords = ["llm", "ai"]
29
+ classifiers = [
30
+ "Framework :: FastAPI",
31
+ "Programming Language :: Python :: 3",
32
+ ]
33
33
 
34
34
  [dependency-groups]
35
35
  dev = [
@@ -38,9 +38,5 @@ dev = [
38
38
  "python-dotenv", #dotenv
39
39
  ]
40
40
 
41
- [tool.setuptools.packages.find]
42
- where = ["."]
43
- include = ["llm_bridge*"]
44
-
45
41
  [tool.pytest.ini_options]
46
- asyncio_default_fixture_loop_scope = "function"
42
+ asyncio_default_fixture_loop_scope = "function"
File without changes
@@ -0,0 +1,20 @@
1
+ import pytest
2
+
3
+ from llm_bridge.type.message import Message, Role, Content, ContentType
4
+
5
+
6
+ @pytest.fixture
7
+ def sample_messages():
8
+ return [
9
+ Message(role=Role.System, contents=[
10
+ Content(type=ContentType.Text, data="You are a helpful assistant.")
11
+ ]),
12
+ Message(role=Role.User, contents=[
13
+ Content(type=ContentType.Text, data="Hello")
14
+ ])
15
+ ]
16
+
17
+
18
+ @pytest.mark.asyncio
19
+ async def test_placeholder():
20
+ assert True
@@ -0,0 +1,26 @@
1
+ import pytest
2
+
3
+ from llm_bridge.logic.message_preprocess.message_preprocessor import extract_system_messages
4
+ from llm_bridge.type.message import Message, Role, Content, ContentType
5
+
6
+
7
+ @pytest.fixture
8
+ def sample_messages():
9
+ return [
10
+ Message(role=Role.System, contents=[
11
+ Content(type=ContentType.Text, data="You are a helpful assistant.")
12
+ ]),
13
+ Message(role=Role.User, contents=[
14
+ Content(type=ContentType.Text, data="Hello")
15
+ ])
16
+ ]
17
+
18
+ def test_extract_system_messages(sample_messages):
19
+ extracted_text = extract_system_messages(sample_messages)
20
+
21
+ assert extracted_text == "You are a helpful assistant.\n"
22
+
23
+ assert len(sample_messages) == 1
24
+ assert sample_messages[0].role == Role.User
25
+ assert sample_messages[0].contents[0].type == ContentType.Text
26
+ assert sample_messages[0].contents[0].data == "Hello"
@@ -0,0 +1,9 @@
1
+ OPENAI_API_KEY=
2
+ GEMINI_FREE_API_KEY=
3
+ GEMINI_PAID_API_KEY=
4
+ GEMINI_VERTEX_API_KEY=
5
+ ANTHROPIC_API_KEY=
6
+ AZURE_API_KEY=
7
+ AZURE_API_BASE=
8
+ GITHUB_API_KEY=
9
+ XAI_API_KEY=