LLM-Bridge 1.14.0.tar.gz → 1.14.0a0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0/LLM_Bridge.egg-info}/PKG-INFO +17 -15
  2. llm_bridge-1.14.0a0/LLM_Bridge.egg-info/SOURCES.txt +67 -0
  3. llm_bridge-1.14.0a0/LLM_Bridge.egg-info/dependency_links.txt +1 -0
  4. llm_bridge-1.14.0a0/LLM_Bridge.egg-info/requires.txt +11 -0
  5. llm_bridge-1.14.0a0/LLM_Bridge.egg-info/top_level.txt +1 -0
  6. llm_bridge-1.14.0a0/PKG-INFO +102 -0
  7. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/README.md +6 -5
  8. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +9 -8
  9. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/resources/model_prices.json +0 -12
  10. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/pyproject.toml +20 -16
  11. llm_bridge-1.14.0a0/setup.cfg +4 -0
  12. llm_bridge-1.14.0/.gitattributes +0 -2
  13. llm_bridge-1.14.0/.github/workflows/python-publish.yml +0 -32
  14. llm_bridge-1.14.0/.gitignore +0 -160
  15. llm_bridge-1.14.0/tests/__init__.py +0 -0
  16. llm_bridge-1.14.0/tests/chat_client_factory_test.py +0 -20
  17. llm_bridge-1.14.0/tests/message_preprocessor_test.py +0 -26
  18. llm_bridge-1.14.0/usage/.env.example +0 -9
  19. llm_bridge-1.14.0/usage/main.py +0 -226
  20. llm_bridge-1.14.0/usage/workflow.py +0 -34
  21. llm_bridge-1.14.0/uv.lock +0 -1025
  22. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/LICENSE +0 -0
  23. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/MANIFEST.in +0 -0
  24. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/__init__.py +0 -0
  25. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/__init__.py +0 -0
  26. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/chat_client.py +0 -0
  27. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/__init__.py +0 -0
  28. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  29. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
  30. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  31. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  32. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  33. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  34. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
  35. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  36. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  37. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  38. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  39. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  40. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
  41. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  42. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
  43. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  44. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/implementations/printing_status.py +0 -0
  45. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/model_client/__init__.py +0 -0
  46. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/model_client/claude_client.py +0 -0
  47. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/model_client/gemini_client.py +0 -0
  48. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/client/model_client/openai_client.py +0 -0
  49. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/__init__.py +0 -0
  50. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  51. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  52. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  53. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  54. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  55. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  56. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  57. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +0 -0
  58. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  59. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  60. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  61. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  62. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  63. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/file_fetch.py +0 -0
  64. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  65. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  66. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  67. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  68. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  69. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/logic/model_prices.py +0 -0
  70. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/resources/__init__.py +0 -0
  71. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/type/__init__.py +0 -0
  72. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/type/chat_response.py +0 -0
  73. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/type/message.py +0 -0
  74. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/type/model_message/__init__.py +0 -0
  75. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/type/model_message/claude_message.py +0 -0
  76. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/type/model_message/gemini_message.py +0 -0
  77. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/type/model_message/openai_message.py +0 -0
  78. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  79. {llm_bridge-1.14.0 → llm_bridge-1.14.0a0}/llm_bridge/type/serializer.py +0 -0
@@ -1,26 +1,27 @@
  Metadata-Version: 2.4
  Name: LLM-Bridge
- Version: 1.14.0
+ Version: 1.14.0a0
  Summary: A Bridge for LLMs
  Author-email: windsnow1025 <windsnow1025@gmail.com>
  License-Expression: MIT
- License-File: LICENSE
- Keywords: ai,llm
+ Keywords: llm,ai
  Classifier: Framework :: FastAPI
  Classifier: Programming Language :: Python :: 3
  Requires-Python: >=3.12
- Requires-Dist: anthropic==0.75.0
- Requires-Dist: docxlatex>=1.1.1
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
  Requires-Dist: fastapi
- Requires-Dist: google-genai==1.46.0
  Requires-Dist: httpx
+ Requires-Dist: tenacity
  Requires-Dist: openai==2.9.0
+ Requires-Dist: tiktoken==0.11.0
+ Requires-Dist: google-genai==1.46.0
+ Requires-Dist: anthropic==0.75.0
+ Requires-Dist: PyMuPDF
+ Requires-Dist: docxlatex>=1.1.1
  Requires-Dist: openpyxl
- Requires-Dist: pymupdf
  Requires-Dist: python-pptx
- Requires-Dist: tenacity
- Requires-Dist: tiktoken==0.11.0
- Description-Content-Type: text/markdown
+ Dynamic: license-file

  # LLM Bridge

@@ -76,12 +77,13 @@ pip install --upgrade llm_bridge
     uv sync
     ```

- ### Pycharm Professional
+ ### Pycharm
+
+ Add New Configuration >> uv run
+ - script: `./usage/main.py`
+ - Paths to ".env" files: `./usage/.env`

- 1. Add New Interpreter >> Add Local Interpreter
- - Environment: Select existing
- - Type: uv
- 2. Add New Configuration >> uv run >> script: `./usage/main.py`
+ If uv interpreter is not found, create a new project with uv.

  ### Usage

@@ -0,0 +1,67 @@
+ LICENSE
+ MANIFEST.in
+ README.md
+ pyproject.toml
+ LLM_Bridge.egg-info/PKG-INFO
+ LLM_Bridge.egg-info/SOURCES.txt
+ LLM_Bridge.egg-info/dependency_links.txt
+ LLM_Bridge.egg-info/requires.txt
+ LLM_Bridge.egg-info/top_level.txt
+ llm_bridge/__init__.py
+ llm_bridge/client/__init__.py
+ llm_bridge/client/chat_client.py
+ llm_bridge/client/implementations/__init__.py
+ llm_bridge/client/implementations/printing_status.py
+ llm_bridge/client/implementations/claude/__init__.py
+ llm_bridge/client/implementations/claude/claude_response_handler.py
+ llm_bridge/client/implementations/claude/claude_token_counter.py
+ llm_bridge/client/implementations/claude/non_stream_claude_client.py
+ llm_bridge/client/implementations/claude/stream_claude_client.py
+ llm_bridge/client/implementations/gemini/__init__.py
+ llm_bridge/client/implementations/gemini/gemini_response_handler.py
+ llm_bridge/client/implementations/gemini/gemini_token_counter.py
+ llm_bridge/client/implementations/gemini/non_stream_gemini_client.py
+ llm_bridge/client/implementations/gemini/stream_gemini_client.py
+ llm_bridge/client/implementations/openai/__init__.py
+ llm_bridge/client/implementations/openai/non_stream_openai_client.py
+ llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py
+ llm_bridge/client/implementations/openai/openai_token_couter.py
+ llm_bridge/client/implementations/openai/steam_openai_responses_client.py
+ llm_bridge/client/implementations/openai/stream_openai_client.py
+ llm_bridge/client/model_client/__init__.py
+ llm_bridge/client/model_client/claude_client.py
+ llm_bridge/client/model_client/gemini_client.py
+ llm_bridge/client/model_client/openai_client.py
+ llm_bridge/logic/__init__.py
+ llm_bridge/logic/file_fetch.py
+ llm_bridge/logic/model_prices.py
+ llm_bridge/logic/chat_generate/__init__.py
+ llm_bridge/logic/chat_generate/chat_client_factory.py
+ llm_bridge/logic/chat_generate/chat_message_converter.py
+ llm_bridge/logic/chat_generate/media_processor.py
+ llm_bridge/logic/chat_generate/model_client_factory/__init__.py
+ llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py
+ llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py
+ llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py
+ llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py
+ llm_bridge/logic/chat_generate/model_message_converter/__init__.py
+ llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py
+ llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py
+ llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py
+ llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py
+ llm_bridge/logic/message_preprocess/__init__.py
+ llm_bridge/logic/message_preprocess/code_file_extensions.py
+ llm_bridge/logic/message_preprocess/document_processor.py
+ llm_bridge/logic/message_preprocess/file_type_checker.py
+ llm_bridge/logic/message_preprocess/message_preprocessor.py
+ llm_bridge/resources/__init__.py
+ llm_bridge/resources/model_prices.json
+ llm_bridge/type/__init__.py
+ llm_bridge/type/chat_response.py
+ llm_bridge/type/message.py
+ llm_bridge/type/serializer.py
+ llm_bridge/type/model_message/__init__.py
+ llm_bridge/type/model_message/claude_message.py
+ llm_bridge/type/model_message/gemini_message.py
+ llm_bridge/type/model_message/openai_message.py
+ llm_bridge/type/model_message/openai_responses_message.py
@@ -0,0 +1,11 @@
+ fastapi
+ httpx
+ tenacity
+ openai==2.9.0
+ tiktoken==0.11.0
+ google-genai==1.46.0
+ anthropic==0.75.0
+ PyMuPDF
+ docxlatex>=1.1.1
+ openpyxl
+ python-pptx
@@ -0,0 +1 @@
+ llm_bridge
@@ -0,0 +1,102 @@
+ Metadata-Version: 2.4
+ Name: LLM-Bridge
+ Version: 1.14.0a0
+ Summary: A Bridge for LLMs
+ Author-email: windsnow1025 <windsnow1025@gmail.com>
+ License-Expression: MIT
+ Keywords: llm,ai
+ Classifier: Framework :: FastAPI
+ Classifier: Programming Language :: Python :: 3
+ Requires-Python: >=3.12
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: fastapi
+ Requires-Dist: httpx
+ Requires-Dist: tenacity
+ Requires-Dist: openai==2.9.0
+ Requires-Dist: tiktoken==0.11.0
+ Requires-Dist: google-genai==1.46.0
+ Requires-Dist: anthropic==0.75.0
+ Requires-Dist: PyMuPDF
+ Requires-Dist: docxlatex>=1.1.1
+ Requires-Dist: openpyxl
+ Requires-Dist: python-pptx
+ Dynamic: license-file
+
+ # LLM Bridge
+
+ LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI (Native / Azure / GitHub), Gemini (AI Studio / Vertex), Claude, and Grok.
+
+ GitHub: [https://github.com/windsnow1025/LLM-Bridge](https://github.com/windsnow1025/LLM-Bridge)
+
+ PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge/)
+
+ ## Workflow and Features
+
+ 1. **Message Preprocessor**: extracts text content from documents (Word, Excel, PPT, Code files, PDFs) which are not natively supported by the target model.
+ 2. **Chat Client Factory**: creates a client for the specific LLM API with model parameters
+    1. **Model Message Converter**: converts general messages to model messages
+    1. **Media Processor**: converts general media (Image, Audio, Video, PDF) to model compatible formats.
+ 3. **Chat Client**: generate stream or non-stream responses
+    - **Model Thoughts**: captures and formats the model's thinking process
+    - **Code Execution**: auto generate and execute Python code
+    - **Web Search + Citations**: extracts and formats citations from search results
+    - **Token Counter**: tracks and reports input and output token usage
+
+ ### Supported Features for API Types
+
+ The features listed represent the maximum capabilities of each API type supported by LLM Bridge.
+
+ | API Type | Input Format | Capabilities | Output Format |
+ |----------|--------------------------------|---------------------------------------------------------------------|-------------------|
+ | OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution | Text |
+ | Gemini | Text, Image, Video, Audio, PDF | Thinking, Web Search + Citations, Code Execution, Structured Output | Text, Image, File |
+ | Claude | Text, Image, PDF | Thinking, Web Search, Code Execution | Text |
+ | Grok | Text, Image | | Text |
+
+ #### Planned Features
+
+ - Structured Output
+ - More features for API Types
+ - Native support for Grok
+
+ ## Installation
+
+ ```bash
+ pip install --upgrade llm_bridge
+ ```
+
+ ## Development
+
+ ### Python uv
+
+ 1. Install uv: `powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"`
+ 2. Install Python in uv: `uv python install 3.12`; upgrade Python in uv: `uv python install 3.12`
+ 3. Configure requirements:
+    ```bash
+    uv sync
+    ```
+
+ ### Pycharm
+
+ Add New Configuration >> uv run
+ - script: `./usage/main.py`
+ - Paths to ".env" files: `./usage/.env`
+
+ If uv interpreter is not found, create a new project with uv.
+
+ ### Usage
+
+ Copy `./usage/.env.example` and rename it to `./usage/.env`, then fill in the environment variables.
+
+ ### Test
+
+ ```bash
+ uv run pytest
+ ```
+
+ ### Build
+
+ ```bash
+ uv build
+ ```
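For orientation, the three-stage pipeline the README above describes can be sketched in a few lines. Only the `Message` / `Role` / `Content` / `ContentType` imports are confirmed by this diff (they appear in the removed tests further down); the three numbered stages are shown as comments because the factory and client call names are not visible here.

```python
# Hypothetical usage sketch of the pipeline described in the README; only the
# llm_bridge.type.message imports are confirmed by this diff.
from llm_bridge.type.message import Message, Role, Content, ContentType

messages = [
    Message(role=Role.System, contents=[
        Content(type=ContentType.Text, data="You are a helpful assistant."),
    ]),
    Message(role=Role.User, contents=[
        Content(type=ContentType.Text, data="Summarize the attached PDF."),
    ]),
]
# 1. Message Preprocessor: extract text from Word/Excel/PPT/code/PDF inputs
# 2. Chat Client Factory: build a model-specific client with model parameters
# 3. Chat Client: generate a stream or non-stream response
# (the call names for these stages are not shown in this diff; see the package)
```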
@@ -52,12 +52,13 @@ pip install --upgrade llm_bridge
     uv sync
     ```

- ### Pycharm Professional
+ ### Pycharm

- 1. Add New Interpreter >> Add Local Interpreter
- - Environment: Select existing
- - Type: uv
- 2. Add New Configuration >> uv run >> script: `./usage/main.py`
+ Add New Configuration >> uv run
+ - script: `./usage/main.py`
+ - Paths to ".env" files: `./usage/.env`
+
+ If uv interpreter is not found, create a new project with uv.

  ### Usage

@@ -65,7 +65,7 @@ async def create_openai_client(
      tools = []
      reasoning = None

-     if model not in ["gpt-5-pro", "gpt-5.2-pro"]:
+     if model not in ["gpt-5-chat-latest", "gpt-5-pro"]:
          if code_execution:
              tools.append(
                  CodeInterpreter(
@@ -73,15 +73,16 @@
                      container=CodeInterpreterContainerCodeInterpreterToolAuto(type="auto")
                  )
              )
-         tools.append(
-             WebSearchToolParam(
-                 type="web_search",
-                 search_context_size="high",
+         if model not in ["gpt-5-chat-latest"]:
+             tools.append(
+                 WebSearchToolParam(
+                     type="web_search",
+                     search_context_size="high",
+                 )
              )
-         )
-     if re.match(r"gpt-5.*", model):
+     if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
          temperature = 1
-     if re.match(r"gpt-5.*", model):
+     if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
          if thought:
              reasoning = Reasoning(
                  effort="high",
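The net effect of the two hunks above is a per-model gating rule: `gpt-5-chat-latest` is excluded from code execution, web search, the forced `temperature = 1`, and reasoning, while `gpt-5-pro` keeps reasoning but gets no tools. A condensed, self-contained sketch of that rule follows; it is not the module's actual code (plain dicts stand in for the SDK's `CodeInterpreter` / `WebSearchToolParam` / `Reasoning` params, and `select_openai_options` is a hypothetical name):

```python
# Condensed sketch of the gating logic after this change -- not the module's
# actual code; dicts replace the typed SDK params.
import re


def select_openai_options(model: str, code_execution: bool, thought: bool):
    tools, reasoning, temperature = [], None, None
    if model not in ["gpt-5-chat-latest", "gpt-5-pro"]:
        if code_execution:
            tools.append({"type": "code_interpreter", "container": {"type": "auto"}})
        if model not in ["gpt-5-chat-latest"]:  # redundant here; kept to mirror the diff
            tools.append({"type": "web_search", "search_context_size": "high"})
    if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
        temperature = 1
        if thought:
            reasoning = {"effort": "high"}
    return tools, reasoning, temperature


# gpt-5-chat-latest: no tools, no reasoning, temperature left untouched.
assert select_openai_options("gpt-5-chat-latest", True, True) == ([], None, None)
# gpt-5-pro: no tools, but reasoning and the temperature override still apply.
assert select_openai_options("gpt-5-pro", True, True) == ([], {"effort": "high"}, 1)
```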
@@ -71,12 +71,6 @@
      "input": 2.5,
      "output": 15
    },
-   {
-     "apiType": "OpenAI",
-     "model": "gpt-5.2",
-     "input": 1.75,
-     "output": 14
-   },
    {
      "apiType": "OpenAI",
      "model": "gpt-5.1",
@@ -95,12 +89,6 @@
      "input": 0.25,
      "output": 2
    },
-   {
-     "apiType": "OpenAI",
-     "model": "gpt-5.2-pro",
-     "input": 21,
-     "output": 168
-   },
    {
      "apiType": "OpenAI",
      "model": "gpt-5-pro",
@@ -1,10 +1,22 @@
  [build-system]
- requires = ["hatchling"]
- build-backend = "hatchling.build"
+ requires = ["setuptools"]
+ build-backend = "setuptools.build_meta"

  [project]
  name = "LLM-Bridge"
- version = "1.14.0"
+ version = "1.14.0-alpha.0"
+ authors = [
+     {name = "windsnow1025", email = "windsnow1025@gmail.com"}
+ ]
+ description = "A Bridge for LLMs"
+ readme = "README.md"
+ requires-python = ">=3.12"
+ keywords = ["llm", "ai"]
+ license = "MIT"
+ classifiers = [
+     "Framework :: FastAPI",
+     "Programming Language :: Python :: 3",
+ ]
  dependencies = [
      "fastapi",
      "httpx",
@@ -18,18 +30,6 @@ dependencies = [
      "openpyxl",
      "python-pptx", # pptx
  ]
- requires-python = ">=3.12"
- authors = [
-     {name = "windsnow1025", email = "windsnow1025@gmail.com"}
- ]
- description = "A Bridge for LLMs"
- readme = "README.md"
- license = "MIT"
- keywords = ["llm", "ai"]
- classifiers = [
-     "Framework :: FastAPI",
-     "Programming Language :: Python :: 3",
- ]

  [dependency-groups]
  dev = [
@@ -38,5 +38,9 @@ dev = [
      "python-dotenv", #dotenv
  ]

+ [tool.setuptools.packages.find]
+ where = ["."]
+ include = ["llm_bridge*"]
+
  [tool.pytest.ini_options]
- asyncio_default_fixture_loop_scope = "function"
+ asyncio_default_fixture_loop_scope = "function"
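One detail worth noting in the hunks above: pyproject.toml sets `version = "1.14.0-alpha.0"`, while the built metadata and the sdist filename say `1.14.0a0`. That is PEP 440 normalization at work, which the `packaging` library reproduces:

```python
# PEP 440 normalizes "1.14.0-alpha.0" to "1.14.0a0", which is why the sdist
# and PKG-INFO carry the shorter form.
from packaging.version import Version  # pip install packaging

print(Version("1.14.0-alpha.0"))                # -> 1.14.0a0
print(Version("1.14.0a0") < Version("1.14.0"))  # -> True: pre-releases sort first
```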
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+
@@ -1,2 +0,0 @@
- # Auto detect text files and perform LF normalization
- * text=auto
@@ -1,32 +0,0 @@
- name: "Publish"
-
- on:
-   push:
-     tags:
-       # Publish on any tag starting with a `v`, e.g., v0.1.0
-       - v*
-
- jobs:
-   run:
-     runs-on: ubuntu-latest
-     environment:
-       name: pypi
-     permissions:
-       id-token: write
-       contents: read
-     steps:
-       - name: Checkout
-         uses: actions/checkout@v5
-       - name: Install uv
-         uses: astral-sh/setup-uv@v7
-       - name: Install Python 3.12
-         run: uv python install 3.12
-       - name: Build
-         run: uv build
-       # Check that basic features work and we didn't miss to include crucial files
-       # - name: Smoke test (wheel)
-       #   run: uv run --isolated --no-project --with dist/*.whl tests/smoke_test.py
-       # - name: Smoke test (source distribution)
-       #   run: uv run --isolated --no-project --with dist/*.tar.gz tests/smoke_test.py
-       - name: Publish
-         run: uv publish
@@ -1,160 +0,0 @@
- # Byte-compiled / optimized / DLL files
- __pycache__/
- *.py[cod]
- *$py.class
-
- # C extensions
- *.so
-
- # Distribution / packaging
- .Python
- build/
- develop-eggs/
- dist/
- downloads/
- eggs/
- .eggs/
- lib/
- lib64/
- parts/
- sdist/
- var/
- wheels/
- share/python-wheels/
- *.egg-info/
- .installed.cfg
- *.egg
- MANIFEST
-
- # PyInstaller
- # Usually these files are written by a python script from a template
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
- *.manifest
- *.spec
-
- # Installer logs
- pip-log.txt
- pip-delete-this-directory.txt
-
- # Unit test / coverage reports
- htmlcov/
- .tox/
- .nox/
- .coverage
- .coverage.*
- .cache
- nosetests.xml
- coverage.xml
- *.cover
- *.py,cover
- .hypothesis/
- .pytest_cache/
- cover/
-
- # Translations
- *.mo
- *.pot
-
- # Django stuff:
- *.log
- local_settings.py
- db.sqlite3
- db.sqlite3-journal
-
- # Flask stuff:
- instance/
- .webassets-cache
-
- # Scrapy stuff:
- .scrapy
-
- # Sphinx documentation
- docs/_build/
-
- # PyBuilder
- .pybuilder/
- target/
-
- # Jupyter Notebook
- .ipynb_checkpoints
-
- # IPython
- profile_default/
- ipython_config.py
-
- # pyenv
- # For a library or package, you might want to ignore these files since the code is
- # intended to run in multiple environments; otherwise, check them in:
- # .python-version
-
- # pipenv
- # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
- # However, in case of collaboration, if having platform-specific dependencies or dependencies
- # having no cross-platform support, pipenv may install dependencies that don't work, or not
- # install all needed dependencies.
- #Pipfile.lock
-
- # poetry
- # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
- # This is especially recommended for binary packages to ensure reproducibility, and is more
- # commonly ignored for libraries.
- # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
- #poetry.lock
-
- # pdm
- # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
- #pdm.lock
- # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
- # in version control.
- # https://pdm.fming.dev/#use-with-ide
- .pdm.toml
-
- # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
- __pypackages__/
-
- # Celery stuff
- celerybeat-schedule
- celerybeat.pid
-
- # SageMath parsed files
- *.sage.py
-
- # Environments
- .env
- .venv
- env/
- venv/
- ENV/
- env.bak/
- venv.bak/
-
- # Spyder project settings
- .spyderproject
- .spyproject
-
- # Rope project settings
- .ropeproject
-
- # mkdocs documentation
- /site
-
- # mypy
- .mypy_cache/
- .dmypy.json
- dmypy.json
-
- # Pyre type checker
- .pyre/
-
- # pytype static type analyzer
- .pytype/
-
- # Cython debug symbols
- cython_debug/
-
- # PyCharm
- # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
- # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
- # and can be added to the global gitignore or merged into this file. For a more nuclear
- # option (not recommended) you can uncomment the following to ignore the entire idea folder.
- .idea/
@@ -1,20 +0,0 @@
- import pytest
-
- from llm_bridge.type.message import Message, Role, Content, ContentType
-
-
- @pytest.fixture
- def sample_messages():
-     return [
-         Message(role=Role.System, contents=[
-             Content(type=ContentType.Text, data="You are a helpful assistant.")
-         ]),
-         Message(role=Role.User, contents=[
-             Content(type=ContentType.Text, data="Hello")
-         ])
-     ]
-
-
- @pytest.mark.asyncio
- async def test_placeholder():
-     assert True
@@ -1,26 +0,0 @@
- import pytest
-
- from llm_bridge.logic.message_preprocess.message_preprocessor import extract_system_messages
- from llm_bridge.type.message import Message, Role, Content, ContentType
-
-
- @pytest.fixture
- def sample_messages():
-     return [
-         Message(role=Role.System, contents=[
-             Content(type=ContentType.Text, data="You are a helpful assistant.")
-         ]),
-         Message(role=Role.User, contents=[
-             Content(type=ContentType.Text, data="Hello")
-         ])
-     ]
-
- def test_extract_system_messages(sample_messages):
-     extracted_text = extract_system_messages(sample_messages)
-
-     assert extracted_text == "You are a helpful assistant.\n"
-
-     assert len(sample_messages) == 1
-     assert sample_messages[0].role == Role.User
-     assert sample_messages[0].contents[0].type == ContentType.Text
-     assert sample_messages[0].contents[0].data == "Hello"
@@ -1,9 +0,0 @@
- OPENAI_API_KEY=
- GEMINI_FREE_API_KEY=
- GEMINI_PAID_API_KEY=
- GEMINI_VERTEX_API_KEY=
- ANTHROPIC_API_KEY=
- AZURE_API_KEY=
- AZURE_API_BASE=
- GITHUB_API_KEY=
- XAI_API_KEY=