LLM-Bridge 1.14.0a1__tar.gz → 1.14.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/PKG-INFO +7 -14
  2. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/README.md +6 -13
  3. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +8 -9
  4. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/resources/model_prices.json +20 -32
  5. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/pyproject.toml +1 -1
  6. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/usage/main.py +7 -6
  7. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/uv.lock +3 -1
  8. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/.gitattributes +0 -0
  9. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/.github/workflows/python-publish.yml +0 -0
  10. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/.gitignore +0 -0
  11. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/LICENSE +0 -0
  12. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/MANIFEST.in +0 -0
  13. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/__init__.py +0 -0
  14. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/__init__.py +0 -0
  15. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/chat_client.py +0 -0
  16. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/__init__.py +0 -0
  17. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  18. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
  19. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  20. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  21. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  22. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  23. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
  24. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  25. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  26. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  27. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  28. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  29. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
  30. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  31. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
  32. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  33. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/implementations/printing_status.py +0 -0
  34. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/model_client/__init__.py +0 -0
  35. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/model_client/claude_client.py +0 -0
  36. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/model_client/gemini_client.py +0 -0
  37. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/client/model_client/openai_client.py +0 -0
  38. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/__init__.py +0 -0
  39. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  40. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  41. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  42. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  43. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  44. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  45. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  46. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +0 -0
  47. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  48. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  49. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  50. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  51. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  52. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/file_fetch.py +0 -0
  53. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  54. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  55. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  56. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  57. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  58. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/logic/model_prices.py +0 -0
  59. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/resources/__init__.py +0 -0
  60. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/type/__init__.py +0 -0
  61. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/type/chat_response.py +0 -0
  62. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/type/message.py +0 -0
  63. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/type/model_message/__init__.py +0 -0
  64. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/type/model_message/claude_message.py +0 -0
  65. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/type/model_message/gemini_message.py +0 -0
  66. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/type/model_message/openai_message.py +0 -0
  67. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  68. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/llm_bridge/type/serializer.py +0 -0
  69. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/tests/__init__.py +0 -0
  70. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/tests/chat_client_factory_test.py +0 -0
  71. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/tests/message_preprocessor_test.py +0 -0
  72. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/usage/.env.example +0 -0
  73. {llm_bridge-1.14.0a1 → llm_bridge-1.14.1}/usage/workflow.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: LLM-Bridge
3
- Version: 1.14.0a1
3
+ Version: 1.14.1
4
4
  Summary: A Bridge for LLMs
5
5
  Author-email: windsnow1025 <windsnow1025@gmail.com>
6
6
  License-Expression: MIT
@@ -59,12 +59,6 @@ The features listed represent the maximum capabilities of each API type supporte
59
59
  - More features for API Types
60
60
  - Native support for Grok
61
61
 
62
- ## Installation
63
-
64
- ```bash
65
- pip install --upgrade llm_bridge
66
- ```
67
-
68
62
  ## Development
69
63
 
70
64
  ### Python uv
@@ -73,16 +67,15 @@ pip install --upgrade llm_bridge
73
67
2. Install Python in uv: `uv python install 3.12`; upgrade Python in uv: `uv python upgrade 3.12`
74
68
  3. Configure requirements:
75
69
  ```bash
76
- uv sync
70
+ uv sync --refresh
77
71
  ```
78
72
 
79
- ### Pycharm
80
-
81
- Add New Configuration >> uv run
82
- - script: `./usage/main.py`
83
- - Paths to ".env" files: `./usage/.env`
73
+ ### Pycharm Professional
84
74
 
85
- If uv interpreter is not found, create a new project with uv.
75
+ 1. Add New Interpreter >> Add Local Interpreter
76
+ - Environment: Select existing
77
+ - Type: uv
78
+ 2. Add New Configuration >> uv run >> script: `./usage/main.py`
86
79
 
87
80
  ### Usage
88
81
 
@@ -35,12 +35,6 @@ The features listed represent the maximum capabilities of each API type supporte
35
35
  - More features for API Types
36
36
  - Native support for Grok
37
37
 
38
- ## Installation
39
-
40
- ```bash
41
- pip install --upgrade llm_bridge
42
- ```
43
-
44
38
  ## Development
45
39
 
46
40
  ### Python uv
@@ -49,16 +43,15 @@ pip install --upgrade llm_bridge
49
43
2. Install Python in uv: `uv python install 3.12`; upgrade Python in uv: `uv python upgrade 3.12`
50
44
  3. Configure requirements:
51
45
  ```bash
52
- uv sync
46
+ uv sync --refresh
53
47
  ```
54
48
 
55
- ### Pycharm
56
-
57
- Add New Configuration >> uv run
58
- - script: `./usage/main.py`
59
- - Paths to ".env" files: `./usage/.env`
49
+ ### Pycharm Professional
60
50
 
61
- If uv interpreter is not found, create a new project with uv.
51
+ 1. Add New Interpreter >> Add Local Interpreter
52
+ - Environment: Select existing
53
+ - Type: uv
54
+ 2. Add New Configuration >> uv run >> script: `./usage/main.py`
62
55
 
63
56
  ### Usage
64
57
 
@@ -65,7 +65,7 @@ async def create_openai_client(
65
65
  tools = []
66
66
  reasoning = None
67
67
 
68
- if model not in ["gpt-5-chat-latest", "gpt-5-pro"]:
68
+ if model not in ["gpt-5-pro", "gpt-5.2-pro"]:
69
69
  if code_execution:
70
70
  tools.append(
71
71
  CodeInterpreter(
@@ -73,16 +73,15 @@ async def create_openai_client(
73
73
  container=CodeInterpreterContainerCodeInterpreterToolAuto(type="auto")
74
74
  )
75
75
  )
76
- if model not in ["gpt-5-chat-latest"]:
77
- tools.append(
78
- WebSearchToolParam(
79
- type="web_search",
80
- search_context_size="high",
81
- )
76
+ tools.append(
77
+ WebSearchToolParam(
78
+ type="web_search",
79
+ search_context_size="high",
82
80
  )
83
- if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
81
+ )
82
+ if re.match(r"gpt-5.*", model):
84
83
  temperature = 1
85
- if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
84
+ if re.match(r"gpt-5.*", model):
86
85
  if thought:
87
86
  reasoning = Reasoning(
88
87
  effort="high",
@@ -7,39 +7,27 @@
7
7
  },
8
8
  {
9
9
  "apiType": "Gemini-Vertex",
10
- "model": "gemini-3-pro-image-preview",
11
- "input": 2,
12
- "output": 120
13
- },
14
- {
15
- "apiType": "Gemini-Vertex",
16
- "model": "gemini-2.5-flash",
10
+ "model": "gemini-3-flash-preview",
17
11
  "input": 1,
18
- "output": 2.5
12
+ "output": 3
19
13
  },
20
14
  {
21
15
  "apiType": "Gemini-Vertex",
22
- "model": "gemini-2.5-pro",
23
- "input": 2.5,
24
- "output": 15
25
- },
26
- {
27
- "apiType": "Gemini-Free",
28
- "model": "gemini-flash-latest",
29
- "input": 0,
30
- "output": 0
16
+ "model": "gemini-3-pro-image-preview",
17
+ "input": 2,
18
+ "output": 120
31
19
  },
32
20
  {
33
21
  "apiType": "Gemini-Free",
34
- "model": "gemini-2.5-flash",
22
+ "model": "gemini-3-flash-preview",
35
23
  "input": 0,
36
24
  "output": 0
37
25
  },
38
26
  {
39
- "apiType": "Gemini-Free",
40
- "model": "gemini-2.5-pro",
41
- "input": 0,
42
- "output": 0
27
+ "apiType": "Gemini-Paid",
28
+ "model": "gemini-3-flash-preview",
29
+ "input": 1,
30
+ "output": 3
43
31
  },
44
32
  {
45
33
  "apiType": "Gemini-Paid",
@@ -60,16 +48,10 @@
60
48
  "output": 2.5
61
49
  },
62
50
  {
63
- "apiType": "Gemini-Paid",
64
- "model": "gemini-2.5-flash",
65
- "input": 1,
66
- "output": 2.5
67
- },
68
- {
69
- "apiType": "Gemini-Paid",
70
- "model": "gemini-2.5-pro",
71
- "input": 2.5,
72
- "output": 15
51
+ "apiType": "OpenAI",
52
+ "model": "gpt-5.2",
53
+ "input": 1.75,
54
+ "output": 14
73
55
  },
74
56
  {
75
57
  "apiType": "OpenAI",
@@ -89,6 +71,12 @@
89
71
  "input": 0.25,
90
72
  "output": 2
91
73
  },
74
+ {
75
+ "apiType": "OpenAI",
76
+ "model": "gpt-5.2-pro",
77
+ "input": 21,
78
+ "output": 168
79
+ },
92
80
  {
93
81
  "apiType": "OpenAI",
94
82
  "model": "gpt-5-pro",
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "LLM-Bridge"
7
- version = "1.14.0-alpha.1"
7
+ version = "1.14.1"
8
8
  dependencies = [
9
9
  "fastapi",
10
10
  "httpx",
@@ -111,14 +111,14 @@ messages = [
111
111
  # Content(type=ContentType.Text, data="What is in https://www.windsnow1025.com/"),
112
112
 
113
113
  # Code Execution
114
- # Content(type=ContentType.Text, data="What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
114
+ Content(type=ContentType.Text, data="What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
115
115
 
116
116
  # File Output
117
117
  # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1758384216123-script.py"),
118
118
  # Content(type=ContentType.Text, data="Please implement a minimum example of Neural Network in `script.py`"),
119
119
 
120
120
  # Structured Output
121
- Content(type=ContentType.Text, data="Please generate a product."),
121
+ # Content(type=ContentType.Text, data="Please generate a product."),
122
122
  ]
123
123
  ),
124
124
  # Message(
@@ -134,23 +134,24 @@ messages = [
134
134
  # ),
135
135
  ]
136
136
  # See /llm_bridge/resources/model_prices.json for available models
137
+ # model = "gpt-5.2"
137
138
  # model = "gpt-5.1"
138
139
  # model = "gpt-5-pro"
139
140
  # model = "gpt-5"
140
141
  # model = "gpt-4.1"
141
142
  # model = "gemini-3-pro-preview"
142
143
  # model = "gemini-3-pro-image-preview"
143
- # model = "gemini-flash-latest"
144
+ model = "gemini-3-flash-preview"
144
145
  # model = "grok-4-1-fast-reasoning"
145
- model = "claude-sonnet-4-5"
146
+ # model = "claude-sonnet-4-5"
146
147
  # model = "claude-opus-4-5"
147
148
  # api_type = "Gemini-Vertex"
148
149
  # api_type = "Gemini-Free"
149
- # api_type = "Gemini-Paid"
150
+ api_type = "Gemini-Paid"
150
151
  # api_type = "OpenAI"
151
152
  # api_type = "OpenAI-Azure"
152
153
  # api_type = "OpenAI-GitHub"
153
- api_type = "Claude"
154
+ # api_type = "Claude"
154
155
  # api_type = "Grok"
155
156
  temperature = 0
156
157
  stream = True
@@ -357,7 +357,7 @@ wheels = [
357
357
 
358
358
  [[package]]
359
359
  name = "llm-bridge"
360
- version = "1.14.0a1"
360
+ version = "1.14.1"
361
361
  source = { editable = "." }
362
362
  dependencies = [
363
363
  { name = "anthropic" },
@@ -726,6 +726,8 @@ wheels = [
726
726
  { url = "https://files.pythonhosted.org/packages/72/74/448b6172927c829c6a3fba80078d7b0a016ebbe2c9ee528821f5ea21677a/pymupdf-1.26.7-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:31aa9c8377ea1eea02934b92f4dcf79fb2abba0bf41f8a46d64c3e31546a3c02", size = 22470101, upload-time = "2025-12-11T21:47:37.105Z" },
727
727
  { url = "https://files.pythonhosted.org/packages/65/e7/47af26f3ac76be7ac3dd4d6cc7ee105948a8355d774e5ca39857bf91c11c/pymupdf-1.26.7-cp310-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:e419b609996434a14a80fa060adec72c434a1cca6a511ec54db9841bc5d51b3c", size = 23502486, upload-time = "2025-12-12T09:51:25.824Z" },
728
728
  { url = "https://files.pythonhosted.org/packages/2a/6b/3de1714d734ff949be1e90a22375d0598d3540b22ae73eb85c2d7d1f36a9/pymupdf-1.26.7-cp310-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:69dfc78f206a96e5b3ac22741263ebab945fdf51f0dbe7c5757c3511b23d9d72", size = 24115727, upload-time = "2025-12-11T21:47:51.274Z" },
729
+ { url = "https://files.pythonhosted.org/packages/62/9b/f86224847949577a523be2207315ae0fd3155b5d909cd66c274d095349a3/pymupdf-1.26.7-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1d5106f46e1ca0d64d46bd51892372a4f82076bdc14a9678d33d630702abca36", size = 24324386, upload-time = "2025-12-12T14:58:45.483Z" },
730
+ { url = "https://files.pythonhosted.org/packages/85/8e/a117d39092ca645fde8b903f4a941d9aa75b370a67b4f1f435f56393dc5a/pymupdf-1.26.7-cp310-abi3-win32.whl", hash = "sha256:7c9645b6f5452629c747690190350213d3e5bbdb6b2eca227d82702b327f6eee", size = 17203888, upload-time = "2025-12-12T13:59:57.613Z" },
729
731
  { url = "https://files.pythonhosted.org/packages/dd/c3/d0047678146c294469c33bae167c8ace337deafb736b0bf97b9bc481aa65/pymupdf-1.26.7-cp310-abi3-win_amd64.whl", hash = "sha256:425b1befe40d41b72eb0fe211711c7ae334db5eb60307e9dd09066ed060cceba", size = 18405952, upload-time = "2025-12-11T21:48:02.947Z" },
730
732
  ]
731
733
 
File without changes
File without changes
File without changes