LLM-Bridge 1.14.0__tar.gz → 1.14.0a1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/PKG-INFO +7 -6
  2. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/README.md +6 -5
  3. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +9 -8
  4. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/resources/model_prices.json +0 -12
  5. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/pyproject.toml +1 -1
  6. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/usage/main.py +5 -6
  7. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/.gitattributes +0 -0
  8. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/.github/workflows/python-publish.yml +0 -0
  9. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/.gitignore +0 -0
  10. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/LICENSE +0 -0
  11. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/MANIFEST.in +0 -0
  12. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/__init__.py +0 -0
  13. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/__init__.py +0 -0
  14. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/chat_client.py +0 -0
  15. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/__init__.py +0 -0
  16. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  17. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
  18. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  19. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  20. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  21. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  22. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
  23. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  24. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  25. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  26. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  27. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  28. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
  29. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  30. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
  31. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  32. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/printing_status.py +0 -0
  33. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/model_client/__init__.py +0 -0
  34. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/model_client/claude_client.py +0 -0
  35. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/model_client/gemini_client.py +0 -0
  36. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/client/model_client/openai_client.py +0 -0
  37. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/__init__.py +0 -0
  38. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  39. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  40. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  41. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  42. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  43. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  44. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  45. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +0 -0
  46. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  47. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  48. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  49. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  50. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  51. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/file_fetch.py +0 -0
  52. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  53. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  54. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  55. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  56. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  57. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/logic/model_prices.py +0 -0
  58. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/resources/__init__.py +0 -0
  59. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/type/__init__.py +0 -0
  60. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/type/chat_response.py +0 -0
  61. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/type/message.py +0 -0
  62. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/type/model_message/__init__.py +0 -0
  63. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/type/model_message/claude_message.py +0 -0
  64. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/type/model_message/gemini_message.py +0 -0
  65. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/type/model_message/openai_message.py +0 -0
  66. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  67. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/llm_bridge/type/serializer.py +0 -0
  68. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/tests/__init__.py +0 -0
  69. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/tests/chat_client_factory_test.py +0 -0
  70. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/tests/message_preprocessor_test.py +0 -0
  71. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/usage/.env.example +0 -0
  72. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/usage/workflow.py +0 -0
  73. {llm_bridge-1.14.0 → llm_bridge-1.14.0a1}/uv.lock +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: LLM-Bridge
3
- Version: 1.14.0
3
+ Version: 1.14.0a1
4
4
  Summary: A Bridge for LLMs
5
5
  Author-email: windsnow1025 <windsnow1025@gmail.com>
6
6
  License-Expression: MIT
@@ -76,12 +76,13 @@ pip install --upgrade llm_bridge
76
76
  uv sync
77
77
  ```
78
78
 
79
- ### Pycharm Professional
79
+ ### Pycharm
80
80
 
81
- 1. Add New Interpreter >> Add Local Interpreter
82
- - Environment: Select existing
83
- - Type: uv
84
- 2. Add New Configuration >> uv run >> script: `./usage/main.py`
81
+ Add New Configuration >> uv run
82
+ - script: `./usage/main.py`
83
+ - Paths to ".env" files: `./usage/.env`
84
+
85
+ If uv interpreter is not found, create a new project with uv.
85
86
 
86
87
  ### Usage
87
88
 
@@ -52,12 +52,13 @@ pip install --upgrade llm_bridge
52
52
  uv sync
53
53
  ```
54
54
 
55
- ### Pycharm Professional
55
+ ### Pycharm
56
56
 
57
- 1. Add New Interpreter >> Add Local Interpreter
58
- - Environment: Select existing
59
- - Type: uv
60
- 2. Add New Configuration >> uv run >> script: `./usage/main.py`
57
+ Add New Configuration >> uv run
58
+ - script: `./usage/main.py`
59
+ - Paths to ".env" files: `./usage/.env`
60
+
61
+ If uv interpreter is not found, create a new project with uv.
61
62
 
62
63
  ### Usage
63
64
 
@@ -65,7 +65,7 @@ async def create_openai_client(
65
65
  tools = []
66
66
  reasoning = None
67
67
 
68
- if model not in ["gpt-5-pro", "gpt-5.2-pro"]:
68
+ if model not in ["gpt-5-chat-latest", "gpt-5-pro"]:
69
69
  if code_execution:
70
70
  tools.append(
71
71
  CodeInterpreter(
@@ -73,15 +73,16 @@ async def create_openai_client(
73
73
  container=CodeInterpreterContainerCodeInterpreterToolAuto(type="auto")
74
74
  )
75
75
  )
76
- tools.append(
77
- WebSearchToolParam(
78
- type="web_search",
79
- search_context_size="high",
76
+ if model not in ["gpt-5-chat-latest"]:
77
+ tools.append(
78
+ WebSearchToolParam(
79
+ type="web_search",
80
+ search_context_size="high",
81
+ )
80
82
  )
81
- )
82
- if re.match(r"gpt-5.*", model):
83
+ if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
83
84
  temperature = 1
84
- if re.match(r"gpt-5.*", model):
85
+ if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
85
86
  if thought:
86
87
  reasoning = Reasoning(
87
88
  effort="high",
@@ -71,12 +71,6 @@
71
71
  "input": 2.5,
72
72
  "output": 15
73
73
  },
74
- {
75
- "apiType": "OpenAI",
76
- "model": "gpt-5.2",
77
- "input": 1.75,
78
- "output": 14
79
- },
80
74
  {
81
75
  "apiType": "OpenAI",
82
76
  "model": "gpt-5.1",
@@ -95,12 +89,6 @@
95
89
  "input": 0.25,
96
90
  "output": 2
97
91
  },
98
- {
99
- "apiType": "OpenAI",
100
- "model": "gpt-5.2-pro",
101
- "input": 21,
102
- "output": 168
103
- },
104
92
  {
105
93
  "apiType": "OpenAI",
106
94
  "model": "gpt-5-pro",
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "LLM-Bridge"
7
- version = "1.14.0"
7
+ version = "1.14.0-alpha.1"
8
8
  dependencies = [
9
9
  "fastapi",
10
10
  "httpx",
@@ -111,14 +111,14 @@ messages = [
111
111
  # Content(type=ContentType.Text, data="What is in https://www.windsnow1025.com/"),
112
112
 
113
113
  # Code Execution
114
- Content(type=ContentType.Text, data="What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
114
+ # Content(type=ContentType.Text, data="What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
115
115
 
116
116
  # File Output
117
117
  # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1758384216123-script.py"),
118
118
  # Content(type=ContentType.Text, data="Please implement a minimum example of Neural Network in `script.py`"),
119
119
 
120
120
  # Structured Output
121
- # Content(type=ContentType.Text, data="Please generate a product."),
121
+ Content(type=ContentType.Text, data="Please generate a product."),
122
122
  ]
123
123
  ),
124
124
  # Message(
@@ -134,7 +134,6 @@ messages = [
134
134
  # ),
135
135
  ]
136
136
  # See /llm_bridge/resources/model_prices.json for available models
137
- model = "gpt-5.2"
138
137
  # model = "gpt-5.1"
139
138
  # model = "gpt-5-pro"
140
139
  # model = "gpt-5"
@@ -143,15 +142,15 @@ model = "gpt-5.2"
143
142
  # model = "gemini-3-pro-image-preview"
144
143
  # model = "gemini-flash-latest"
145
144
  # model = "grok-4-1-fast-reasoning"
146
- # model = "claude-sonnet-4-5"
145
+ model = "claude-sonnet-4-5"
147
146
  # model = "claude-opus-4-5"
148
147
  # api_type = "Gemini-Vertex"
149
148
  # api_type = "Gemini-Free"
150
149
  # api_type = "Gemini-Paid"
151
- api_type = "OpenAI"
150
+ # api_type = "OpenAI"
152
151
  # api_type = "OpenAI-Azure"
153
152
  # api_type = "OpenAI-GitHub"
154
- # api_type = "Claude"
153
+ api_type = "Claude"
155
154
  # api_type = "Grok"
156
155
  temperature = 0
157
156
  stream = True
File without changes
File without changes
File without changes
File without changes