LLM-Bridge 1.11.9.tar.gz → 1.12.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. llm_bridge-1.12.0/LLM_Bridge.egg-info/PKG-INFO +88 -0
  2. llm_bridge-1.12.0/PKG-INFO +88 -0
  3. llm_bridge-1.12.0/README.md +58 -0
  4. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +21 -10
  5. llm_bridge-1.12.0/llm_bridge/client/implementations/claude/stream_claude_client.py +71 -0
  6. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/chat_generate/chat_client_factory.py +32 -14
  7. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +19 -12
  8. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +17 -26
  9. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +15 -11
  10. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/pyproject.toml +1 -1
  11. llm_bridge-1.11.9/LLM_Bridge.egg-info/PKG-INFO +0 -231
  12. llm_bridge-1.11.9/PKG-INFO +0 -231
  13. llm_bridge-1.11.9/README.md +0 -201
  14. llm_bridge-1.11.9/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -53
  15. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/LICENSE +0 -0
  16. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/LLM_Bridge.egg-info/SOURCES.txt +0 -0
  17. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/LLM_Bridge.egg-info/dependency_links.txt +0 -0
  18. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/LLM_Bridge.egg-info/requires.txt +0 -0
  19. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/LLM_Bridge.egg-info/top_level.txt +0 -0
  20. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/MANIFEST.in +0 -0
  21. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/__init__.py +0 -0
  22. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/__init__.py +0 -0
  23. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/chat_client.py +0 -0
  24. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/__init__.py +0 -0
  25. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  26. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
  27. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  28. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  29. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
  30. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  31. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  32. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  33. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  34. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  35. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
  36. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  37. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
  38. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  39. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/implementations/printing_status.py +0 -0
  40. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/model_client/__init__.py +0 -0
  41. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/model_client/claude_client.py +2 -2
  42. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/model_client/gemini_client.py +0 -0
  43. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/client/model_client/openai_client.py +0 -0
  44. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/__init__.py +0 -0
  45. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  46. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  47. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  48. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  49. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  50. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  51. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  52. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  53. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  54. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/file_fetch.py +0 -0
  55. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  56. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  57. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  58. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  59. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  60. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/logic/model_prices.py +0 -0
  61. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/resources/__init__.py +0 -0
  62. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/resources/model_prices.json +0 -0
  63. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/type/__init__.py +0 -0
  64. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/type/chat_response.py +0 -0
  65. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/type/message.py +0 -0
  66. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/type/model_message/__init__.py +0 -0
  67. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/type/model_message/claude_message.py +0 -0
  68. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/type/model_message/gemini_message.py +0 -0
  69. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/type/model_message/openai_message.py +0 -0
  70. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  71. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/llm_bridge/type/serializer.py +0 -0
  72. {llm_bridge-1.11.9 → llm_bridge-1.12.0}/setup.cfg +0 -0
@@ -0,0 +1,88 @@
+ Metadata-Version: 2.4
+ Name: LLM-Bridge
+ Version: 1.12.0
+ Summary: A Bridge for LLMs
+ Author-email: windsnow1025 <windsnow1025@gmail.com>
+ License-Expression: MIT
+ Keywords: llm,ai
+ Classifier: Framework :: FastAPI
+ Classifier: Programming Language :: Python :: 3
+ Requires-Python: >=3.12
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: fastapi
+ Requires-Dist: httpx
+ Requires-Dist: tenacity
+ Requires-Dist: openai==1.106.1
+ Requires-Dist: tiktoken==0.11.0
+ Requires-Dist: google-genai==1.46.0
+ Requires-Dist: anthropic==0.66.0
+ Requires-Dist: PyMuPDF
+ Requires-Dist: docxlatex>=1.1.1
+ Requires-Dist: openpyxl
+ Requires-Dist: python-pptx
+ Provides-Extra: test
+ Requires-Dist: pytest; extra == "test"
+ Requires-Dist: pytest-asyncio; extra == "test"
+ Requires-Dist: python-dotenv; extra == "test"
+ Requires-Dist: protobuf; extra == "test"
+ Dynamic: license-file
+
+ # LLM Bridge
+
+ LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI (Native / Azure / GitHub), Gemini (AI Studio / Vertex), Claude, and Grok.
+
+ GitHub: [https://github.com/windsnow1025/LLM-Bridge](https://github.com/windsnow1025/LLM-Bridge)
+
+ PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge/)
+
+ ## Workflow and Features
+
+ 1. **Message Preprocessor**: extracts text content from documents (Word, Excel, PPT, Code files, PDFs) which are not natively supported by the target model.
+ 2. **Chat Client Factory**: creates a client for the specific LLM API with model parameters
+     1. **Model Message Converter**: converts general messages to model messages
+     1. **Media Processor**: converts general media (Image, Audio, Video, PDF) to model compatible formats.
+ 3. **Chat Client**: generate stream or non-stream responses
+     - **Model Thoughts**: captures and formats the model's thinking process
+     - **Code Execution**: auto generate and execute Python code
+     - **Web Search + Citations**: extracts and formats citations from search results
+     - **Token Counter**: tracks and reports input and output token usage
+
+ ### Supported Features for API Types
+
+ The features listed represent the maximum capabilities of each API type supported by LLM Bridge.
+
+ | API Type | Input Format                   | Capabilities                                     | Output Format     |
+ |----------|--------------------------------|--------------------------------------------------|-------------------|
+ | OpenAI   | Text, Image, PDF               | Thinking, Web Search, Code Execution             | Text              |
+ | Gemini   | Text, Image, Video, Audio, PDF | Thinking, Web Search + Citations, Code Execution | Text, Image, File |
+ | Claude   | Text, Image, PDF               | Thinking, Web Search, Code Execution             | Text              |
+ | Grok     | Text, Image                    |                                                  | Text              |
+
+ #### Planned Features
+
+ - Structured Output
+ - More features for API Types
+ - Native support for Grok
+
+ ## Installation
+
+ ```bash
+ pip install --upgrade llm_bridge
+ ```
+
+ ## Test
+
+ ```bash
+ pytest
+ ```
+
+ ## Quick Start
+
+ ### Setup
+
+ 1. Copy `./usage/.env.example` and rename it to `./usage/.env`, then fill in the environment variables.
+ 2. Install requirements: `pip install -r requirements.txt`
+ 3. In PyCharm, add a new Python configuration:
+     - script: `./usage/main.py`
+     - Paths to ".env" files: `./usage/.env`
@@ -0,0 +1,88 @@
+ Metadata-Version: 2.4
+ Name: LLM-Bridge
+ Version: 1.12.0
+ Summary: A Bridge for LLMs
+ Author-email: windsnow1025 <windsnow1025@gmail.com>
+ License-Expression: MIT
+ Keywords: llm,ai
+ Classifier: Framework :: FastAPI
+ Classifier: Programming Language :: Python :: 3
+ Requires-Python: >=3.12
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: fastapi
+ Requires-Dist: httpx
+ Requires-Dist: tenacity
+ Requires-Dist: openai==1.106.1
+ Requires-Dist: tiktoken==0.11.0
+ Requires-Dist: google-genai==1.46.0
+ Requires-Dist: anthropic==0.66.0
+ Requires-Dist: PyMuPDF
+ Requires-Dist: docxlatex>=1.1.1
+ Requires-Dist: openpyxl
+ Requires-Dist: python-pptx
+ Provides-Extra: test
+ Requires-Dist: pytest; extra == "test"
+ Requires-Dist: pytest-asyncio; extra == "test"
+ Requires-Dist: python-dotenv; extra == "test"
+ Requires-Dist: protobuf; extra == "test"
+ Dynamic: license-file
+
+ # LLM Bridge
+
+ LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI (Native / Azure / GitHub), Gemini (AI Studio / Vertex), Claude, and Grok.
+
+ GitHub: [https://github.com/windsnow1025/LLM-Bridge](https://github.com/windsnow1025/LLM-Bridge)
+
+ PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge/)
+
+ ## Workflow and Features
+
+ 1. **Message Preprocessor**: extracts text content from documents (Word, Excel, PPT, Code files, PDFs) which are not natively supported by the target model.
+ 2. **Chat Client Factory**: creates a client for the specific LLM API with model parameters
+     1. **Model Message Converter**: converts general messages to model messages
+     1. **Media Processor**: converts general media (Image, Audio, Video, PDF) to model compatible formats.
+ 3. **Chat Client**: generate stream or non-stream responses
+     - **Model Thoughts**: captures and formats the model's thinking process
+     - **Code Execution**: auto generate and execute Python code
+     - **Web Search + Citations**: extracts and formats citations from search results
+     - **Token Counter**: tracks and reports input and output token usage
+
+ ### Supported Features for API Types
+
+ The features listed represent the maximum capabilities of each API type supported by LLM Bridge.
+
+ | API Type | Input Format                   | Capabilities                                     | Output Format     |
+ |----------|--------------------------------|--------------------------------------------------|-------------------|
+ | OpenAI   | Text, Image, PDF               | Thinking, Web Search, Code Execution             | Text              |
+ | Gemini   | Text, Image, Video, Audio, PDF | Thinking, Web Search + Citations, Code Execution | Text, Image, File |
+ | Claude   | Text, Image, PDF               | Thinking, Web Search, Code Execution             | Text              |
+ | Grok     | Text, Image                    |                                                  | Text              |
+
+ #### Planned Features
+
+ - Structured Output
+ - More features for API Types
+ - Native support for Grok
+
+ ## Installation
+
+ ```bash
+ pip install --upgrade llm_bridge
+ ```
+
+ ## Test
+
+ ```bash
+ pytest
+ ```
+
+ ## Quick Start
+
+ ### Setup
+
+ 1. Copy `./usage/.env.example` and rename it to `./usage/.env`, then fill in the environment variables.
+ 2. Install requirements: `pip install -r requirements.txt`
+ 3. In PyCharm, add a new Python configuration:
+     - script: `./usage/main.py`
+     - Paths to ".env" files: `./usage/.env`
@@ -0,0 +1,58 @@
+ # LLM Bridge
+
+ LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI (Native / Azure / GitHub), Gemini (AI Studio / Vertex), Claude, and Grok.
+
+ GitHub: [https://github.com/windsnow1025/LLM-Bridge](https://github.com/windsnow1025/LLM-Bridge)
+
+ PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge/)
+
+ ## Workflow and Features
+
+ 1. **Message Preprocessor**: extracts text content from documents (Word, Excel, PPT, Code files, PDFs) which are not natively supported by the target model.
+ 2. **Chat Client Factory**: creates a client for the specific LLM API with model parameters
+     1. **Model Message Converter**: converts general messages to model messages
+     1. **Media Processor**: converts general media (Image, Audio, Video, PDF) to model compatible formats.
+ 3. **Chat Client**: generate stream or non-stream responses
+     - **Model Thoughts**: captures and formats the model's thinking process
+     - **Code Execution**: auto generate and execute Python code
+     - **Web Search + Citations**: extracts and formats citations from search results
+     - **Token Counter**: tracks and reports input and output token usage
+
+ ### Supported Features for API Types
+
+ The features listed represent the maximum capabilities of each API type supported by LLM Bridge.
+
+ | API Type | Input Format                   | Capabilities                                     | Output Format     |
+ |----------|--------------------------------|--------------------------------------------------|-------------------|
+ | OpenAI   | Text, Image, PDF               | Thinking, Web Search, Code Execution             | Text              |
+ | Gemini   | Text, Image, Video, Audio, PDF | Thinking, Web Search + Citations, Code Execution | Text, Image, File |
+ | Claude   | Text, Image, PDF               | Thinking, Web Search, Code Execution             | Text              |
+ | Grok     | Text, Image                    |                                                  | Text              |
+
+ #### Planned Features
+
+ - Structured Output
+ - More features for API Types
+ - Native support for Grok
+
+ ## Installation
+
+ ```bash
+ pip install --upgrade llm_bridge
+ ```
+
+ ## Test
+
+ ```bash
+ pytest
+ ```
+
+ ## Quick Start
+
+ ### Setup
+
+ 1. Copy `./usage/.env.example` and rename it to `./usage/.env`, then fill in the environment variables.
+ 2. Install requirements: `pip install -r requirements.txt`
+ 3. In PyCharm, add a new Python configuration:
+     - script: `./usage/main.py`
+     - Paths to ".env" files: `./usage/.env`
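The README's Quick Start stops at environment setup. A minimal sketch of driving the pipeline follows, based only on the `create_chat_client` signature visible later in this diff; the `Message(role=..., content=...)` shape and both import paths are assumptions inferred from file names, not confirmed by the diff.

```python
# Hedged sketch: exercises the factory signature added in 1.12.0.
# Import paths and the Message constructor shape are assumptions.
import asyncio

from llm_bridge.logic.chat_generate.chat_client_factory import create_chat_client
from llm_bridge.type.message import Message


async def main():
    client = await create_chat_client(
        api_keys={"OPENAI_API_KEY": "sk-..."},              # placeholder key
        messages=[Message(role="user", content="Hello")],   # assumed shape
        model="gpt-5",
        api_type="OpenAI",
        temperature=0.7,
        stream=True,
        thought=True,           # new in 1.12.0: opt-in reasoning/thinking
        code_execution=False,   # new in 1.12.0: opt-in code execution
    )
    async for chunk in client.generate_stream_response():
        print(chunk)


asyncio.run(main())
```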
@@ -58,16 +58,27 @@ class NonStreamClaudeClient(ClaudeClient):
         try:
             logging.info(f"messages: {self.messages}")
 
-            message = await self.client.beta.messages.create(
-                model=self.model,
-                max_tokens=self.max_tokens,
-                temperature=self.temperature,
-                system=self.system,
-                messages=serialize(self.messages),
-                thinking=self.thinking,
-                betas=self.betas,
-                tools=self.tools,
-            )
+            if self.thinking:
+                message = await self.client.beta.messages.create(
+                    model=self.model,
+                    max_tokens=self.max_tokens,
+                    temperature=self.temperature,
+                    system=self.system,
+                    messages=serialize(self.messages),
+                    betas=self.betas,
+                    tools=self.tools,
+                    thinking=self.thinking,
+                )
+            else:
+                message = await self.client.beta.messages.create(
+                    model=self.model,
+                    max_tokens=self.max_tokens,
+                    temperature=self.temperature,
+                    system=self.system,
+                    messages=serialize(self.messages),
+                    betas=self.betas,
+                    tools=self.tools,
+                )
 
             return await process_claude_non_stream_response(
                 message=message,
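The two `create` calls above differ only in whether `thinking` is passed, presumably because handing the SDK an unset `thinking` explicitly is not equivalent to omitting the argument. A sketch of collapsing the branches by building the keyword arguments once; `_create_message` is a hypothetical helper, and the attribute names are taken from the hunk.

```python
# Hypothetical refactor of the branch above: build kwargs once and add
# `thinking` only when it is set, so the create() call is not duplicated.
async def _create_message(self):
    kwargs = dict(
        model=self.model,
        max_tokens=self.max_tokens,
        temperature=self.temperature,
        system=self.system,
        messages=serialize(self.messages),
        betas=self.betas,
        tools=self.tools,
    )
    if self.thinking:
        kwargs["thinking"] = self.thinking  # omit the kwarg entirely when disabled
    return await self.client.beta.messages.create(**kwargs)
```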
@@ -0,0 +1,71 @@
+import logging
+import re
+from typing import AsyncGenerator
+
+import httpx
+from fastapi import HTTPException
+
+from llm_bridge.client.implementations.claude.claude_response_handler import process_claude_stream_response
+from llm_bridge.client.model_client.claude_client import ClaudeClient
+from llm_bridge.type.chat_response import ChatResponse
+from llm_bridge.type.serializer import serialize
+
+class StreamClaudeClient(ClaudeClient):
+    async def generate_stream_response(self) -> AsyncGenerator[ChatResponse, None]:
+        try:
+            logging.info(f"messages: {self.messages}")
+
+            try:
+                if self.thinking:
+                    async with self.client.beta.messages.stream(
+                        model=self.model,
+                        max_tokens=self.max_tokens,
+                        temperature=self.temperature,
+                        system=self.system,
+                        messages=serialize(self.messages),
+                        betas=self.betas,
+                        tools=self.tools,
+                        thinking=self.thinking,
+                    ) as stream:
+                        async for event in stream:
+                            yield await process_claude_stream_response(
+                                event=event,
+                                input_tokens=self.input_tokens,
+                                client=self.client,
+                                model=self.model,
+                            )
+                else:
+                    async with self.client.beta.messages.stream(
+                        model=self.model,
+                        max_tokens=self.max_tokens,
+                        temperature=self.temperature,
+                        system=self.system,
+                        messages=serialize(self.messages),
+                        betas=self.betas,
+                        tools=self.tools,
+                    ) as stream:
+                        async for event in stream:
+                            yield await process_claude_stream_response(
+                                event=event,
+                                input_tokens=self.input_tokens,
+                                client=self.client,
+                                model=self.model,
+                            )
+
+            except Exception as e:
+                logging.exception(e)
+                yield ChatResponse(error=repr(e))
+
+        except httpx.HTTPStatusError as e:
+            status_code = e.response.status_code
+            text = e.response.text
+            raise HTTPException(status_code=status_code, detail=text)
+        except Exception as e:
+            logging.exception(e)
+            match = re.search(r'\d{3}', str(e))
+            if match:
+                error_code = int(match.group(0))
+            else:
+                error_code = 500
+
+            raise HTTPException(status_code=error_code, detail=str(e))
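A sketch of consuming the new client; `build_stream_claude_client()` is a hypothetical stand-in for the factory call shown further down (`create_claude_client` with `stream=True`). Note that failures inside the inner `try` are yielded as `ChatResponse(error=...)` rather than raised, so consumers should check for them.

```python
# Hedged usage sketch for StreamClaudeClient. build_stream_claude_client() is
# hypothetical; in the package, create_claude_client(..., stream=True) plays
# this role. Mid-stream errors arrive as ChatResponse.error, per the code above.
import asyncio


async def main():
    client = await build_stream_claude_client()  # hypothetical factory call
    async for chunk in client.generate_stream_response():
        if chunk.error:
            print("stream failed:", chunk.error)
            break
        print(chunk)


asyncio.run(main())
```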
@@ -8,86 +8,104 @@ from llm_bridge.type.message import Message
 
 
 async def create_chat_client(
+        api_keys: dict,
         messages: list[Message],
         model: str,
         api_type: str,
         temperature: float,
         stream: bool,
-        api_keys: dict
+        thought: bool,
+        code_execution: bool,
 ) -> ChatClient:
     if api_type == 'OpenAI':
         return await create_openai_client(
+            api_keys={"OPENAI_API_KEY": api_keys["OPENAI_API_KEY"]},
             messages=messages,
             model=model,
             api_type=api_type,
             temperature=temperature,
             stream=stream,
-            api_keys={"OPENAI_API_KEY": api_keys["OPENAI_API_KEY"]}
+            thought=thought,
+            code_execution=code_execution,
         )
     elif api_type == 'OpenAI-Azure':
         return await create_openai_client(
+            api_keys={
+                "AZURE_API_KEY": api_keys["AZURE_API_KEY"],
+                "AZURE_API_BASE": api_keys["AZURE_API_BASE"]
+            },
             messages=messages,
             model=model,
             api_type=api_type,
             temperature=temperature,
             stream=stream,
-            api_keys={
-                "AZURE_API_KEY": api_keys["AZURE_API_KEY"],
-                "AZURE_API_BASE": api_keys["AZURE_API_BASE"]
-            }
+            thought=thought,
+            code_execution=code_execution,
         )
     elif api_type == 'OpenAI-GitHub':
         return await create_openai_client(
+            api_keys={"GITHUB_API_KEY": api_keys["GITHUB_API_KEY"]},
             messages=messages,
             model=model,
             api_type=api_type,
             temperature=temperature,
             stream=stream,
-            api_keys={"GITHUB_API_KEY": api_keys["GITHUB_API_KEY"]}
+            thought=thought,
+            code_execution=code_execution,
        )
     elif api_type == 'Grok':
         return await create_openai_client(
+            api_keys={"XAI_API_KEY": api_keys["XAI_API_KEY"]},
             messages=messages,
             model=model,
             api_type=api_type,
             temperature=temperature,
             stream=stream,
-            api_keys={"XAI_API_KEY": api_keys["XAI_API_KEY"]}
+            thought=thought,
+            code_execution=code_execution,
         )
     elif api_type == 'Gemini-Free':
         return await create_gemini_client(
+            api_key=api_keys["GEMINI_FREE_API_KEY"],
+            vertexai=False,
             messages=messages,
             model=model,
             temperature=temperature,
             stream=stream,
-            api_key=api_keys["GEMINI_FREE_API_KEY"],
-            vertexai=False,
+            thought=thought,
+            code_execution=code_execution,
         )
     elif api_type == 'Gemini-Paid':
         return await create_gemini_client(
+            api_key=api_keys["GEMINI_PAID_API_KEY"],
+            vertexai=False,
             messages=messages,
             model=model,
             temperature=temperature,
             stream=stream,
-            api_key=api_keys["GEMINI_PAID_API_KEY"],
-            vertexai=False,
+            thought=thought,
+            code_execution=code_execution,
         )
     elif api_type == 'Gemini-Vertex':
         return await create_gemini_client(
+            api_key=api_keys["GEMINI_VERTEX_API_KEY"],
             messages=messages,
             model=model,
             temperature=temperature,
             stream=stream,
-            api_key=api_keys["GEMINI_VERTEX_API_KEY"],
+            thought=thought,
+            code_execution=code_execution,
             vertexai=True,
         )
     elif api_type == 'Claude':
         return await create_claude_client(
+            api_key=api_keys["ANTHROPIC_API_KEY"],
             messages=messages,
             model=model,
             temperature=temperature,
             stream=stream,
-            api_key=api_keys["ANTHROPIC_API_KEY"]
+            thought=thought,
+            code_execution=code_execution,
         )
     else:
         raise HTTPException(status_code=400, detail="Invalid API type")
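Each branch above indexes `api_keys` directly, so the caller must supply the key matching the chosen `api_type` or the factory raises `KeyError` before any client is built. A sketch under that assumption, written for an async context; the key, model id, and `messages` list are placeholders, and the non-stream method name is assumed by symmetry with `generate_stream_response`.

```python
# Sketch (inside an async function): the Claude branch reads
# api_keys["ANTHROPIC_API_KEY"], so only that key is needed for
# api_type="Claude". Key and model id below are placeholders.
client = await create_chat_client(
    api_keys={"ANTHROPIC_API_KEY": "sk-ant-..."},
    messages=messages,             # a preprocessed list[Message]
    model="claude-sonnet-4-5",     # placeholder model id
    api_type="Claude",
    temperature=1.0,
    stream=False,
    thought=True,          # enables extended thinking (see factory below)
    code_execution=True,   # appends the code-execution tool
)
response = await client.generate_non_stream_response()  # assumed method name
```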
@@ -11,11 +11,13 @@ from llm_bridge.type.message import Message
 
 
 async def create_claude_client(
+        api_key: str,
         messages: list[Message],
         model: str,
         temperature: float,
         stream: bool,
-        api_key: str,
+        thought: bool,
+        code_execution: bool,
 ):
     client = anthropic.AsyncAnthropic(
         api_key=api_key,
@@ -38,11 +40,13 @@ async def create_claude_client(
         32_000,  # Max output: Claude 4.5 64K; Claude 4.1 32K
         200_000 - input_tokens,  # Context window: Claude Sonnet 4.5 beta: 1M; otherwise 200K
     )
-    thinking = ThinkingConfigEnabledParam(
-        type="enabled",
-        budget_tokens=max(1024, max_tokens // 2),  # Minimum budget tokens: 1024
-    )
-    temperature = 1
+    thinking = None
+    if thought:
+        thinking = ThinkingConfigEnabledParam(
+            type="enabled",
+            budget_tokens=max(1024, max_tokens // 2),  # Minimum budget tokens: 1024
+        )
+        temperature = 1
     betas: list[AnthropicBetaParam] = [
         "context-1m-2025-08-07",
         "output-128k-2025-02-19",
@@ -53,11 +57,14 @@ async def create_claude_client(
             type="web_search_20250305",
             name="web_search",
         ),
-        BetaCodeExecutionTool20250825Param(
-            type="code_execution_20250825",
-            name="code_execution",
-        )
     ]
+    if code_execution:
+        tools.append(
+            BetaCodeExecutionTool20250825Param(
+                type="code_execution_20250825",
+                name="code_execution",
+            )
+        )
 
     if stream:
         return StreamClaudeClient(
@@ -67,10 +74,10 @@ async def create_claude_client(
             system=system,
             client=client,
             max_tokens=max_tokens,
-            thinking=thinking,
             betas=betas,
             input_tokens=input_tokens,
             tools=tools,
+            thinking=thinking,
         )
     else:
         return NonStreamClaudeClient(
@@ -80,8 +87,8 @@ async def create_claude_client(
             system=system,
             client=client,
             max_tokens=max_tokens,
-            thinking=thinking,
             betas=betas,
             input_tokens=input_tokens,
             tools=tools,
+            thinking=thinking,
         )
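When `thought` is set, the budget arithmetic above halves `max_tokens` and floors the result at the API minimum of 1024, and `max_tokens` itself was already capped a few lines earlier. A worked example with assumed `input_tokens` values:

```python
# Worked example of the caps above (input_tokens values assumed for illustration):
for input_tokens in (150_000, 198_500):
    max_tokens = min(32_000, 200_000 - input_tokens)  # output cap vs. remaining context
    budget_tokens = max(1024, max_tokens // 2)        # thinking budget, floored at 1024
    print(input_tokens, max_tokens, budget_tokens)
# 150000 -> max_tokens=32000, budget_tokens=16000
# 198500 -> max_tokens=1500,  budget_tokens=1024
```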
@@ -1,6 +1,5 @@
 from google import genai
 from google.genai import types
-from google.genai._api_client import HttpOptions
 from google.genai.types import Modality
 
 from llm_bridge.client.implementations.gemini.non_stream_gemini_client import NonStreamGeminiClient
@@ -11,25 +10,26 @@ from llm_bridge.type.message import Message
 
 
 async def create_gemini_client(
+        api_key: str,
+        vertexai: bool,
         messages: list[Message],
         model: str,
         temperature: float,
         stream: bool,
-        api_key: str,
-        vertexai: bool,
+        thought: bool,
+        code_execution: bool,
 ):
     client = genai.Client(
         vertexai=vertexai,
         api_key=api_key,
     )
 
-    system_instruction = None
+    system_instruction = extract_system_messages(messages) or " "
     tools = []
     thinking_config = None
     response_modalities = [Modality.TEXT]
 
-    system_instruction = extract_system_messages(messages) or " "
-    if "image" not in model and not vertexai:
+    if "image" not in model:
         tools.append(
             types.Tool(
                 google_search=types.GoogleSearch()
@@ -40,27 +40,18 @@ async def create_gemini_client(
                 url_context=types.UrlContext()
             )
         )
-        tools.append(
-            types.Tool(
-                code_execution=types.ToolCodeExecution()
-            )
-        )
-    if "image" not in model and vertexai:
-        tools.append(
-            types.Tool(
-                google_search=types.GoogleSearch()
+        if thought:
+            thinking_config = types.ThinkingConfig(
+                include_thoughts=True,
+                thinking_budget=-1,
             )
-        )
-        tools.append(
-            types.Tool(
-                url_context=types.UrlContext()
-            )
-        )
-    if "image" not in model:
-        thinking_config = types.ThinkingConfig(
-            include_thoughts=True,
-            thinking_budget=-1,
-        )
+        if not vertexai:
+            if code_execution:
+                tools.append(
+                    types.Tool(
+                        code_execution=types.ToolCodeExecution()
+                    )
+                )
     if "image" in model:
         response_modalities = [Modality.TEXT, Modality.IMAGE]
 
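After this restructuring, tool selection no longer differs between AI Studio and Vertex except for code execution, which stays AI-Studio-only and is now opt-in. A condensed restatement of the gating, using the same google-genai types as the hunk; equivalence to the original is inferred from the diff, not tested:

```python
# Condensed sketch of the 1.12.0 gating above (same conditions, same types).
from google.genai import types
from google.genai.types import Modality


def build_gemini_config(model: str, vertexai: bool, thought: bool, code_execution: bool):
    tools: list[types.Tool] = []
    thinking_config = None
    response_modalities = [Modality.TEXT]
    if "image" not in model:
        # Search and URL-context tools now apply to both AI Studio and Vertex.
        tools.append(types.Tool(google_search=types.GoogleSearch()))
        tools.append(types.Tool(url_context=types.UrlContext()))
        if thought:
            thinking_config = types.ThinkingConfig(include_thoughts=True, thinking_budget=-1)
        if not vertexai and code_execution:
            # Code execution remains unavailable on Vertex and is opt-in elsewhere.
            tools.append(types.Tool(code_execution=types.ToolCodeExecution()))
    else:
        response_modalities = [Modality.TEXT, Modality.IMAGE]
    return tools, thinking_config, response_modalities
```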
@@ -17,12 +17,14 @@ from llm_bridge.type.message import Message
 
 
 async def create_openai_client(
+        api_keys: dict,
         messages: list[Message],
         model: str,
         api_type: str,
         temperature: float,
         stream: bool,
-        api_keys: dict
+        thought: bool,
+        code_execution: bool,
 ):
     if api_type == "OpenAI":
         client = openai.AsyncOpenAI(
@@ -47,7 +49,7 @@ async def create_openai_client(
     else:
         raise HTTPException(status_code=500, detail="API Type not matched")
 
-    if api_type in ("OpenAI", "OpenAI-Azure", "Grok"):
+    if api_type in ("OpenAI", "OpenAI-Azure"):
         use_responses_api = True
     else:
         use_responses_api = False
@@ -61,12 +63,13 @@ async def create_openai_client(
     reasoning = None
 
     if model not in ["gpt-5-chat-latest", "gpt-5-pro"]:
-        tools.append(
-            CodeInterpreter(
-                type="code_interpreter",
-                container=CodeInterpreterContainerCodeInterpreterToolAuto(type="auto")
+        if code_execution:
+            tools.append(
+                CodeInterpreter(
+                    type="code_interpreter",
+                    container=CodeInterpreterContainerCodeInterpreterToolAuto(type="auto")
+                )
             )
-        )
     if model not in ["gpt-5-chat-latest"]:
         tools.append(
             WebSearchToolParam(
@@ -77,10 +80,11 @@ async def create_openai_client(
     if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
         temperature = 1
     if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
-        reasoning = Reasoning(
-            effort="high",
-            summary="auto",
-        )
+        if thought:
+            reasoning = Reasoning(
+                effort="high",
+                summary="auto",
+            )
         tools.append(
             ImageGeneration(
                 type="image_generation",
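The hunk is cut off here, but the gating pattern matches the other factories: `code_execution` guards the code-interpreter tool and `thought` guards reasoning, which only applies to gpt-5-family models other than `gpt-5-chat-latest`. A condensed sketch of that reasoning gate; the `Reasoning` import path is an assumption based on the identifier used in the diff:

```python
# Sketch of the gpt-5 reasoning gate above; the import path is assumed.
import re

from openai.types.shared_params import Reasoning


def reasoning_for(model: str, thought: bool) -> Reasoning | None:
    # Reasoning is requested only for gpt-5-family models (except the
    # chat-tuned variant) and only when the caller passed thought=True.
    if thought and re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
        return Reasoning(effort="high", summary="auto")
    return None
```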