LLM-Bridge 1.12.0a0__tar.gz → 1.12.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68) hide show
  1. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1/LLM_Bridge.egg-info}/PKG-INFO +14 -13
  2. {llm_bridge-1.12.0a0/LLM_Bridge.egg-info → llm_bridge-1.12.1}/PKG-INFO +14 -13
  3. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/README.md +13 -12
  4. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/message_preprocess/message_preprocessor.py +1 -1
  5. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/pyproject.toml +1 -1
  6. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/LICENSE +0 -0
  7. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/LLM_Bridge.egg-info/SOURCES.txt +0 -0
  8. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/LLM_Bridge.egg-info/dependency_links.txt +0 -0
  9. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/LLM_Bridge.egg-info/requires.txt +0 -0
  10. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/LLM_Bridge.egg-info/top_level.txt +0 -0
  11. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/MANIFEST.in +0 -0
  12. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/__init__.py +0 -0
  13. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/__init__.py +0 -0
  14. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/chat_client.py +0 -0
  15. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/__init__.py +0 -0
  16. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  17. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
  18. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  19. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  20. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  21. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  22. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
  23. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  24. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  25. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  26. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  27. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  28. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
  29. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  30. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
  31. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  32. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/implementations/printing_status.py +0 -0
  33. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/model_client/__init__.py +0 -0
  34. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/model_client/claude_client.py +0 -0
  35. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/model_client/gemini_client.py +0 -0
  36. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/client/model_client/openai_client.py +0 -0
  37. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/__init__.py +0 -0
  38. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  39. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  40. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  41. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  42. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  43. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  44. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  45. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +0 -0
  46. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  47. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  48. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  49. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  50. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  51. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/file_fetch.py +0 -0
  52. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  53. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  54. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  55. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  56. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/logic/model_prices.py +0 -0
  57. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/resources/__init__.py +0 -0
  58. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/resources/model_prices.json +0 -0
  59. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/type/__init__.py +0 -0
  60. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/type/chat_response.py +0 -0
  61. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/type/message.py +0 -0
  62. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/type/model_message/__init__.py +0 -0
  63. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/type/model_message/claude_message.py +0 -0
  64. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/type/model_message/gemini_message.py +0 -0
  65. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/type/model_message/openai_message.py +0 -0
  66. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  67. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/llm_bridge/type/serializer.py +0 -0
  68. {llm_bridge-1.12.0a0 → llm_bridge-1.12.1}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: LLM-Bridge
3
- Version: 1.12.0a0
3
+ Version: 1.12.1
4
4
  Summary: A Bridge for LLMs
5
5
  Author-email: windsnow1025 <windsnow1025@gmail.com>
6
6
  License-Expression: MIT
@@ -30,7 +30,7 @@ Dynamic: license-file
30
30
 
31
31
  # LLM Bridge
32
32
 
33
- LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI, OpenAI-Azure, OpenAI-GitHub, Gemini, Claude, and Grok.
33
+ LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI (Native / Azure / GitHub), Gemini (AI Studio / Vertex), Claude, and Grok.
34
34
 
35
35
  GitHub: [https://github.com/windsnow1025/LLM-Bridge](https://github.com/windsnow1025/LLM-Bridge)
36
36
 
@@ -39,30 +39,31 @@ PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge
39
39
  ## Workflow and Features
40
40
 
41
41
  1. **Message Preprocessor**: extracts text content from documents (Word, Excel, PPT, Code files, PDFs) which are not natively supported by the target model.
42
- 2. **Chat Client Factory**: create a client for the specific LLM API with model parameters
43
- 1. **Model Message Converter**: convert general messages to model messages
44
- 1. **Media Processor**: converts media (Image, Audio, Video, PDF) which are natively supported by the target model into compatible formats.
42
+ 2. **Chat Client Factory**: creates a client for the specific LLM API with model parameters
43
+ 1. **Model Message Converter**: converts general messages to model messages
44
+ 1. **Media Processor**: converts general media (Image, Audio, Video, PDF) to model-compatible formats.
45
45
  3. **Chat Client**: generate stream or non-stream responses
46
- 1. **Model Thoughts**: captures and formats the model's thinking process
47
- 2. **Search Citations**: extracts and formats citations from search results
48
- 3. **Token Counter**: tracks and reports input and output token usage
46
+ - **Model Thoughts**: captures and formats the model's thinking process
47
+ - **Code Execution**: automatically generates and executes Python code
48
+ - **Web Search + Citations**: extracts and formats citations from search results
49
+ - **Token Counter**: tracks and reports input and output token usage
49
50
 
50
- ### Model Features
51
+ ### Supported Features for API Types
51
52
 
52
53
  The features listed represent the maximum capabilities of each API type supported by LLM Bridge.
53
54
 
54
55
  | API Type | Input Format | Capabilities | Output Format |
55
56
  |----------|--------------------------------|--------------------------------------------------|-------------------|
56
- | OpenAI | Text, Image | Thinking, Web Search, Code Execution | Text |
57
+ | OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution | Text |
57
58
  | Gemini | Text, Image, Video, Audio, PDF | Thinking, Web Search + Citations, Code Execution | Text, Image, File |
58
59
  | Claude | Text, Image, PDF | Thinking, Web Search, Code Execution | Text |
59
60
  | Grok | Text, Image | | Text |
60
61
 
61
62
  #### Planned Features
62
63
 
63
- - OpenAI: Web Search: Citations, Image Output
64
- - Gemini: Code Execution: Code, Code Output
65
- - Claude: Code Execution, File Output
64
+ - Structured Output
65
+ - More features for API Types
66
+ - Native support for Grok
66
67
 
67
68
  ## Installation
68
69
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: LLM-Bridge
3
- Version: 1.12.0a0
3
+ Version: 1.12.1
4
4
  Summary: A Bridge for LLMs
5
5
  Author-email: windsnow1025 <windsnow1025@gmail.com>
6
6
  License-Expression: MIT
@@ -30,7 +30,7 @@ Dynamic: license-file
30
30
 
31
31
  # LLM Bridge
32
32
 
33
- LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI, OpenAI-Azure, OpenAI-GitHub, Gemini, Claude, and Grok.
33
+ LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI (Native / Azure / GitHub), Gemini (AI Studio / Vertex), Claude, and Grok.
34
34
 
35
35
  GitHub: [https://github.com/windsnow1025/LLM-Bridge](https://github.com/windsnow1025/LLM-Bridge)
36
36
 
@@ -39,30 +39,31 @@ PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge
39
39
  ## Workflow and Features
40
40
 
41
41
  1. **Message Preprocessor**: extracts text content from documents (Word, Excel, PPT, Code files, PDFs) which are not natively supported by the target model.
42
- 2. **Chat Client Factory**: create a client for the specific LLM API with model parameters
43
- 1. **Model Message Converter**: convert general messages to model messages
44
- 1. **Media Processor**: converts media (Image, Audio, Video, PDF) which are natively supported by the target model into compatible formats.
42
+ 2. **Chat Client Factory**: creates a client for the specific LLM API with model parameters
43
+ 1. **Model Message Converter**: converts general messages to model messages
44
+ 1. **Media Processor**: converts general media (Image, Audio, Video, PDF) to model-compatible formats.
45
45
  3. **Chat Client**: generate stream or non-stream responses
46
- 1. **Model Thoughts**: captures and formats the model's thinking process
47
- 2. **Search Citations**: extracts and formats citations from search results
48
- 3. **Token Counter**: tracks and reports input and output token usage
46
+ - **Model Thoughts**: captures and formats the model's thinking process
47
+ - **Code Execution**: automatically generates and executes Python code
48
+ - **Web Search + Citations**: extracts and formats citations from search results
49
+ - **Token Counter**: tracks and reports input and output token usage
49
50
 
50
- ### Model Features
51
+ ### Supported Features for API Types
51
52
 
52
53
  The features listed represent the maximum capabilities of each API type supported by LLM Bridge.
53
54
 
54
55
  | API Type | Input Format | Capabilities | Output Format |
55
56
  |----------|--------------------------------|--------------------------------------------------|-------------------|
56
- | OpenAI | Text, Image | Thinking, Web Search, Code Execution | Text |
57
+ | OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution | Text |
57
58
  | Gemini | Text, Image, Video, Audio, PDF | Thinking, Web Search + Citations, Code Execution | Text, Image, File |
58
59
  | Claude | Text, Image, PDF | Thinking, Web Search, Code Execution | Text |
59
60
  | Grok | Text, Image | | Text |
60
61
 
61
62
  #### Planned Features
62
63
 
63
- - OpenAI: Web Search: Citations, Image Output
64
- - Gemini: Code Execution: Code, Code Output
65
- - Claude: Code Execution, File Output
64
+ - Structured Output
65
+ - More features for API Types
66
+ - Native support for Grok
66
67
 
67
68
  ## Installation
68
69
 
@@ -1,6 +1,6 @@
1
1
  # LLM Bridge
2
2
 
3
- LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI, OpenAI-Azure, OpenAI-GitHub, Gemini, Claude, and Grok.
3
+ LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI (Native / Azure / GitHub), Gemini (AI Studio / Vertex), Claude, and Grok.
4
4
 
5
5
  GitHub: [https://github.com/windsnow1025/LLM-Bridge](https://github.com/windsnow1025/LLM-Bridge)
6
6
 
@@ -9,30 +9,31 @@ PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge
9
9
  ## Workflow and Features
10
10
 
11
11
  1. **Message Preprocessor**: extracts text content from documents (Word, Excel, PPT, Code files, PDFs) which are not natively supported by the target model.
12
- 2. **Chat Client Factory**: create a client for the specific LLM API with model parameters
13
- 1. **Model Message Converter**: convert general messages to model messages
14
- 1. **Media Processor**: converts media (Image, Audio, Video, PDF) which are natively supported by the target model into compatible formats.
12
+ 2. **Chat Client Factory**: creates a client for the specific LLM API with model parameters
13
+ 1. **Model Message Converter**: converts general messages to model messages
14
+ 1. **Media Processor**: converts general media (Image, Audio, Video, PDF) to model-compatible formats.
15
15
  3. **Chat Client**: generate stream or non-stream responses
16
- 1. **Model Thoughts**: captures and formats the model's thinking process
17
- 2. **Search Citations**: extracts and formats citations from search results
18
- 3. **Token Counter**: tracks and reports input and output token usage
16
+ - **Model Thoughts**: captures and formats the model's thinking process
17
+ - **Code Execution**: automatically generates and executes Python code
18
+ - **Web Search + Citations**: extracts and formats citations from search results
19
+ - **Token Counter**: tracks and reports input and output token usage
19
20
 
20
- ### Model Features
21
+ ### Supported Features for API Types
21
22
 
22
23
  The features listed represent the maximum capabilities of each API type supported by LLM Bridge.
23
24
 
24
25
  | API Type | Input Format | Capabilities | Output Format |
25
26
  |----------|--------------------------------|--------------------------------------------------|-------------------|
26
- | OpenAI | Text, Image | Thinking, Web Search, Code Execution | Text |
27
+ | OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution | Text |
27
28
  | Gemini | Text, Image, Video, Audio, PDF | Thinking, Web Search + Citations, Code Execution | Text, Image, File |
28
29
  | Claude | Text, Image, PDF | Thinking, Web Search, Code Execution | Text |
29
30
  | Grok | Text, Image | | Text |
30
31
 
31
32
  #### Planned Features
32
33
 
33
- - OpenAI: Web Search: Citations, Image Output
34
- - Gemini: Code Execution: Code, Code Output
35
- - Claude: Code Execution, File Output
34
+ - Structured Output
35
+ - More features for API Types
36
+ - Native support for Grok
36
37
 
37
38
  ## Installation
38
39
 
@@ -21,7 +21,7 @@ async def extract_text_files_to_message(message: Message, api_type: str) -> None
21
21
  if file_type != "text" and file_type != "application":
22
22
  continue
23
23
 
24
- if sub_type == "pdf" and api_type in ("OpenAI", "OpenAI-Azure", "Gemini-Free", "Gemini-Paid", "Claude"):
24
+ if sub_type == "pdf" and api_type in ("OpenAI", "OpenAI-Azure", "Gemini-Vertex", "Gemini-Free", "Gemini-Paid", "Claude"):
25
25
  continue
26
26
 
27
27
  filename = get_file_name(file_url)
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "LLM-Bridge"
7
- version = "1.12.0-alpha.0"
7
+ version = "1.12.1"
8
8
  authors = [
9
9
  {name = "windsnow1025", email = "windsnow1025@gmail.com"}
10
10
  ]
File without changes
File without changes
File without changes