LLM-Bridge 1.7.17.tar.gz → 1.7.19.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. {llm_bridge-1.7.17 → llm_bridge-1.7.19/LLM_Bridge.egg-info}/PKG-INFO +9 -3
  2. {llm_bridge-1.7.17/LLM_Bridge.egg-info → llm_bridge-1.7.19}/PKG-INFO +9 -3
  3. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/README.md +8 -2
  4. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/chat_generate/media_processor.py +4 -3
  5. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +1 -1
  6. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +10 -2
  7. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/message_preprocess/file_type_checker.py +2 -1
  8. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/message_preprocess/message_preprocessor.py +1 -1
  9. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/resources/model_prices.json +3 -3
  10. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/pyproject.toml +1 -1
  11. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/LICENSE +0 -0
  12. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/LLM_Bridge.egg-info/SOURCES.txt +0 -0
  13. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/LLM_Bridge.egg-info/dependency_links.txt +0 -0
  14. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/LLM_Bridge.egg-info/requires.txt +0 -0
  15. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/LLM_Bridge.egg-info/top_level.txt +0 -0
  16. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/MANIFEST.in +0 -0
  17. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/__init__.py +0 -0
  18. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/__init__.py +0 -0
  19. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/chat_client.py +0 -0
  20. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/__init__.py +0 -0
  21. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  22. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/claude/claude_stream_response_handler.py +0 -0
  23. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  24. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  25. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  26. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  27. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
  28. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  29. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  30. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  31. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  32. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  33. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
  34. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  35. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
  36. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  37. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/implementations/printing_status.py +0 -0
  38. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/model_client/__init__.py +0 -0
  39. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/model_client/claude_client.py +0 -0
  40. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/model_client/gemini_client.py +0 -0
  41. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/client/model_client/openai_client.py +0 -0
  42. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/__init__.py +0 -0
  43. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  44. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  45. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  46. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  47. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  48. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  49. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  50. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  51. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  52. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  53. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/file_fetch.py +0 -0
  54. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  55. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  56. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  57. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/logic/model_prices.py +0 -0
  58. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/resources/__init__.py +0 -0
  59. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/type/__init__.py +0 -0
  60. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/type/chat_response.py +0 -0
  61. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/type/message.py +0 -0
  62. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/type/model_message/__init__.py +0 -0
  63. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/type/model_message/claude_message.py +0 -0
  64. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/type/model_message/gemini_message.py +0 -0
  65. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/type/model_message/openai_message.py +0 -0
  66. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  67. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/llm_bridge/type/serializer.py +0 -0
  68. {llm_bridge-1.7.17 → llm_bridge-1.7.19}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.7.17
+Version: 1.7.19
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
@@ -72,9 +72,15 @@ pytest
 
 ## Quick Start
 
-See `./usage/`
+### Setup
 
-### Workflow
+1. Copy `./usage/.env.example` and rename it to `./usage/.env`, then fill in the environment variables.
+2. Install requirements: `pip install -r requirements.txt`
+3. In PyCharm, add a new Python configuration:
+   - script: `./usage/main.py`
+   - Paths to ".env" files: `./usage/.env`
+
+## Workflow
 
 ```python
 from typing import AsyncGenerator
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.7.17
+Version: 1.7.19
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
@@ -72,9 +72,15 @@ pytest
 
 ## Quick Start
 
-See `./usage/`
+### Setup
 
-### Workflow
+1. Copy `./usage/.env.example` and rename it to `./usage/.env`, then fill in the environment variables.
+2. Install requirements: `pip install -r requirements.txt`
+3. In PyCharm, add a new Python configuration:
+   - script: `./usage/main.py`
+   - Paths to ".env" files: `./usage/.env`
+
+## Workflow
 
 ```python
 from typing import AsyncGenerator
@@ -42,9 +42,15 @@ pytest
 
 ## Quick Start
 
-See `./usage/`
+### Setup
 
-### Workflow
+1. Copy `./usage/.env.example` and rename it to `./usage/.env`, then fill in the environment variables.
+2. Install requirements: `pip install -r requirements.txt`
+3. In PyCharm, add a new Python configuration:
+   - script: `./usage/main.py`
+   - Paths to ".env" files: `./usage/.env`
+
+## Workflow
 
 ```python
 from typing import AsyncGenerator
@@ -8,10 +8,11 @@ async def get_raw_content_from_url(req_url: str) -> tuple[bytes, str]:
     return file_data, media_type
 
 
+# Base64 Encoded
 async def get_encoded_content_from_url(req_url: str) -> tuple[str, str]:
-    img_data, media_type = await get_raw_content_from_url(req_url)
-    base64_image = base64.b64encode(img_data).decode('utf-8')
-    return base64_image, media_type
+    media_data, media_type = await get_raw_content_from_url(req_url)
+    base64_media = base64.b64encode(media_data).decode('utf-8')
+    return base64_media, media_type
 
 
 async def get_openai_image_content_from_url(req_img_url: str) -> str:
@@ -44,7 +44,7 @@ async def create_openai_client(
     else:
         raise HTTPException(status_code=500, detail="API Type not matched")
 
-    if api_type in ("OpenAI", "OpenAI-Azure"):
+    if api_type in ("OpenAI", "OpenAI-Azure", "Grok"):
         use_responses_api = True
     else:
         use_responses_api = False
@@ -1,8 +1,8 @@
 from openai.types.responses import ResponseInputTextParam, ResponseInputImageParam, ResponseOutputTextParam, \
-    ResponseInputContentParam, EasyInputMessageParam, ResponseOutputMessageParam
+    ResponseInputContentParam, EasyInputMessageParam, ResponseOutputMessageParam, ResponseInputFileParam
 
 from llm_bridge.logic.chat_generate import media_processor
-from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type
+from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type, get_file_name
 from llm_bridge.type.message import Message, ContentType
 from llm_bridge.type.model_message.openai_responses_message import OpenAIResponsesMessage
 
@@ -29,6 +29,14 @@ async def convert_message_to_openai_responses(message: Message) -> OpenAIRespons
                 detail="auto"
             )
             content.append(image_content)
+        elif sub_type == "pdf":
+            file_data, _ = await media_processor.get_encoded_content_from_url(file_url)
+            pdf_content = ResponseInputFileParam(
+                type="input_file",
+                filename=get_file_name(file_url),
+                file_data=f"data:application/pdf;base64,{file_data}",
+            )
+            content.append(pdf_content)
         # TODO: Responses API is currently unsupported for audio input
         # elif file_type == "audio":
         #     encoded_string = await media_processor.get_gpt_audio_content_from_url(file_url)
@@ -41,7 +41,8 @@ async def get_file_type(file_url: str) -> tuple[str, str]:
     return 'unknown', 'unknown'
 
 
-def get_file_name(file_url) -> str:
+# Without Timestamp
+def get_file_name(file_url: str) -> str:
     base_name = os.path.basename(file_url)
     match = re.search(r'-(.+)', base_name)
     if match:
@@ -21,7 +21,7 @@ async def extract_text_files_to_message(message: Message, api_type: str) -> None
         if file_type != "text" and file_type != "application":
             continue
 
-        if sub_type == "pdf" and api_type in ("Gemini-Free", "Gemini-Paid", "Claude"):
+        if sub_type == "pdf" and api_type in ("OpenAI", "OpenAI-Azure", "Gemini-Free", "Gemini-Paid", "Claude"):
             continue
 
         filename = get_file_name(file_url)
@@ -17,7 +17,7 @@
     "input": 1.25,
     "output": 10
   },
-    {
+  {
     "apiType": "OpenAI",
     "model": "gpt-5",
     "input": 1.25,
@@ -91,7 +91,7 @@
   },
   {
     "apiType": "Gemini-Free",
-    "model": "gemini-2.0-flash-preview-image-generation",
+    "model": "gemini-2.5-flash-image-preview",
     "input": 0,
     "output": 0
   },
@@ -109,7 +109,7 @@
   },
   {
     "apiType": "Gemini-Paid",
-    "model": "gemini-2.0-flash-preview-image-generation",
+    "model": "gemini-2.5-flash-image-preview",
     "input": 0.7,
     "output": 0.4
   },
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "LLM-Bridge"
-version = "1.7.17"
+version = "1.7.19"
 authors = [
     {name = "windsnow1025", email = "windsnow1025@gmail.com"}
 ]
File without changes
File without changes
File without changes