LLM-Bridge 1.12.2__tar.gz → 1.12.4__tar.gz

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
Files changed (68)
  1. {llm_bridge-1.12.2 → llm_bridge-1.12.4/LLM_Bridge.egg-info}/PKG-INFO +1 -1
  2. {llm_bridge-1.12.2/LLM_Bridge.egg-info → llm_bridge-1.12.4}/PKG-INFO +1 -1
  3. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +12 -11
  4. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/resources/model_prices.json +12 -18
  5. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/pyproject.toml +1 -1
  6. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/LICENSE +0 -0
  7. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/LLM_Bridge.egg-info/SOURCES.txt +0 -0
  8. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/LLM_Bridge.egg-info/dependency_links.txt +0 -0
  9. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/LLM_Bridge.egg-info/requires.txt +0 -0
  10. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/LLM_Bridge.egg-info/top_level.txt +0 -0
  11. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/MANIFEST.in +0 -0
  12. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/README.md +0 -0
  13. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/__init__.py +0 -0
  14. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/__init__.py +0 -0
  15. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/chat_client.py +0 -0
  16. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/__init__.py +0 -0
  17. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  18. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
  19. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  20. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  21. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  22. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  23. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
  24. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  25. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  26. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  27. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  28. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  29. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
  30. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  31. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
  32. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  33. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/implementations/printing_status.py +0 -0
  34. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/model_client/__init__.py +0 -0
  35. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/model_client/claude_client.py +0 -0
  36. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/model_client/gemini_client.py +0 -0
  37. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/client/model_client/openai_client.py +0 -0
  38. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/__init__.py +0 -0
  39. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  40. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  41. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  42. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  43. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  44. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  45. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +0 -0
  46. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  47. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  48. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  49. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  50. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  51. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/file_fetch.py +0 -0
  52. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  53. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  54. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  55. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  56. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  57. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/logic/model_prices.py +0 -0
  58. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/resources/__init__.py +0 -0
  59. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/type/__init__.py +0 -0
  60. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/type/chat_response.py +0 -0
  61. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/type/message.py +0 -0
  62. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/type/model_message/__init__.py +0 -0
  63. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/type/model_message/claude_message.py +0 -0
  64. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/type/model_message/gemini_message.py +0 -0
  65. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/type/model_message/openai_message.py +0 -0
  66. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  67. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/llm_bridge/type/serializer.py +0 -0
  68. {llm_bridge-1.12.2 → llm_bridge-1.12.4}/setup.cfg +0 -0
{llm_bridge-1.12.2 → llm_bridge-1.12.4/LLM_Bridge.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.12.2
+Version: 1.12.4
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
{llm_bridge-1.12.2/LLM_Bridge.egg-info → llm_bridge-1.12.4}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.12.2
+Version: 1.12.4
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py

@@ -1,6 +1,6 @@
 from google import genai
 from google.genai import types
-from google.genai.types import Modality
+from google.genai.types import Modality, HttpOptions, MediaResolution
 
 from llm_bridge.client.implementations.gemini.non_stream_gemini_client import NonStreamGeminiClient
 from llm_bridge.client.implementations.gemini.stream_gemini_client import StreamGeminiClient
@@ -29,22 +29,22 @@ async def create_gemini_client(
     thinking_config = None
     response_modalities = [Modality.TEXT]
 
-    if "image" not in model:
-        tools.append(
-            types.Tool(
-                google_search=types.GoogleSearch()
-            )
+    tools.append(
+        types.Tool(
+            google_search=types.GoogleSearch()
         )
+    )
+    if thought:
+        thinking_config = types.ThinkingConfig(
+            include_thoughts=True,
+            thinking_budget=-1,
+        )
+    if "image" not in model:
         tools.append(
             types.Tool(
                 url_context=types.UrlContext()
             )
         )
-    if thought:
-        thinking_config = types.ThinkingConfig(
-            include_thoughts=True,
-            thinking_budget=-1,
-        )
     if not vertexai:
         if code_execution:
             tools.append(
@@ -58,6 +58,7 @@ async def create_gemini_client(
     config = types.GenerateContentConfig(
         system_instruction=system_instruction,
         temperature=temperature,
+        media_resolution=MediaResolution.MEDIA_RESOLUTION_HIGH,
         safety_settings=[
             types.SafetySetting(
                 category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
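
For readers who want the net effect of the gemini_client_factory.py change rather than the hunks: Google Search grounding is now added for every model, the thinking config is built before the image-model check, URL context stays limited to non-image models, and the generation config requests high media resolution. The condensed sketch below reflects those hunks; the helper name, its parameter list, and the way tools/thinking_config/response_modalities are wired into GenerateContentConfig are assumptions, since those parts are outside the shown diff.

from google.genai import types
from google.genai.types import MediaResolution, Modality


def build_gemini_config(model: str, thought: bool,
                        system_instruction: str,
                        temperature: float) -> types.GenerateContentConfig:
    # Hypothetical helper condensing the 1.12.4 logic shown in the hunks above.
    tools = []
    thinking_config = None
    response_modalities = [Modality.TEXT]

    # Search grounding is no longer gated on "image" not being in the model name.
    tools.append(types.Tool(google_search=types.GoogleSearch()))

    # Thinking config is now requested before the image-model check.
    if thought:
        thinking_config = types.ThinkingConfig(
            include_thoughts=True,
            thinking_budget=-1,  # dynamic budget
        )

    # URL context remains restricted to non-image models.
    if "image" not in model:
        tools.append(types.Tool(url_context=types.UrlContext()))

    return types.GenerateContentConfig(
        system_instruction=system_instruction,
        temperature=temperature,
        media_resolution=MediaResolution.MEDIA_RESOLUTION_HIGH,  # new in 1.12.4
        tools=tools,
        thinking_config=thinking_config,
        response_modalities=response_modalities,
    )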
llm_bridge/resources/model_prices.json

@@ -5,6 +5,12 @@
     "input": 4,
     "output": 18
   },
+  {
+    "apiType": "Gemini-Vertex",
+    "model": "gemini-3-pro-image-preview",
+    "input": 2,
+    "output": 120
+  },
   {
     "apiType": "Gemini-Vertex",
     "model": "gemini-2.5-flash",
@@ -17,12 +23,6 @@
     "input": 2.5,
     "output": 15
   },
-  {
-    "apiType": "Gemini-Vertex",
-    "model": "gemini-2.5-flash-image",
-    "input": 1.0,
-    "output": 2.5
-  },
   {
     "apiType": "Gemini-Free",
     "model": "gemini-flash-latest",
@@ -41,18 +41,18 @@
     "input": 0,
     "output": 0
   },
-  {
-    "apiType": "Gemini-Free",
-    "model": "gemini-2.5-flash-image-preview",
-    "input": 0,
-    "output": 0
-  },
   {
     "apiType": "Gemini-Paid",
     "model": "gemini-3-pro-preview",
     "input": 4,
     "output": 18
   },
+  {
+    "apiType": "Gemini-Paid",
+    "model": "gemini-3-pro-image-preview",
+    "input": 2,
+    "output": 120
+  },
   {
     "apiType": "Gemini-Paid",
     "model": "gemini-flash-latest",
@@ -71,12 +71,6 @@
     "input": 2.5,
     "output": 15
   },
-  {
-    "apiType": "Gemini-Paid",
-    "model": "gemini-2.5-flash-image",
-    "input": 1.0,
-    "output": 2.5
-  },
   {
     "apiType": "OpenAI",
     "model": "gpt-5.1",
pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "LLM-Bridge"
-version = "1.12.2"
+version = "1.12.4"
 authors = [
     {name = "windsnow1025", email = "windsnow1025@gmail.com"}
 ]