LLM-Bridge 1.15.8.tar.gz → 1.15.9.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/PKG-INFO +2 -2
  2. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/README.md +1 -1
  3. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +1 -1
  4. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +1 -1
  5. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/model_client/openai_client.py +4 -5
  6. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/chat_generate/chat_client_factory.py +1 -1
  7. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +14 -8
  8. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/pyproject.toml +1 -1
  9. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/usage/config.py +52 -8
  10. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/uv.lock +1 -1
  11. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/.gitattributes +0 -0
  12. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/.github/workflows/python-publish.yml +0 -0
  13. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/.github/workflows/python-test.yaml +0 -0
  14. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/.gitignore +0 -0
  15. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/.run/pytest.run.xml +0 -0
  16. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/LICENSE +0 -0
  17. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/MANIFEST.in +0 -0
  18. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/__init__.py +0 -0
  19. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/__init__.py +0 -0
  20. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/chat_client.py +0 -0
  21. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/__init__.py +0 -0
  22. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  23. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
  24. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  25. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  26. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  27. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  28. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
  29. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  30. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  31. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  32. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  33. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  34. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/openai/openai_responses_response_handler.py +0 -0
  35. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  36. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  37. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/implementations/printing_status.py +0 -0
  38. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/model_client/__init__.py +0 -0
  39. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/model_client/claude_client.py +0 -0
  40. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/client/model_client/gemini_client.py +0 -0
  41. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/__init__.py +0 -0
  42. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  43. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  44. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  45. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  46. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  47. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  48. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +0 -0
  49. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  50. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  51. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  52. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  53. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  54. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/file_fetch.py +0 -0
  55. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  56. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  57. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  58. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  59. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  60. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/logic/model_prices.py +0 -0
  61. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/resources/__init__.py +0 -0
  62. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/resources/model_prices.json +0 -0
  63. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/type/__init__.py +0 -0
  64. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/type/chat_response.py +0 -0
  65. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/type/message.py +0 -0
  66. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/type/model_message/__init__.py +0 -0
  67. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/type/model_message/claude_message.py +0 -0
  68. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/type/model_message/gemini_message.py +0 -0
  69. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/type/model_message/openai_message.py +0 -0
  70. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  71. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/llm_bridge/type/serializer.py +0 -0
  72. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/tests/__init__.py +0 -0
  73. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/tests/file_type_checker_test.py +0 -0
  74. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/tests/model_prices_test.py +0 -0
  75. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/usage/.env.example +0 -0
  76. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/usage/main.py +0 -0
  77. {llm_bridge-1.15.8 → llm_bridge-1.15.9}/usage/workflow.py +0 -0
--- llm_bridge-1.15.8/PKG-INFO
+++ llm_bridge-1.15.9/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.15.8
+Version: 1.15.9
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
@@ -48,7 +48,7 @@ The features listed represent the maximum capabilities of each API type supported
 
 | API Type | Input Format | Capabilities | Output Format |
 |----------|--------------------------------|---------------------------------------------------------|-------------------|
-| OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution | Text, Image |
+| OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution, Structured Output | Text, Image |
 | Gemini | Text, Image, Video, Audio, PDF | Thinking, Web Search, Code Execution, Structured Output | Text, Image, File |
 | Claude | Text, Image, PDF | Thinking, Web Search, Code Execution | Text, File |
 | Grok | Text, Image | | Text |
--- llm_bridge-1.15.8/README.md
+++ llm_bridge-1.15.9/README.md
@@ -24,7 +24,7 @@ The features listed represent the maximum capabilities of each API type supported
 
 | API Type | Input Format | Capabilities | Output Format |
 |----------|--------------------------------|---------------------------------------------------------|-------------------|
-| OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution | Text, Image |
+| OpenAI | Text, Image, PDF | Thinking, Web Search, Code Execution, Structured Output | Text, Image |
 | Gemini | Text, Image, Video, Audio, PDF | Thinking, Web Search, Code Execution, Structured Output | Text, Image, File |
 | Claude | Text, Image, PDF | Thinking, Web Search, Code Execution | Text, File |
 | Grok | Text, Image | | Text |
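
With Structured Output now advertised for OpenAI, a minimal end-to-end sketch based on the usage/ files in this diff (the temperature, stream, and structured_output_schema keywords of create_chat_client, the "OpenAI" api_keys key name, and awaiting generate_non_stream_response are assumptions; only part of the factory signatures is visible here):

import asyncio

from llm_bridge import *  # mirrors usage/config.py

async def main():
    # Strict-mode schema, trimmed from the usage/config.py example in this diff
    schema = {
        "type": "object",
        "properties": {
            "productName": {"type": "string"},
            "price": {"type": "number"},
        },
        "required": ["productName", "price"],
        "additionalProperties": False,
    }
    client = await create_chat_client(
        api_keys={"OpenAI": "sk-..."},  # hypothetical key name
        messages=[
            Message(role=Role.User, contents=[
                Content(type=ContentType.Text, data="Please generate a product."),
            ]),
        ],
        model="gpt-5.2",
        api_type="OpenAI",
        temperature=0,                    # assumed keyword
        stream=False,                     # assumed keyword
        structured_output_schema=schema,  # assumed keyword
    )
    print(await client.generate_non_stream_response())

asyncio.run(main())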
--- llm_bridge-1.15.8/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py
+++ llm_bridge-1.15.9/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py
@@ -32,7 +32,7 @@ class NonStreamOpenAIResponsesClient(OpenAIClient):
             stream=False,
             tools=self.tools,
             include=self.include,
-            # text_format=self.structured_output_base_model, # Async OpenAPI Responses Client does not support structured output
+            text=self.text,
         )
 
         return await process_openai_responses_non_stream_response(
--- llm_bridge-1.15.8/llm_bridge/client/implementations/openai/steam_openai_responses_client.py
+++ llm_bridge-1.15.9/llm_bridge/client/implementations/openai/steam_openai_responses_client.py
@@ -56,7 +56,7 @@ class StreamOpenAIResponsesClient(OpenAIClient):
             stream=True,
             tools=self.tools,
             include=self.include,
-            # text_format=self.structured_output_base_model, # Async OpenAPI Responses Client does not support structured output
+            text=self.text,
         )
 
         except httpx.HTTPStatusError as e:
--- llm_bridge-1.15.8/llm_bridge/client/model_client/openai_client.py
+++ llm_bridge-1.15.9/llm_bridge/client/model_client/openai_client.py
@@ -1,9 +1,8 @@
-from typing import AsyncGenerator, Iterable, Type, Any
-from pydantic import BaseModel
+from typing import AsyncGenerator, Iterable
 
 import openai.lib.azure
 from openai.types import Reasoning
-from openai.types.responses import ToolParam, ResponseIncludable
+from openai.types.responses import ToolParam, ResponseIncludable, ResponseTextConfigParam
 
 from llm_bridge.client.chat_client import ChatClient
 from llm_bridge.type.chat_response import ChatResponse
@@ -22,7 +21,7 @@ class OpenAIClient(ChatClient):
             tools: Iterable[ToolParam],
             reasoning: Reasoning,
             include: list[ResponseIncludable],
-            structured_output_base_model: Type[BaseModel] | None = None,
+            text: ResponseTextConfigParam,
     ):
         self.model = model
         self.messages = messages
@@ -32,7 +31,7 @@ class OpenAIClient(ChatClient):
         self.tools = tools
         self.reasoning = reasoning
         self.include = include
-        self.structured_output_base_model = structured_output_base_model
+        self.text = text
 
     async def generate_non_stream_response(self) -> ChatResponse:
         raise NotImplementedError
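
The client now carries the Responses API text config directly instead of a pydantic model. ResponseTextConfigParam is a TypedDict in the openai SDK, so the value stored in self.text is a plain dict; a sketch of its shape after the factory change below (schema contents illustrative):

from openai.types.responses import ResponseTextConfigParam

text: ResponseTextConfigParam = {
    "format": {
        "type": "json_schema",
        "name": "structured_output",
        "schema": {
            "type": "object",
            "properties": {"productName": {"type": "string"}},
            "required": ["productName"],
            "additionalProperties": False,
        },
        "strict": True,
    }
}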
--- llm_bridge-1.15.8/llm_bridge/logic/chat_generate/chat_client_factory.py
+++ llm_bridge-1.15.9/llm_bridge/logic/chat_generate/chat_client_factory.py
@@ -10,7 +10,7 @@ from llm_bridge.type.message import Message
 
 
 async def create_chat_client(
-        api_keys: dict,
+        api_keys: dict[str, str],
         messages: list[Message],
         model: str,
         api_type: str,
--- llm_bridge-1.15.8/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py
+++ llm_bridge-1.15.9/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py
@@ -5,7 +5,8 @@ import openai
 from fastapi import HTTPException
 from openai import Omit
 from openai.types import Reasoning
-from openai.types.responses import WebSearchToolParam, ResponseIncludable
+from openai.types.responses import WebSearchToolParam, ResponseIncludable, ResponseTextConfigParam, \
+    ResponseFormatTextJSONSchemaConfigParam
 from openai.types.responses.tool_param import CodeInterpreter, CodeInterpreterContainerCodeInterpreterToolAuto, \
     ImageGeneration, ToolParam
 
@@ -15,12 +16,11 @@ from llm_bridge.client.implementations.openai.steam_openai_responses_client import StreamOpenAIResponsesClient
 from llm_bridge.client.implementations.openai.stream_openai_client import StreamOpenAIClient
 from llm_bridge.logic.chat_generate.chat_message_converter import convert_messages_to_openai_responses, \
     convert_messages_to_openai
-from llm_bridge.logic.chat_generate.model_client_factory.schema_converter import json_schema_to_pydantic_model
 from llm_bridge.type.message import Message
 
 
 async def create_openai_client(
-        api_keys: dict,
+        api_keys: dict[str, str],
         messages: list[Message],
         model: str,
         api_type: str,
@@ -98,10 +98,16 @@ async def create_openai_client(
             )
         )
 
-    structured_output_base_model = None
+    text: ResponseTextConfigParam | Omit = omit
     if structured_output_schema:
-        structured_output_base_model = json_schema_to_pydantic_model(structured_output_schema)
-
+        text = ResponseTextConfigParam(
+            format=ResponseFormatTextJSONSchemaConfigParam(
+                name="structured_output",
+                schema=structured_output_schema,
+                type="json_schema",
+                strict=True,
+            )
+        )
 
     if use_responses_api:
         if stream:
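
The factory now forwards the caller's JSON schema unchanged as a Responses API text config instead of converting it to a pydantic model via json_schema_to_pydantic_model. For orientation, an equivalent standalone call against the openai SDK (model name and schema are illustrative; LLM-Bridge itself routes this through the clients above):

import asyncio

from openai import AsyncOpenAI

async def demo():
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    response = await client.responses.create(
        model="gpt-4.1",  # illustrative model name
        input="Please generate a product.",
        text={
            "format": {
                "type": "json_schema",
                "name": "structured_output",
                "schema": {
                    "type": "object",
                    "properties": {"productName": {"type": "string"}},
                    "required": ["productName"],
                    "additionalProperties": False,
                },
                "strict": True,
            }
        },
    )
    print(response.output_text)  # a JSON string conforming to the schema

asyncio.run(demo())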
@@ -114,7 +120,7 @@ async def create_openai_client(
                 tools=tools,
                 reasoning=reasoning,
                 include=include,
-                structured_output_base_model=structured_output_base_model,
+                text=text,
             )
         else:
             return NonStreamOpenAIResponsesClient(
@@ -126,7 +132,7 @@ async def create_openai_client(
                 tools=tools,
                 reasoning=reasoning,
                 include=include,
-                structured_output_base_model=structured_output_base_model,
+                text=text,
             )
     else:
         if stream:
--- llm_bridge-1.15.8/pyproject.toml
+++ llm_bridge-1.15.9/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "LLM-Bridge"
-version = "1.15.8"
+version = "1.15.9"
 dependencies = [
     "fastapi",
     "httpx",
--- llm_bridge-1.15.8/usage/config.py
+++ llm_bridge-1.15.9/usage/config.py
@@ -1,5 +1,45 @@
 from llm_bridge import *
 
+# Standard JSON Schema
+# structured_output_schema = {
+#     "$schema": "https://json-schema.org/draft/2020-12/schema",
+#     "$id": "https://example.com/product.schema.json",
+#     "title": "Product",
+#     "description": "A product from Acme's catalog",
+#     "type": "object",
+#     "properties": {
+#         "productId": {
+#             "description": "The unique identifier for a product",
+#             "type": "integer"
+#         },
+#         "productName": {
+#             "description": "Name of the product",
+#             "type": "string"
+#         },
+#         "price": {
+#             "description": "The price of the product",
+#             "type": "number",
+#             "exclusiveMinimum": 0
+#         },
+#         "tags": {
+#             "description": "Tags for the product",
+#             "type": "array",
+#             "items": {
+#                 "type": "string"
+#             },
+#             "minItems": 1,
+#             "uniqueItems": True
+#         }
+#     },
+#     "required": [
+#         "productId",
+#         "productName",
+#         "price"
+#     ],
+#     "additionalProperties": False
+# }
+
+# OpenAI Responses API JSON Schema
 structured_output_schema = {
     "$schema": "https://json-schema.org/draft/2020-12/schema",
     "$id": "https://example.com/product.schema.json",
@@ -27,23 +67,24 @@ structured_output_schema = {
                 "type": "string"
             },
             "minItems": 1,
-            "uniqueItems": True
         }
     },
     "required": [
         "productId",
         "productName",
-        "price"
-    ]
+        "price",
+        "tags"
+    ],
+    "additionalProperties": False
 }
-structured_output_schema = None
+# structured_output_schema = None
 
 messages = [
     Message(
         role=Role.System,
         contents=[
-            # Content(type=ContentType.Text, data="You are a helpful assistant."),
-            Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746209841847-A%20Tutorial%20on%20Spectral%20Clustering.pdf")
+            Content(type=ContentType.Text, data="You are a helpful assistant."),
+            # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746209841847-A%20Tutorial%20on%20Spectral%20Clustering.pdf")
         ]
     ),
     # Message(
  # Message(
@@ -62,7 +103,7 @@ messages = [
62
103
  role=Role.User,
63
104
  contents=[
64
105
  # Simple Question
65
- Content(type=ContentType.Text, data="What's this?"),
106
+ # Content(type=ContentType.Text, data="What's this?"),
66
107
 
67
108
  # Thinking
68
109
  # Content(type=ContentType.Text, data="Explain the concept of Occam's Razor and provide a simple, everyday example."),
@@ -87,7 +128,7 @@ messages = [
87
128
  # Content(type=ContentType.Text, data="Create a matplotlib visualization and save it as output.png"),
88
129
 
89
130
  # Structured Output
90
- # Content(type=ContentType.Text, data="Please generate a product."),
131
+ Content(type=ContentType.Text, data="Please generate a product."),
91
132
  ]
92
133
  ),
93
134
  # Message(
@@ -102,6 +143,7 @@ messages = [
102
143
  # ]
103
144
  # ),
104
145
  ]
146
+
105
147
  # See /llm_bridge/resources/model_prices.json for available models
106
148
  model = "gpt-5.2"
107
149
  # model = "gpt-5.1"
@@ -115,6 +157,7 @@ model = "gpt-5.2"
115
157
  # model = "grok-4-1-fast-reasoning"
116
158
  # model = "claude-sonnet-4-5"
117
159
  # model = "claude-opus-4-5"
160
+
118
161
  # api_type = "Gemini-Vertex"
119
162
  # api_type = "Gemini-Free"
120
163
  # api_type = "Gemini-Paid"
@@ -123,6 +166,7 @@ api_type = "OpenAI"
123
166
  # api_type = "OpenAI-GitHub"
124
167
  # api_type = "Claude"
125
168
  # api_type = "Grok"
169
+
126
170
  temperature = 0
127
171
  stream = True
128
172
  # stream = False
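
With stream = True, the usage flow presumably iterates an async generator of chunks; a sketch under that assumption (the generate_stream_response name is hypothetical, inferred from the AsyncGenerator import in openai_client.py; the real entry point is usage/main.py):

async def consume(client):
    # Hypothetical method name: print chunks as they arrive.
    async for chunk in client.generate_stream_response():
        print(chunk, flush=True)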
--- llm_bridge-1.15.8/uv.lock
+++ llm_bridge-1.15.9/uv.lock
@@ -357,7 +357,7 @@ wheels = [
 
 [[package]]
 name = "llm-bridge"
-version = "1.15.8"
+version = "1.15.9"
 source = { editable = "." }
 dependencies = [
     { name = "anthropic" },