LLM-Bridge 1.12.7__tar.gz → 1.13.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. {llm_bridge-1.12.7 → llm_bridge-1.13.1/LLM_Bridge.egg-info}/PKG-INFO +3 -3
  2. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/LLM_Bridge.egg-info/SOURCES.txt +1 -0
  3. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/LLM_Bridge.egg-info/requires.txt +2 -2
  4. {llm_bridge-1.12.7/LLM_Bridge.egg-info → llm_bridge-1.13.1}/PKG-INFO +3 -3
  5. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +11 -21
  6. llm_bridge-1.13.1/llm_bridge/client/implementations/claude/stream_claude_client.py +54 -0
  7. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +1 -0
  8. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +1 -0
  9. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/model_client/claude_client.py +3 -0
  10. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/model_client/openai_client.py +5 -1
  11. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/chat_generate/chat_client_factory.py +8 -1
  12. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +17 -1
  13. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +8 -4
  14. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +10 -0
  15. llm_bridge-1.13.1/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +7 -0
  16. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/pyproject.toml +3 -3
  17. llm_bridge-1.12.7/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -71
  18. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/LICENSE +0 -0
  19. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/LLM_Bridge.egg-info/dependency_links.txt +0 -0
  20. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/LLM_Bridge.egg-info/top_level.txt +0 -0
  21. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/MANIFEST.in +0 -0
  22. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/README.md +0 -0
  23. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/__init__.py +0 -0
  24. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/__init__.py +0 -0
  25. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/chat_client.py +0 -0
  26. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/__init__.py +0 -0
  27. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  28. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
  29. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  30. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  31. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
  32. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  33. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  34. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  35. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  36. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  37. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  38. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  39. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/implementations/printing_status.py +0 -0
  40. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/model_client/__init__.py +0 -0
  41. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/client/model_client/gemini_client.py +0 -0
  42. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/__init__.py +0 -0
  43. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  44. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  45. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  46. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  47. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  48. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  49. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  50. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  51. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  52. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/file_fetch.py +0 -0
  53. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  54. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  55. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  56. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  57. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  58. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/logic/model_prices.py +0 -0
  59. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/resources/__init__.py +0 -0
  60. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/resources/model_prices.json +0 -0
  61. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/type/__init__.py +0 -0
  62. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/type/chat_response.py +0 -0
  63. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/type/message.py +0 -0
  64. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/type/model_message/__init__.py +0 -0
  65. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/type/model_message/claude_message.py +0 -0
  66. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/type/model_message/gemini_message.py +0 -0
  67. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/type/model_message/openai_message.py +0 -0
  68. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  69. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/llm_bridge/type/serializer.py +0 -0
  70. {llm_bridge-1.12.7 → llm_bridge-1.13.1}/setup.cfg +0 -0
LLM_Bridge.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.12.7
+Version: 1.13.1
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
@@ -13,10 +13,10 @@ License-File: LICENSE
 Requires-Dist: fastapi
 Requires-Dist: httpx
 Requires-Dist: tenacity
-Requires-Dist: openai==1.106.1
+Requires-Dist: openai==2.9.0
 Requires-Dist: tiktoken==0.11.0
 Requires-Dist: google-genai==1.46.0
-Requires-Dist: anthropic==0.66.0
+Requires-Dist: anthropic==0.75.0
 Requires-Dist: PyMuPDF
 Requires-Dist: docxlatex>=1.1.1
 Requires-Dist: openpyxl
LLM_Bridge.egg-info/SOURCES.txt
@@ -43,6 +43,7 @@ llm_bridge/logic/chat_generate/model_client_factory/__init__.py
 llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py
 llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py
 llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py
+llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py
 llm_bridge/logic/chat_generate/model_message_converter/__init__.py
 llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py
 llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py
LLM_Bridge.egg-info/requires.txt
@@ -1,10 +1,10 @@
 fastapi
 httpx
 tenacity
-openai==1.106.1
+openai==2.9.0
 tiktoken==0.11.0
 google-genai==1.46.0
-anthropic==0.66.0
+anthropic==0.75.0
 PyMuPDF
 docxlatex>=1.1.1
 openpyxl
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.12.7
+Version: 1.13.1
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
@@ -13,10 +13,10 @@ License-File: LICENSE
 Requires-Dist: fastapi
 Requires-Dist: httpx
 Requires-Dist: tenacity
-Requires-Dist: openai==1.106.1
+Requires-Dist: openai==2.9.0
 Requires-Dist: tiktoken==0.11.0
 Requires-Dist: google-genai==1.46.0
-Requires-Dist: anthropic==0.66.0
+Requires-Dist: anthropic==0.75.0
 Requires-Dist: PyMuPDF
 Requires-Dist: docxlatex>=1.1.1
 Requires-Dist: openpyxl
llm_bridge/client/implementations/claude/non_stream_claude_client.py
@@ -58,27 +58,17 @@ class NonStreamClaudeClient(ClaudeClient):
         try:
             logging.info(f"messages: {self.messages}")
 
-            if self.thinking:
-                message = await self.client.beta.messages.create(
-                    model=self.model,
-                    max_tokens=self.max_tokens,
-                    temperature=self.temperature,
-                    system=self.system,
-                    messages=serialize(self.messages),
-                    betas=self.betas,
-                    tools=self.tools,
-                    thinking=self.thinking,
-                )
-            else:
-                message = await self.client.beta.messages.create(
-                    model=self.model,
-                    max_tokens=self.max_tokens,
-                    temperature=self.temperature,
-                    system=self.system,
-                    messages=serialize(self.messages),
-                    betas=self.betas,
-                    tools=self.tools,
-                )
+            message = await self.client.beta.messages.create(
+                model=self.model,
+                max_tokens=self.max_tokens,
+                temperature=self.temperature,
+                system=self.system,
+                messages=serialize(self.messages),
+                betas=self.betas,
+                tools=self.tools,
+                thinking=self.thinking,
+                output_format=self.output_format,
+            )
 
             return await process_claude_non_stream_response(
                 message=message,
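
Note on this refactor: the duplicated if/else is collapsible because the anthropic SDK's Omit sentinel (imported in claude_client_factory.py further down) makes omitted parameters disappear from the request body, so `thinking` and `output_format` can be passed unconditionally. A minimal sketch of the pattern outside this package, with an illustrative helper name and message:

import anthropic
from anthropic import Omit

async def create_message(client: anthropic.AsyncAnthropic, model: str, thinking=None):
    # Hypothetical helper, not from llm_bridge: map "not configured" to Omit()
    # so the SDK drops the field from the wire request instead of sending null.
    return await client.beta.messages.create(
        model=model,
        max_tokens=1024,
        messages=[{"role": "user", "content": "Hello"}],
        thinking=thinking if thinking is not None else Omit(),
    )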
llm_bridge/client/implementations/claude/stream_claude_client.py (new in 1.13.1)
@@ -0,0 +1,54 @@
+import logging
+import re
+from typing import AsyncGenerator
+
+import httpx
+from fastapi import HTTPException
+
+from llm_bridge.client.implementations.claude.claude_response_handler import process_claude_stream_response
+from llm_bridge.client.model_client.claude_client import ClaudeClient
+from llm_bridge.type.chat_response import ChatResponse
+from llm_bridge.type.serializer import serialize
+
+class StreamClaudeClient(ClaudeClient):
+    async def generate_stream_response(self) -> AsyncGenerator[ChatResponse, None]:
+        try:
+            logging.info(f"messages: {self.messages}")
+
+            try:
+                async with self.client.beta.messages.stream(
+                    model=self.model,
+                    max_tokens=self.max_tokens,
+                    temperature=self.temperature,
+                    system=self.system,
+                    messages=serialize(self.messages),
+                    betas=self.betas,
+                    tools=self.tools,
+                    thinking=self.thinking,
+                    output_format=self.output_format,
+                ) as stream:
+                    async for event in stream:
+                        yield await process_claude_stream_response(
+                            event=event,
+                            input_tokens=self.input_tokens,
+                            client=self.client,
+                            model=self.model,
+                        )
+
+            except Exception as e:
+                logging.exception(e)
+                yield ChatResponse(error=repr(e))
+
+        except httpx.HTTPStatusError as e:
+            status_code = e.response.status_code
+            text = e.response.text
+            raise HTTPException(status_code=status_code, detail=text)
+        except Exception as e:
+            logging.exception(e)
+            match = re.search(r'\d{3}', str(e))
+            if match:
+                error_code = int(match.group(0))
+            else:
+                error_code = 500
+
+            raise HTTPException(status_code=error_code, detail=str(e))
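
The rewritten stream client yields ChatResponse objects as SDK events arrive, and failures inside the stream are yielded as ChatResponse(error=...) rather than raised, so callers must check the error field. A minimal consumption sketch (assumes an already-constructed StreamClaudeClient; the `text` attribute on ChatResponse is an assumption, not confirmed by this diff):

async def collect(stream_client) -> str:
    # stream_client: a constructed StreamClaudeClient
    parts: list[str] = []
    async for chunk in stream_client.generate_stream_response():
        if chunk.error:  # in-stream errors are yielded, not raised
            raise RuntimeError(chunk.error)
        if getattr(chunk, "text", None):  # assumed field name
            parts.append(chunk.text)
    return "".join(parts)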
llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py
@@ -86,6 +86,7 @@ class NonStreamOpenAIResponsesClient(OpenAIClient):
                 temperature=self.temperature,
                 stream=False,
                 tools=self.tools,
+                # text_format=self.structured_output_base_model, # Async OpenAPI Responses Client does not support structured output
             )
 
             return process_openai_responses_non_stream_response(
llm_bridge/client/implementations/openai/steam_openai_responses_client.py
@@ -86,6 +86,7 @@ class StreamOpenAIResponsesClient(OpenAIClient):
                 temperature=self.temperature,
                 stream=True,
                 tools=self.tools,
+                # text_format=self.structured_output_base_model, # Async OpenAPI Responses Client does not support structured output
            )
 
        except httpx.HTTPStatusError as e:
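
Both Responses-API clients leave `text_format` commented out. For reference, openai-python exposes structured output on the Responses API through `client.responses.parse(...)`, which takes a Pydantic model as `text_format`; a hedged sketch of that usage (model name illustrative, not code from this package):

import asyncio

import openai
from pydantic import BaseModel

class Answer(BaseModel):
    title: str
    score: int

async def main() -> None:
    client = openai.AsyncOpenAI()
    response = await client.responses.parse(
        model="gpt-4o-mini",  # illustrative model name
        input="Rate the movie 'Heat'.",
        text_format=Answer,  # the Pydantic model supplies the JSON schema
    )
    print(response.output_parsed)  # parsed Answer instance

asyncio.run(main())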
llm_bridge/client/model_client/claude_client.py
@@ -22,6 +22,7 @@ class ClaudeClient(ChatClient):
             input_tokens: int,
             tools: list[BetaToolUnionParam],
             thinking: ThinkingConfigEnabledParam,
+            output_format: dict[str, Any] | None,
     ):
         self.model = model
         self.messages = messages
@@ -33,8 +34,10 @@ class ClaudeClient(ChatClient):
         self.input_tokens = input_tokens
         self.tools = tools
         self.thinking = thinking
+        self.output_format = output_format
 
     async def generate_non_stream_response(self) -> ChatResponse:
         raise NotImplementedError
+
     async def generate_stream_response(self) -> AsyncGenerator[ChatResponse, None]:
         raise NotImplementedError
llm_bridge/client/model_client/openai_client.py
@@ -1,4 +1,5 @@
-from typing import AsyncGenerator, Iterable
+from typing import AsyncGenerator, Iterable, Type, Any
+from pydantic import BaseModel
 
 import openai.lib.azure
 from openai.types import Reasoning
@@ -20,6 +21,7 @@ class OpenAIClient(ChatClient):
             client: openai.AsyncOpenAI | openai.lib.azure.AsyncAzureOpenAI,
             tools: Iterable[ToolParam],
             reasoning: Reasoning,
+            structured_output_base_model: Type[BaseModel] | None = None,
     ):
         self.model = model
         self.messages = messages
@@ -28,8 +30,10 @@ class OpenAIClient(ChatClient):
         self.client = client
         self.tools = tools
         self.reasoning = reasoning
+        self.structured_output_base_model = structured_output_base_model
 
     async def generate_non_stream_response(self) -> ChatResponse:
         raise NotImplementedError
+
     async def generate_stream_response(self) -> AsyncGenerator[ChatResponse, None]:
         raise NotImplementedError
llm_bridge/logic/chat_generate/chat_client_factory.py
@@ -1,3 +1,5 @@
+from typing import Any
+
 from fastapi import HTTPException
 
 from llm_bridge.client.chat_client import ChatClient
@@ -16,7 +18,7 @@ async def create_chat_client(
         stream: bool,
         thought: bool,
         code_execution: bool,
-        structured_output_schema: dict | None,
+        structured_output_schema: dict[str, Any] | None,
 ) -> ChatClient:
     if api_type == 'OpenAI':
         return await create_openai_client(
@@ -28,6 +30,7 @@ async def create_chat_client(
             stream=stream,
             thought=thought,
             code_execution=code_execution,
+            structured_output_schema=structured_output_schema,
         )
     elif api_type == 'OpenAI-Azure':
         return await create_openai_client(
@@ -42,6 +45,7 @@ async def create_chat_client(
             stream=stream,
             thought=thought,
             code_execution=code_execution,
+            structured_output_schema=structured_output_schema,
         )
     elif api_type == 'OpenAI-GitHub':
         return await create_openai_client(
@@ -53,6 +57,7 @@ async def create_chat_client(
             stream=stream,
             thought=thought,
             code_execution=code_execution,
+            structured_output_schema=structured_output_schema,
         )
     elif api_type == 'Grok':
         return await create_openai_client(
@@ -64,6 +69,7 @@ async def create_chat_client(
             stream=stream,
             thought=thought,
             code_execution=code_execution,
+            structured_output_schema=structured_output_schema,
        )
     elif api_type == 'Gemini-Free':
         return await create_gemini_client(
@@ -110,6 +116,7 @@ async def create_chat_client(
             stream=stream,
             thought=thought,
             code_execution=code_execution,
+            structured_output_schema=structured_output_schema,
         )
     else:
         raise HTTPException(status_code=400, detail="Invalid API type")
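
`structured_output_schema` stays a plain JSON Schema dict all the way through this factory; each backend then converts it to its own representation (a Pydantic model for OpenAI, `output_format` for Claude, `response_json_schema` for Gemini). An illustrative schema of the expected shape (not taken from the package):

structured_output_schema = {
    "type": "object",
    "properties": {
        "summary": {"type": "string"},
        "sentiment": {"type": "string", "enum": ["positive", "neutral", "negative"]},
    },
    "required": ["summary", "sentiment"],
}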
llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py
@@ -1,4 +1,7 @@
+from typing import Any
+
 import anthropic
+from anthropic import Omit, transform_schema
 from anthropic.types import ThinkingConfigEnabledParam, AnthropicBetaParam
 from anthropic.types.beta import BetaWebSearchTool20250305Param, BetaToolUnionParam, BetaCodeExecutionTool20250825Param
 
@@ -18,7 +21,10 @@ async def create_claude_client(
         stream: bool,
         thought: bool,
         code_execution: bool,
+        structured_output_schema: dict[str, Any] | None
 ):
+    omit = Omit()
+
     client = anthropic.AsyncAnthropic(
         api_key=api_key,
     )
@@ -44,7 +50,7 @@ async def create_claude_client(
         max_output,
         context_window - input_tokens,
     )
-    thinking = None
+    thinking = omit
     if thought:
         thinking = ThinkingConfigEnabledParam(
             type="enabled",
@@ -55,6 +61,7 @@ async def create_claude_client(
         "context-1m-2025-08-07",
         "output-128k-2025-02-19",
         "code-execution-2025-08-25",
+        "structured-outputs-2025-11-13"
     ]
     tools: list[BetaToolUnionParam] = []
     tools.append(
@@ -71,6 +78,13 @@ async def create_claude_client(
         )
     )
 
+    output_format = omit
+    # if structured_output_schema: # Claude output format raises: TypeError: unhashable type: 'dict'
+    #     output_format = {
+    #         "type": "json_schema",
+    #         "schema": transform_schema(structured_output_schema),
+    #     }
+
     if stream:
         return StreamClaudeClient(
             model=model,
@@ -83,6 +97,7 @@ async def create_claude_client(
             input_tokens=input_tokens,
             tools=tools,
             thinking=thinking,
+            output_format=output_format,
         )
     else:
         return NonStreamClaudeClient(
@@ -96,4 +111,5 @@ async def create_claude_client(
             input_tokens=input_tokens,
             tools=tools,
             thinking=thinking,
+            output_format=output_format,
        )
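
The structured-outputs wiring here is deliberately stubbed: the "structured-outputs-2025-11-13" beta flag is always requested, but `output_format` stays omitted because of the TypeError recorded in the comment. For reference, the request shape the commented-out code aims at looks roughly like the following (a hedged sketch against anthropic 0.75, with an illustrative model name and schema, not code shipped in 1.13.1):

import anthropic
from anthropic import transform_schema

schema = {
    "type": "object",
    "properties": {"answer": {"type": "string"}},
    "required": ["answer"],
}

async def request_structured(client: anthropic.AsyncAnthropic):
    return await client.beta.messages.create(
        model="claude-sonnet-4-5",  # illustrative
        max_tokens=1024,
        messages=[{"role": "user", "content": "Reply as JSON."}],
        betas=["structured-outputs-2025-11-13"],
        output_format={
            "type": "json_schema",
            "schema": transform_schema(schema),  # normalizes the raw JSON Schema
        },
    )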
llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py
@@ -1,6 +1,8 @@
+from typing import Any
+
 from google import genai
 from google.genai import types
-from google.genai.types import Modality, HttpOptions, MediaResolution
+from google.genai.types import Modality, MediaResolution
 
 from llm_bridge.client.implementations.gemini.non_stream_gemini_client import NonStreamGeminiClient
 from llm_bridge.client.implementations.gemini.stream_gemini_client import StreamGeminiClient
@@ -18,7 +20,7 @@ async def create_gemini_client(
         stream: bool,
         thought: bool,
         code_execution: bool,
-        structured_output_schema: dict | None,
+        structured_output_schema: dict[str, Any] | None,
 ):
     client = genai.Client(
         vertexai=vertexai,
@@ -59,7 +61,6 @@ async def create_gemini_client(
     config = types.GenerateContentConfig(
         system_instruction=system_instruction,
         temperature=temperature,
-        media_resolution=MediaResolution.MEDIA_RESOLUTION_HIGH,
         safety_settings=[
             types.SafetySetting(
                 category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
@@ -87,7 +88,10 @@ async def create_gemini_client(
         response_modalities=response_modalities,
     )
 
-    if structured_output_schema is not None:
+    if vertexai:
+        config.media_resolution=MediaResolution.MEDIA_RESOLUTION_HIGH
+
+    if structured_output_schema:
         config.response_mime_type = "application/json"
         config.response_json_schema = structured_output_schema
 
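Two behavior changes in this hunk: MEDIA_RESOLUTION_HIGH is now applied only when running against Vertex AI, and the structured-output guard changed from `is not None` to truthiness, so an empty dict now disables it. Gemini consumes the JSON Schema directly; a hedged standalone sketch of the same config against google-genai (model name and schema illustrative):

from google import genai
from google.genai import types

client = genai.Client()  # API key taken from the environment

config = types.GenerateContentConfig(
    response_mime_type="application/json",
    response_json_schema={
        "type": "object",
        "properties": {"answer": {"type": "string"}},
        "required": ["answer"],
    },
)

response = client.models.generate_content(
    model="gemini-2.5-flash",  # illustrative
    contents="Reply with a JSON object containing an 'answer'.",
    config=config,
)
print(response.text)  # JSON text conforming to the schema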
llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py
@@ -1,4 +1,5 @@
 import re
+from typing import Any
 
 import openai
 from fastapi import HTTPException
@@ -13,6 +14,7 @@ from llm_bridge.client.implementations.openai.steam_openai_responses_client impo
 from llm_bridge.client.implementations.openai.stream_openai_client import StreamOpenAIClient
 from llm_bridge.logic.chat_generate.chat_message_converter import convert_messages_to_openai_responses, \
     convert_messages_to_openai
+from llm_bridge.logic.chat_generate.model_client_factory.schema_converter import json_schema_to_pydantic_model
 from llm_bridge.type.message import Message
 
 
@@ -25,6 +27,7 @@ async def create_openai_client(
         stream: bool,
         thought: bool,
         code_execution: bool,
+        structured_output_schema: dict[str, Any] | None,
 ):
     if api_type == "OpenAI":
         client = openai.AsyncOpenAI(
@@ -91,6 +94,11 @@ async def create_openai_client(
             )
         )
 
+    structured_output_base_model = None
+    if structured_output_schema:
+        structured_output_base_model = json_schema_to_pydantic_model(structured_output_schema)
+
+
     if use_responses_api:
         if stream:
             return StreamOpenAIResponsesClient(
@@ -101,6 +109,7 @@ async def create_openai_client(
                 client=client,
                 tools=tools,
                 reasoning=reasoning,
+                structured_output_base_model=structured_output_base_model,
             )
         else:
             return NonStreamOpenAIResponsesClient(
@@ -111,6 +120,7 @@ async def create_openai_client(
                 client=client,
                 tools=tools,
                 reasoning=reasoning,
+                structured_output_base_model=structured_output_base_model,
            )
     else:
         if stream:
llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py (new in 1.13.1)
@@ -0,0 +1,7 @@
+from typing import Any
+
+from pydantic import BaseModel
+
+
+def json_schema_to_pydantic_model(json_schema: dict[str, Any]) -> BaseModel:
+    pass
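
As shipped, `json_schema_to_pydantic_model` is a stub that returns None, which is why the `text_format` call sites above remain commented out. One plausible direction, sketched with pydantic.create_model and limited to flat object schemas (an assumption, not the package's implementation; note it returns a model class, i.e. Type[BaseModel], rather than an instance):

from typing import Any, Type

from pydantic import BaseModel, create_model

_PRIMITIVES: dict[str, type] = {
    "string": str,
    "integer": int,
    "number": float,
    "boolean": bool,
}


def json_schema_to_pydantic_model(json_schema: dict[str, Any]) -> Type[BaseModel]:
    # Flat object schemas only: each property becomes a model field;
    # required fields get `...`, optional ones default to None.
    required = set(json_schema.get("required", []))
    fields: dict[str, Any] = {}
    for name, prop in json_schema.get("properties", {}).items():
        py_type = _PRIMITIVES.get(prop.get("type"), Any)
        if name in required:
            fields[name] = (py_type, ...)
        else:
            fields[name] = (py_type | None, None)
    return create_model(json_schema.get("title", "StructuredOutput"), **fields)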
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "LLM-Bridge"
-version = "1.12.7"
+version = "1.13.1"
 authors = [
     {name = "windsnow1025", email = "windsnow1025@gmail.com"}
 ]
@@ -21,10 +21,10 @@ dependencies = [
     "fastapi",
     "httpx",
     "tenacity",
-    "openai==1.106.1",
+    "openai==2.9.0",
     "tiktoken==0.11.0",
     "google-genai==1.46.0",
-    "anthropic==0.66.0",
+    "anthropic==0.75.0",
     "PyMuPDF",
     "docxlatex>=1.1.1",
     "openpyxl",
llm_bridge/client/implementations/claude/stream_claude_client.py (old file, removed from 1.12.7)
@@ -1,71 +0,0 @@
-import logging
-import re
-from typing import AsyncGenerator
-
-import httpx
-from fastapi import HTTPException
-
-from llm_bridge.client.implementations.claude.claude_response_handler import process_claude_stream_response
-from llm_bridge.client.model_client.claude_client import ClaudeClient
-from llm_bridge.type.chat_response import ChatResponse
-from llm_bridge.type.serializer import serialize
-
-class StreamClaudeClient(ClaudeClient):
-    async def generate_stream_response(self) -> AsyncGenerator[ChatResponse, None]:
-        try:
-            logging.info(f"messages: {self.messages}")
-
-            try:
-                if self.thinking:
-                    async with self.client.beta.messages.stream(
-                        model=self.model,
-                        max_tokens=self.max_tokens,
-                        temperature=self.temperature,
-                        system=self.system,
-                        messages=serialize(self.messages),
-                        betas=self.betas,
-                        tools=self.tools,
-                        thinking=self.thinking,
-                    ) as stream:
-                        async for event in stream:
-                            yield await process_claude_stream_response(
-                                event=event,
-                                input_tokens=self.input_tokens,
-                                client=self.client,
-                                model=self.model,
-                            )
-                else:
-                    async with self.client.beta.messages.stream(
-                        model=self.model,
-                        max_tokens=self.max_tokens,
-                        temperature=self.temperature,
-                        system=self.system,
-                        messages=serialize(self.messages),
-                        betas=self.betas,
-                        tools=self.tools,
-                    ) as stream:
-                        async for event in stream:
-                            yield await process_claude_stream_response(
-                                event=event,
-                                input_tokens=self.input_tokens,
-                                client=self.client,
-                                model=self.model,
-                            )
-
-            except Exception as e:
-                logging.exception(e)
-                yield ChatResponse(error=repr(e))
-
-        except httpx.HTTPStatusError as e:
-            status_code = e.response.status_code
-            text = e.response.text
-            raise HTTPException(status_code=status_code, detail=text)
-        except Exception as e:
-            logging.exception(e)
-            match = re.search(r'\d{3}', str(e))
-            if match:
-                error_code = int(match.group(0))
-            else:
-                error_code = 500
-
-            raise HTTPException(status_code=error_code, detail=str(e))