LLM-Bridge 1.8.1__tar.gz → 1.9.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. {llm_bridge-1.8.1 → llm_bridge-1.9.0/LLM_Bridge.egg-info}/PKG-INFO +16 -10
  2. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/LLM_Bridge.egg-info/requires.txt +2 -2
  3. {llm_bridge-1.8.1/LLM_Bridge.egg-info → llm_bridge-1.9.0}/PKG-INFO +16 -10
  4. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/README.md +14 -8
  5. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/claude/claude_stream_response_handler.py +0 -3
  6. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -4
  7. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +23 -12
  8. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +2 -0
  9. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -2
  10. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +20 -6
  11. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +27 -10
  12. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/model_client/openai_client.py +3 -0
  13. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +14 -6
  14. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +5 -0
  15. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +30 -12
  16. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/type/chat_response.py +10 -0
  17. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/pyproject.toml +3 -3
  18. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/LICENSE +0 -0
  19. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/LLM_Bridge.egg-info/SOURCES.txt +0 -0
  20. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/LLM_Bridge.egg-info/dependency_links.txt +0 -0
  21. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/LLM_Bridge.egg-info/top_level.txt +0 -0
  22. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/MANIFEST.in +0 -0
  23. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/__init__.py +0 -0
  24. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/__init__.py +0 -0
  25. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/chat_client.py +0 -0
  26. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/__init__.py +0 -0
  27. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  28. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  29. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  30. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  31. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  32. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  33. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  34. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  35. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  36. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/implementations/printing_status.py +0 -0
  37. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/model_client/__init__.py +0 -0
  38. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/model_client/claude_client.py +0 -0
  39. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/client/model_client/gemini_client.py +0 -0
  40. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/__init__.py +0 -0
  41. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  42. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  43. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  44. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  45. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  46. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  47. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  48. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  49. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  50. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  51. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/file_fetch.py +0 -0
  52. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  53. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  54. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  55. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  56. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  57. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/logic/model_prices.py +0 -0
  58. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/resources/__init__.py +0 -0
  59. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/resources/model_prices.json +0 -0
  60. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/type/__init__.py +0 -0
  61. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/type/message.py +0 -0
  62. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/type/model_message/__init__.py +0 -0
  63. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/type/model_message/claude_message.py +0 -0
  64. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/type/model_message/gemini_message.py +0 -0
  65. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/type/model_message/openai_message.py +0 -0
  66. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  67. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/llm_bridge/type/serializer.py +0 -0
  68. {llm_bridge-1.8.1 → llm_bridge-1.9.0}/setup.cfg +0 -0

LLM_Bridge.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.8.1
+Version: 1.9.0
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
@@ -13,10 +13,10 @@ License-File: LICENSE
 Requires-Dist: fastapi
 Requires-Dist: httpx
 Requires-Dist: tenacity
-Requires-Dist: openai==1.99.6
+Requires-Dist: openai==1.106.1
 Requires-Dist: tiktoken==0.11.0
 Requires-Dist: google-genai==1.28.0
-Requires-Dist: anthropic==0.62.0
+Requires-Dist: anthropic==0.66.0
 Requires-Dist: PyMuPDF
 Requires-Dist: docxlatex>=1.1.1
 Requires-Dist: openpyxl
@@ -49,14 +49,20 @@ PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge
 
 ### Model Features
 
-The features listed represent the maximum capabilities of each API type, not necessarily those of every individual model.
+The features listed represent the maximum capabilities of each API type supported by LLM Bridge.
 
-| Model Type | Input Format                   | Capabilities         | Output Format |
-|------------|--------------------------------|----------------------|---------------|
-| OpenAI     | Text, Image                    | Thinking, Web Search | Text          |
-| Gemini     | Text, Image, Video, Audio, PDF | Thinking, Web Search | Text, Image   |
-| Claude     | Text, Image, PDF               | Thinking, Web Search | Text          |
-| Grok       | Text, Image                    |                      | Text          |
+| API Type | Input Format                   | Capabilities                                               | Output Format |
+|----------|--------------------------------|------------------------------------------------------------|---------------|
+| OpenAI   | Text, Image                    | Thinking, Web Search, Code Execution                       | Text          |
+| Gemini   | Text, Image, Video, Audio, PDF | Thinking + Thought, Web Search + Citations, Code Execution | Text, Image   |
+| Claude   | Text, Image, PDF               | Thinking, Web Search                                       | Text          |
+| Grok     | Text, Image                    |                                                            | Text          |
+
+#### Planned Features
+
+- OpenAI: Web Search: Citations, Image Output
+- Gemini: Code Execution: Code, Code Output
+- Claude: Code Execution, File Output
 
 ## Installation
 

LLM_Bridge.egg-info/requires.txt
@@ -1,10 +1,10 @@
 fastapi
 httpx
 tenacity
-openai==1.99.6
+openai==1.106.1
 tiktoken==0.11.0
 google-genai==1.28.0
-anthropic==0.62.0
+anthropic==0.66.0
 PyMuPDF
 docxlatex>=1.1.1
 openpyxl

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.8.1
+Version: 1.9.0
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
@@ -13,10 +13,10 @@ License-File: LICENSE
 Requires-Dist: fastapi
 Requires-Dist: httpx
 Requires-Dist: tenacity
-Requires-Dist: openai==1.99.6
+Requires-Dist: openai==1.106.1
 Requires-Dist: tiktoken==0.11.0
 Requires-Dist: google-genai==1.28.0
-Requires-Dist: anthropic==0.62.0
+Requires-Dist: anthropic==0.66.0
 Requires-Dist: PyMuPDF
 Requires-Dist: docxlatex>=1.1.1
 Requires-Dist: openpyxl
@@ -49,14 +49,20 @@ PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge
 
 ### Model Features
 
-The features listed represent the maximum capabilities of each API type, not necessarily those of every individual model.
+The features listed represent the maximum capabilities of each API type supported by LLM Bridge.
 
-| Model Type | Input Format                   | Capabilities         | Output Format |
-|------------|--------------------------------|----------------------|---------------|
-| OpenAI     | Text, Image                    | Thinking, Web Search | Text          |
-| Gemini     | Text, Image, Video, Audio, PDF | Thinking, Web Search | Text, Image   |
-| Claude     | Text, Image, PDF               | Thinking, Web Search | Text          |
-| Grok       | Text, Image                    |                      | Text          |
+| API Type | Input Format                   | Capabilities                                               | Output Format |
+|----------|--------------------------------|------------------------------------------------------------|---------------|
+| OpenAI   | Text, Image                    | Thinking, Web Search, Code Execution                       | Text          |
+| Gemini   | Text, Image, Video, Audio, PDF | Thinking + Thought, Web Search + Citations, Code Execution | Text, Image   |
+| Claude   | Text, Image, PDF               | Thinking, Web Search                                       | Text          |
+| Grok     | Text, Image                    |                                                            | Text          |
+
+#### Planned Features
+
+- OpenAI: Web Search: Citations, Image Output
+- Gemini: Code Execution: Code, Code Output
+- Claude: Code Execution, File Output
 
 ## Installation
 

README.md
@@ -19,14 +19,20 @@ PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge
 
 ### Model Features
 
-The features listed represent the maximum capabilities of each API type, not necessarily those of every individual model.
-
-| Model Type | Input Format                   | Capabilities         | Output Format |
-|------------|--------------------------------|----------------------|---------------|
-| OpenAI     | Text, Image                    | Thinking, Web Search | Text          |
-| Gemini     | Text, Image, Video, Audio, PDF | Thinking, Web Search | Text, Image   |
-| Claude     | Text, Image, PDF               | Thinking, Web Search | Text          |
-| Grok       | Text, Image                    |                      | Text          |
+The features listed represent the maximum capabilities of each API type supported by LLM Bridge.
+
+| API Type | Input Format                   | Capabilities                                               | Output Format |
+|----------|--------------------------------|------------------------------------------------------------|---------------|
+| OpenAI   | Text, Image                    | Thinking, Web Search, Code Execution                       | Text          |
+| Gemini   | Text, Image, Video, Audio, PDF | Thinking + Thought, Web Search + Citations, Code Execution | Text, Image   |
+| Claude   | Text, Image, PDF               | Thinking, Web Search                                       | Text          |
+| Grok     | Text, Image                    |                                                            | Text          |
+
+#### Planned Features
+
+- OpenAI: Web Search: Citations, Image Output
+- Gemini: Code Execution: Code, Code Output
+- Claude: Code Execution, File Output
 
 ## Installation
 

llm_bridge/client/implementations/claude/claude_stream_response_handler.py
@@ -20,9 +20,6 @@ class ClaudeStreamResponseHandler:
                 thought += event.delta.thinking
             elif event.delta.type == "text_delta":
                 text += event.delta.text
-            elif event.type == "citation":
-                citation = event.citation
-                text += f"([{citation.title}]({citation.url})) "
 
         chat_response = ChatResponse(
             text=text,

llm_bridge/client/implementations/claude/non_stream_claude_client.py
@@ -26,10 +26,6 @@ async def process_claude_non_stream_response(
            thought += content.thinking
        if content.type == "text":
            text += content.text
-            # Unable to test: non-streaming Claude is currently not allowed
-            if citations := content.citations:
-                for citation in citations:
-                    text += f"([{citation.title}]({citation.url})) "
 
    chat_response = ChatResponse(
        text=text,

llm_bridge/client/implementations/gemini/gemini_response_handler.py
@@ -18,28 +18,37 @@ class GeminiResponseHandler:
         self,
         response: types.GenerateContentResponse,
     ) -> ChatResponse:
-        text = ""
-        thought = ""
-        display = None
-        image_base64 = None
-        citations = extract_citations(response)
+        text: str = ""
+        thought: str = ""
+        code: str = ""
+        code_output: str = ""
+        image: Optional[str] = None
+        display: Optional[str] = None
+        citations: list[Citation] = extract_citations(response)
         input_tokens, stage_output_tokens = await count_gemini_tokens(response)
 
         printing_status = None
         if candidates := response.candidates:
             if candidates[0].content.parts:
                 for part in response.candidates[0].content.parts:
-                    # Thought Output
-                    if part.text:
+                    if part.text is not None:
+                        # Thought
                         if part.thought:
                             printing_status = PrintingStatus.Thought
                             thought += part.text
+                        # Text
                         elif not part.thought:
                             printing_status = PrintingStatus.Response
                             text += part.text
-                    # Image Output
-                    elif part.inline_data:
-                        image_base64 = base64.b64encode(part.inline_data.data).decode('utf-8')
+                    # Code (Causing Error)
+                    # if part.executable_code is not None:
+                    #     code += part.executable_code.code
+                    # Code Output
+                    if part.code_execution_result is not None:
+                        code_output += part.code_execution_result.output
+                    # Image
+                    if part.inline_data is not None:
+                        image = base64.b64encode(part.inline_data.data).decode('utf-8')
 
         # Grounding Sources
         if candidates := response.candidates:
@@ -63,7 +72,9 @@ class GeminiResponseHandler:
         return ChatResponse(
             text=text,
             thought=thought,
-            image=image_base64,
+            code=code,
+            code_output=code_output,
+            image=image,
             display=display,
             citations=citations,
             input_tokens=input_tokens,
@@ -80,4 +91,4 @@ def extract_citations(response: types.GenerateContentResponse) -> list[Citation]
             citation_indices = [index + 1 for index in grounding_support.grounding_chunk_indices]
             citation_text = grounding_support.segment.text
             citations.append(Citation(text=citation_text, indices=citation_indices))
-    return citations
+    return citations

llm_bridge/client/implementations/gemini/gemini_token_counter.py
@@ -5,6 +5,8 @@ async def count_gemini_tokens(
         response: types.GenerateContentResponse
 ) -> tuple[int, int]:
     usage_metadata = response.usage_metadata
+    if usage_metadata is None:
+        return 0, 0
     input_tokens = usage_metadata.prompt_token_count
     output_tokens = usage_metadata.candidates_token_count
     if output_tokens is None:
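
The guard added above treats usage metadata as optional rather than assuming every response carries it. For context, here is a standalone sketch of the same defensive pattern; the helper name `safe_token_counts` is ours, not the package's, and the claim that metadata can be absent is taken only from the guard itself:

```python
from google.genai import types


def safe_token_counts(response: types.GenerateContentResponse) -> tuple[int, int]:
    # usage_metadata may be absent on some responses, and individual
    # counts may themselves be None even when metadata is present.
    meta = response.usage_metadata
    if meta is None:
        return 0, 0
    return meta.prompt_token_count or 0, meta.candidates_token_count or 0
```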

llm_bridge/client/implementations/gemini/stream_gemini_client.py
@@ -4,7 +4,6 @@ from typing import AsyncGenerator
 
 import httpx
 from fastapi import HTTPException
-from google.genai import types
 
 from llm_bridge.client.implementations.gemini.gemini_response_handler import GeminiResponseHandler
 from llm_bridge.client.model_client.gemini_client import GeminiClient
@@ -39,7 +38,6 @@ class StreamGeminiClient(GeminiClient):
             response_handler = GeminiResponseHandler()
             async for response_delta in response:
                 yield await response_handler.process_gemini_response(response_delta)
-
         except Exception as e:
             logging.exception(e)
             yield ChatResponse(error=repr(e))

llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py
@@ -1,5 +1,7 @@
 import logging
 import re
+from pprint import pprint
+from typing import Optional
 
 import httpx
 import openai
@@ -21,7 +23,8 @@ def process_openai_responses_non_stream_response(
 
     output_list = response.output
 
-    text = ""
+    text: str = ""
+    image: Optional[str] = None
     citations: list[Citation] = []
 
     for output in output_list:
@@ -29,15 +32,25 @@ def process_openai_responses_non_stream_response(
         for content in output.content:
             if content.type == "output_text":
                 text += content.text
-                # Citation is currently not working well in OpenAI Responses API
-                if annotations := content.annotations:
-                    for annotation in annotations:
-                        text = content.text[annotation.start_index:annotation.end_index]
+                # Citation is unavailable in OpenAI Responses API
+                # if annotations := content.annotations:
+                #     for annotation in annotations:
+                #         citations.append(
+                #             Citation(
+                #                 text=content.text[annotation.start_index:annotation.end_index],
+                #                 url=annotation.url
+                #             )
+                #         )
+        # Image Generation untestable due to organization verification requirement
+        # if output.type == "image_generation_call":
+        #     image = output.result
 
-    chat_response = ChatResponse(text=text, citations=citations)
+    chat_response = ChatResponse(text=text, image=image)
     output_tokens = count_openai_output_tokens(chat_response)
     return ChatResponse(
         text=text,
+        image=image,
+        citations=citations,
         input_tokens=input_tokens,
         output_tokens=output_tokens,
     )
@@ -54,6 +67,7 @@ class NonStreamOpenAIResponsesClient(OpenAIClient):
 
         response: Response = await self.client.responses.create(
             model=self.model,
+            reasoning=self.reasoning,
             input=serialize(self.messages),
             temperature=self.temperature,
             stream=False,

llm_bridge/client/implementations/openai/steam_openai_responses_client.py
@@ -1,6 +1,7 @@
 import logging
 import re
-from typing import AsyncGenerator
+from pprint import pprint
+from typing import AsyncGenerator, Optional
 
 import httpx
 import openai
@@ -11,16 +12,30 @@ from openai.types.responses import ResponseStreamEvent
 from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens, \
     count_openai_output_tokens
 from llm_bridge.client.model_client.openai_client import OpenAIClient
-from llm_bridge.type.chat_response import ChatResponse
+from llm_bridge.type.chat_response import ChatResponse, Citation
 from llm_bridge.type.serializer import serialize
 
 
-def process_delta(event: ResponseStreamEvent) -> str:
-    if event.type != "response.output_text.delta":
-        return ""
+def process_delta(event: ResponseStreamEvent) -> ChatResponse:
+    text: str = ""
+    image: Optional[str] = None
+    citations: list[Citation] = []
 
-    content_delta = event.delta
-    return content_delta
+    if event.type == "response.output_text.delta":
+        text = event.delta
+    # Citation is unavailable in OpenAI Responses API
+    if event.type == "response.output_text.annotation.added":
+        pass
+    # Image Generation untestable due to organization verification requirement
+    # if event.type == "response.image_generation_call.partial_image":
+    #     image = event.partial_image_b64
+
+    chat_response = ChatResponse(
+        text=text,
+        image=image,
+        citations=citations,
+    )
+    return chat_response
 
 
 async def generate_chunk(
@@ -29,11 +44,12 @@ async def generate_chunk(
 ) -> AsyncGenerator[ChatResponse, None]:
     try:
         async for event in stream:
-            content_delta = process_delta(event)
-            chat_response = ChatResponse(text=content_delta)
+            chat_response = process_delta(event)
             output_tokens = count_openai_output_tokens(chat_response)
             yield ChatResponse(
-                text=content_delta,
+                text=chat_response.text,
+                image=chat_response.image,
+                citations=chat_response.citations,
                 input_tokens=input_tokens,
                 output_tokens=output_tokens,
             )
@@ -53,6 +69,7 @@ class StreamOpenAIResponsesClient(OpenAIClient):
 
         stream: AsyncStream[ResponseStreamEvent] = await self.client.responses.create(
             model=self.model,
+            reasoning=self.reasoning,
             input=serialize(self.messages),
             temperature=self.temperature,
             stream=True,
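
For readers unfamiliar with the Responses API event stream that `process_delta` filters, here is a minimal, self-contained sketch (model name illustrative; an `OPENAI_API_KEY` environment variable is assumed) of the same pattern against the OpenAI SDK directly:

```python
import asyncio

import openai


async def main() -> None:
    client = openai.AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    stream = await client.responses.create(
        model="gpt-5",  # illustrative model name
        input="Stream a haiku about bridges.",
        stream=True,
    )
    async for event in stream:
        # Mirrors process_delta above: only text-delta events carry output text.
        if event.type == "response.output_text.delta":
            print(event.delta, end="", flush=True)


asyncio.run(main())
```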

llm_bridge/client/model_client/openai_client.py
@@ -1,6 +1,7 @@
 from typing import AsyncGenerator, Iterable
 
 import openai.lib.azure
+from openai.types import Reasoning
 from openai.types.responses import ToolParam
 
 from llm_bridge.client.chat_client import ChatClient
@@ -18,6 +19,7 @@ class OpenAIClient(ChatClient):
         api_type: str,
         client: openai.AsyncOpenAI | openai.lib.azure.AsyncAzureOpenAI,
         tools: Iterable[ToolParam],
+        reasoning: Reasoning,
     ):
         self.model = model
         self.messages = messages
@@ -25,6 +27,7 @@ class OpenAIClient(ChatClient):
         self.api_type = api_type
         self.client = client
         self.tools = tools
+        self.reasoning = reasoning
 
     async def generate_non_stream_response(self) -> ChatResponse:
         raise NotImplementedError
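
The `reasoning` field added here is what the two Responses clients above forward to `responses.create`. A minimal sketch of the underlying SDK call, assuming an `OPENAI_API_KEY` and an illustrative model name:

```python
import asyncio

import openai
from openai.types import Reasoning


async def main() -> None:
    client = openai.AsyncOpenAI()
    response = await client.responses.create(
        model="gpt-5",  # illustrative; the factory enables reasoning for gpt-5 models
        reasoning=Reasoning(effort="high"),  # same value the factory sets
        input="Summarize the Doppler effect in one sentence.",
    )
    print(response.output_text)


asyncio.run(main())
```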

llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py
@@ -1,5 +1,6 @@
 import anthropic
-from anthropic.types import ThinkingConfigEnabledParam
+from anthropic.types import ThinkingConfigEnabledParam, AnthropicBetaParam
+from anthropic.types.beta import BetaCodeExecutionTool20250825Param, BetaWebSearchTool20250305Param
 
 from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_input_tokens
 from llm_bridge.client.implementations.claude.non_stream_claude_client import NonStreamClaudeClient
@@ -39,11 +40,18 @@ async def create_claude_client(
             budget_tokens=16000
         )
         temperature = 1
-    betas = ["output-128k-2025-02-19"]
-    tools = [{
-        "type": "web_search_20250305",
-        "name": "web_search",
-    }]
+    betas: list[AnthropicBetaParam] = ["output-128k-2025-02-19", "code-execution-2025-08-25"]
+    tools = [
+        BetaWebSearchTool20250305Param(
+            type="web_search_20250305",
+            name="web_search",
+        ),
+        # Code Execution is unavailable in Claude
+        # BetaCodeExecutionTool20250825Param(
+        #     type="code_execution_20250825",
+        #     name="code_execution",
+        # )
+    ]
 
     if stream:
         return StreamClaudeClient(
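
A sketch of how the typed beta params above reach the Anthropic SDK (illustrative model name; `ANTHROPIC_API_KEY` assumed); the commented-out code-execution tool would join the `tools` list once the package enables it:

```python
import anthropic
from anthropic.types.beta import BetaWebSearchTool20250305Param

client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment
message = client.beta.messages.create(
    model="claude-sonnet-4-20250514",  # illustrative model name
    max_tokens=1024,
    betas=["output-128k-2025-02-19", "code-execution-2025-08-25"],
    tools=[
        BetaWebSearchTool20250305Param(
            type="web_search_20250305",
            name="web_search",
        ),
    ],
    messages=[{"role": "user", "content": "What is new in Python 3.13?"}],
)
print(message.content)
```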

llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py
@@ -38,6 +38,11 @@ async def create_gemini_client(
             url_context=types.UrlContext()
         )
     )
+    tools.append(
+        types.Tool(
+            code_execution=types.ToolCodeExecution()
+        )
+    )
     if "image" not in model:
         thinking_config = types.ThinkingConfig(include_thoughts=True)
     if "image" in model:

llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py
@@ -2,7 +2,10 @@ import re
 
 import openai
 from fastapi import HTTPException
+from openai.types import Reasoning
 from openai.types.responses import WebSearchToolParam
+from openai.types.responses.tool_param import CodeInterpreter, CodeInterpreterContainerCodeInterpreterToolAuto, \
+    ImageGeneration
 
 from llm_bridge.client.implementations.openai.non_stream_openai_client import NonStreamOpenAIClient
 from llm_bridge.client.implementations.openai.non_stream_openai_responses_client import NonStreamOpenAIResponsesClient
@@ -54,20 +57,31 @@ async def create_openai_client(
     else:
         openai_messages = await convert_messages_to_openai(messages)
 
-    tools = [
-        WebSearchToolParam(
-            type="web_search_preview",
-            search_context_size="high",
-        )
-    ]
+    tools = []
+    reasoning = None
 
-    if re.match(r"^o\d", model):
-        tools = None
-        temperature = 1
-    if re.match(r"gpt-5.*", model):
+    if model != "gpt-5-chat-latest":
+        tools.append(
+            WebSearchToolParam(
+                type="web_search",
+                search_context_size="high",
+            )
+        )
+        tools.append(
+            CodeInterpreter(
+                type="code_interpreter",
+                container=CodeInterpreterContainerCodeInterpreterToolAuto(type="auto")
+            )
+        )
+    if re.match(r"^o\d", model) or (re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest"):
         temperature = 1
-    if model == "gpt-5-chat-latest":
-        tools = None
+    if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
+        reasoning = Reasoning(effort="high")
+        tools.append(
+            ImageGeneration(
+                type="image_generation",
+            )
+        )
 
     if use_responses_api:
         if stream:
@@ -78,6 +92,7 @@ async def create_openai_client(
                 api_type=api_type,
                 client=client,
                 tools=tools,
+                reasoning=reasoning,
             )
         else:
             return NonStreamOpenAIResponsesClient(
@@ -87,6 +102,7 @@ async def create_openai_client(
                 api_type=api_type,
                 client=client,
                 tools=tools,
+                reasoning=reasoning,
             )
     else:
         if stream:
@@ -97,6 +113,7 @@ async def create_openai_client(
                 api_type=api_type,
                 client=client,
                 tools=tools,
+                reasoning=reasoning,
             )
         else:
             return NonStreamOpenAIClient(
@@ -106,4 +123,5 @@ async def create_openai_client(
                 api_type=api_type,
                 client=client,
                 tools=tools,
+                reasoning=reasoning,
             )
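
Putting the factory's new tool set together, here is a sketch of the resulting Responses API call (illustrative model name; per the logic above, `gpt-5-chat-latest` gets no tools and image generation is added only for reasoning-capable gpt-5 models):

```python
import openai
from openai.types.responses import WebSearchToolParam
from openai.types.responses.tool_param import (
    CodeInterpreter,
    CodeInterpreterContainerCodeInterpreterToolAuto,
)

client = openai.OpenAI()  # reads OPENAI_API_KEY from the environment
response = client.responses.create(
    model="gpt-5",  # illustrative model name
    tools=[
        WebSearchToolParam(type="web_search", search_context_size="high"),
        CodeInterpreter(
            type="code_interpreter",
            container=CodeInterpreterContainerCodeInterpreterToolAuto(type="auto"),
        ),
    ],
    input="What is the 100th prime? Verify with code.",
)
print(response.output_text)
```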

llm_bridge/type/chat_response.py
@@ -8,10 +8,20 @@ class Citation:
     indices: list[int]
 
 
+# TODO: adapt to different Citation formats
+# @dataclass
+# class Citation:
+#     text: str
+#     indices: Optional[list[int]] = None
+#     url: Optional[str] = None
+
+
 @dataclass
 class ChatResponse:
     text: Optional[str] = None
     thought: Optional[str] = None
+    code: Optional[str] = None
+    code_output: Optional[str] = None
     image: Optional[str] = None
     display: Optional[str] = None
     citations: Optional[list[Citation]] = None
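
With the two new fields, consumers can distinguish executed-code output from ordinary response text. A small usage sketch (all values illustrative; the field set is taken from the diff above):

```python
from llm_bridge.type.chat_response import ChatResponse

response = ChatResponse(
    text="The 20th Fibonacci number is 6765.",
    code_output="6765\n",  # populated when the model ran code
    input_tokens=42,
    output_tokens=9,
)
if response.code_output:
    print("executed-code output:", response.code_output)
```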

pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "LLM-Bridge"
-version = "1.8.1"
+version = "1.9.0"
 authors = [
     {name = "windsnow1025", email = "windsnow1025@gmail.com"}
 ]
@@ -21,10 +21,10 @@ dependencies = [
     "fastapi",
     "httpx",
     "tenacity",
-    "openai==1.99.6",
+    "openai==1.106.1",
     "tiktoken==0.11.0",
     "google-genai==1.28.0",
-    "anthropic==0.62.0",
+    "anthropic==0.66.0",
     "PyMuPDF",
     "docxlatex>=1.1.1",
     "openpyxl",