LLM-Bridge 1.11.5.tar.gz → 1.11.6.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. {llm_bridge-1.11.5 → llm_bridge-1.11.6/LLM_Bridge.egg-info}/PKG-INFO +1 -1
  2. {llm_bridge-1.11.5/LLM_Bridge.egg-info → llm_bridge-1.11.6}/PKG-INFO +1 -1
  3. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +15 -6
  4. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +11 -4
  5. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +4 -1
  6. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/pyproject.toml +1 -1
  7. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/LICENSE +0 -0
  8. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/LLM_Bridge.egg-info/SOURCES.txt +0 -0
  9. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/LLM_Bridge.egg-info/dependency_links.txt +0 -0
  10. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/LLM_Bridge.egg-info/requires.txt +0 -0
  11. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/LLM_Bridge.egg-info/top_level.txt +0 -0
  12. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/MANIFEST.in +0 -0
  13. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/README.md +0 -0
  14. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/__init__.py +0 -0
  15. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/__init__.py +0 -0
  16. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/chat_client.py +0 -0
  17. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/__init__.py +0 -0
  18. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  19. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
  20. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  21. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  22. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  23. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  24. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
  25. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  26. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  27. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  28. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  29. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  30. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  31. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  32. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/implementations/printing_status.py +0 -0
  33. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/model_client/__init__.py +0 -0
  34. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/model_client/claude_client.py +0 -0
  35. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/model_client/gemini_client.py +0 -0
  36. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/client/model_client/openai_client.py +0 -0
  37. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/__init__.py +0 -0
  38. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  39. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  40. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  41. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  42. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  43. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  44. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  45. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  46. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  47. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  48. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  49. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  50. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/file_fetch.py +0 -0
  51. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  52. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  53. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  54. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  55. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  56. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/logic/model_prices.py +0 -0
  57. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/resources/__init__.py +0 -0
  58. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/resources/model_prices.json +0 -0
  59. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/type/__init__.py +0 -0
  60. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/type/chat_response.py +0 -0
  61. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/type/message.py +0 -0
  62. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/type/model_message/__init__.py +0 -0
  63. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/type/model_message/claude_message.py +0 -0
  64. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/type/model_message/gemini_message.py +0 -0
  65. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/type/model_message/openai_message.py +0 -0
  66. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  67. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/llm_bridge/type/serializer.py +0 -0
  68. {llm_bridge-1.11.5 → llm_bridge-1.11.6}/setup.cfg +0 -0
LLM_Bridge.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.11.5
+Version: 1.11.6
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.11.5
+Version: 1.11.6
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py
@@ -7,7 +7,8 @@ import httpx
 import openai
 from fastapi import HTTPException
 from openai import APIStatusError
-from openai.types.responses import WebSearchToolParam, Response
+from openai.types.responses import WebSearchToolParam, Response, ResponseOutputItem, ResponseOutputMessage, \
+    ResponseOutputText, ResponseReasoningItem
 
 from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens, \
     count_openai_output_tokens
@@ -21,19 +22,22 @@ def process_openai_responses_non_stream_response(
         input_tokens: int,
 ) -> ChatResponse:
 
-    output_list = response.output
+    output_list: list[ResponseOutputItem] = response.output
 
     text: str = ""
+    thought: str = ""
     files: list[File] = []
     citations: list[Citation] = []
 
     for output in output_list:
         if output.type == "message":
-            for content in output.content:
+            output_message: ResponseOutputMessage = output
+            for content in output_message.content:
                 if content.type == "output_text":
-                    text += content.text
+                    output_text: ResponseOutputText = content
+                    text += output_text.text
                 # Citation is unavailable in OpenAI Responses API
-                # if annotations := content.annotations:
+                # elif annotations := content.annotations:
                 #     for annotation in annotations:
                 #         citations.append(
                 #             Citation(
@@ -41,7 +45,11 @@ def process_openai_responses_non_stream_response(
                 #                 url=annotation.url
                 #             )
                 #         )
-    # Image Generation untestable due to organization verification requirement
+        elif output.type == "reasoning":
+            reasoning_item: ResponseReasoningItem = output
+            for summary_delta in reasoning_item.summary:
+                thought += summary_delta.text
+    # Image Generation to be tested
     # if output.type == "image_generation_call":
     #     file = File(
     #         name="generated_image.png",
@@ -54,6 +62,7 @@ def process_openai_responses_non_stream_response(
     output_tokens = count_openai_output_tokens(chat_response)
     return ChatResponse(
         text=text,
+        thought=thought,
         files=files,
         citations=citations,
         input_tokens=input_tokens,
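
In short, the non-stream handler now narrows each member of response.output by its type tag before touching type-specific fields, and collects reasoning summaries into a new thought channel alongside text. A condensed sketch of that pattern, using only types the diff itself imports (split_text_and_thought is a name invented for this sketch, not part of the package):

from openai.types.responses import (
    Response,
    ResponseOutputMessage,
    ResponseOutputText,
    ResponseReasoningItem,
)


def split_text_and_thought(response: Response) -> tuple[str, str]:
    """Separate answer text from reasoning-summary text in a Responses result."""
    text, thought = "", ""
    for output in response.output:
        if output.type == "message":
            # The "message" tag identifies this union member as ResponseOutputMessage.
            message: ResponseOutputMessage = output
            for content in message.content:
                if content.type == "output_text":
                    part: ResponseOutputText = content
                    text += part.text
        elif output.type == "reasoning":
            # Reasoning items carry a list of summary parts, each exposing .text.
            reasoning: ResponseReasoningItem = output
            thought += "".join(part.text for part in reasoning.summary)
    return text, thought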
llm_bridge/client/implementations/openai/steam_openai_responses_client.py
@@ -7,7 +7,7 @@ import httpx
 import openai
 from fastapi import HTTPException
 from openai import APIStatusError, AsyncStream
-from openai.types.responses import ResponseStreamEvent
+from openai.types.responses import ResponseStreamEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseTextDeltaEvent
 
 from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens, \
     count_openai_output_tokens
@@ -18,15 +18,20 @@ from llm_bridge.type.serializer import serialize
 
 def process_delta(event: ResponseStreamEvent) -> ChatResponse:
     text: str = ""
+    thought: str = ""
     files: list[File] = []
     citations: list[Citation] = []
 
     if event.type == "response.output_text.delta":
-        text = event.delta
+        text_delta_event: ResponseTextDeltaEvent = event
+        text = text_delta_event.delta
+    elif event.type == "response.reasoning_summary_text.delta":
+        reasoning_summary_text_delta_event: ResponseReasoningSummaryTextDeltaEvent = event
+        thought = reasoning_summary_text_delta_event.delta
     # Citation is unavailable in OpenAI Responses API
-    if event.type == "response.output_text.annotation.added":
+    elif event.type == "response.output_text.annotation.added":
         pass
-    # Image Generation untestable due to organization verification requirement
+    # Image Generation to be tested
     # if event.type == "response.image_generation_call.partial_image":
     #     file = File(
     #         name="generated_image.png",
@@ -37,6 +42,7 @@ def process_delta(event: ResponseStreamEvent) -> ChatResponse:
 
     chat_response = ChatResponse(
         text=text,
+        thought=thought,
         files=files,
         citations=citations,
     )
@@ -53,6 +59,7 @@ async def generate_chunk(
     output_tokens = count_openai_output_tokens(chat_response)
     yield ChatResponse(
         text=chat_response.text,
+        thought=chat_response.thought,
         files=chat_response.files,
         citations=chat_response.citations,
         input_tokens=input_tokens,
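
The streaming side applies the same dispatch per event: process_delta routes response.output_text.delta into text and response.reasoning_summary_text.delta into thought, one delta at a time. A minimal standalone consumer of the same events, assuming the official AsyncOpenAI client with a placeholder model and prompt (summary deltas only arrive when the request enables reasoning summaries, as the factory change below does):

import asyncio

from openai import AsyncOpenAI
from openai.types.responses import (
    ResponseReasoningSummaryTextDeltaEvent,
    ResponseTextDeltaEvent,
)


async def main() -> None:
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    stream = await client.responses.create(
        model="gpt-5",  # placeholder; any reasoning-capable model
        input="Why is the sky blue?",
        reasoning={"effort": "high", "summary": "auto"},
        stream=True,
    )
    async for event in stream:
        if event.type == "response.output_text.delta":
            text_event: ResponseTextDeltaEvent = event
            print(text_event.delta, end="")  # answer text as it streams
        elif event.type == "response.reasoning_summary_text.delta":
            thought_event: ResponseReasoningSummaryTextDeltaEvent = event
            print(thought_event.delta, end="")  # reasoning summary as it streams


asyncio.run(main())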
llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py
@@ -77,7 +77,10 @@ async def create_openai_client(
     if re.match(r"^o\d", model) or (re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest"):
         temperature = 1
     if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
-        reasoning = Reasoning(effort="high")
+        reasoning = Reasoning(
+            effort="high",
+            summary="auto",
+        )
         tools.append(
             ImageGeneration(
                 type="image_generation",
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "LLM-Bridge"
-version = "1.11.5"
+version = "1.11.6"
 authors = [
     {name = "windsnow1025", email = "windsnow1025@gmail.com"}
 ]