LLM-Bridge 1.9.0a0__tar.gz → 1.10.0__tar.gz

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (68)
  1. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0/LLM_Bridge.egg-info}/PKG-INFO +9 -3
  2. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/LLM_Bridge.egg-info/requires.txt +2 -2
  3. {llm_bridge-1.9.0a0/LLM_Bridge.egg-info → llm_bridge-1.10.0}/PKG-INFO +9 -3
  4. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/README.md +6 -0
  5. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +22 -14
  6. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +13 -7
  7. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/openai/openai_token_couter.py +1 -1
  8. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +16 -3
  9. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/type/chat_response.py +8 -1
  10. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/pyproject.toml +3 -3
  11. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/LICENSE +0 -0
  12. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/LLM_Bridge.egg-info/SOURCES.txt +0 -0
  13. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/LLM_Bridge.egg-info/dependency_links.txt +0 -0
  14. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/LLM_Bridge.egg-info/top_level.txt +0 -0
  15. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/MANIFEST.in +0 -0
  16. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/__init__.py +0 -0
  17. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/__init__.py +0 -0
  18. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/chat_client.py +0 -0
  19. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/__init__.py +0 -0
  20. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  21. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/claude/claude_stream_response_handler.py +0 -0
  22. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  23. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  24. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  25. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  26. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  27. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  28. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  29. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  30. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  31. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  32. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/printing_status.py +0 -0
  33. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/model_client/__init__.py +0 -0
  34. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/model_client/claude_client.py +0 -0
  35. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/model_client/gemini_client.py +0 -0
  36. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/model_client/openai_client.py +0 -0
  37. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/__init__.py +0 -0
  38. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  39. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  40. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  41. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  42. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  43. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  44. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  45. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +0 -0
  46. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  47. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  48. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  49. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  50. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  51. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/file_fetch.py +0 -0
  52. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  53. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  54. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  55. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  56. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  57. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/logic/model_prices.py +0 -0
  58. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/resources/__init__.py +0 -0
  59. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/resources/model_prices.json +0 -0
  60. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/type/__init__.py +0 -0
  61. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/type/message.py +0 -0
  62. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/type/model_message/__init__.py +0 -0
  63. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/type/model_message/claude_message.py +0 -0
  64. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/type/model_message/gemini_message.py +0 -0
  65. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/type/model_message/openai_message.py +0 -0
  66. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  67. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/type/serializer.py +0 -0
  68. {llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/setup.cfg +0 -0
{llm_bridge-1.9.0a0 → llm_bridge-1.10.0/LLM_Bridge.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.9.0a0
+Version: 1.10.0
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
@@ -13,10 +13,10 @@ License-File: LICENSE
 Requires-Dist: fastapi
 Requires-Dist: httpx
 Requires-Dist: tenacity
-Requires-Dist: openai==1.99.6
+Requires-Dist: openai==1.106.1
 Requires-Dist: tiktoken==0.11.0
 Requires-Dist: google-genai==1.28.0
-Requires-Dist: anthropic==0.62.0
+Requires-Dist: anthropic==0.66.0
 Requires-Dist: PyMuPDF
 Requires-Dist: docxlatex>=1.1.1
 Requires-Dist: openpyxl
@@ -58,6 +58,12 @@ The features listed represent the maximum capabilities of each API type supporte
 | Claude | Text, Image, PDF | Thinking, Web Search | Text |
 | Grok | Text, Image | | Text |
 
+#### Planned Features
+
+- OpenAI: Web Search: Citations, Image Output
+- Gemini: Code Execution: Code, Code Output
+- Claude: Code Execution, File Output
+
 ## Installation
 
 ```bash
{llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/LLM_Bridge.egg-info/requires.txt

@@ -1,10 +1,10 @@
 fastapi
 httpx
 tenacity
-openai==1.99.6
+openai==1.106.1
 tiktoken==0.11.0
 google-genai==1.28.0
-anthropic==0.62.0
+anthropic==0.66.0
 PyMuPDF
 docxlatex>=1.1.1
 openpyxl
{llm_bridge-1.9.0a0/LLM_Bridge.egg-info → llm_bridge-1.10.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.9.0a0
+Version: 1.10.0
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
@@ -13,10 +13,10 @@ License-File: LICENSE
 Requires-Dist: fastapi
 Requires-Dist: httpx
 Requires-Dist: tenacity
-Requires-Dist: openai==1.99.6
+Requires-Dist: openai==1.106.1
 Requires-Dist: tiktoken==0.11.0
 Requires-Dist: google-genai==1.28.0
-Requires-Dist: anthropic==0.62.0
+Requires-Dist: anthropic==0.66.0
 Requires-Dist: PyMuPDF
 Requires-Dist: docxlatex>=1.1.1
 Requires-Dist: openpyxl
@@ -58,6 +58,12 @@ The features listed represent the maximum capabilities of each API type supporte
 | Claude | Text, Image, PDF | Thinking, Web Search | Text |
 | Grok | Text, Image | | Text |
 
+#### Planned Features
+
+- OpenAI: Web Search: Citations, Image Output
+- Gemini: Code Execution: Code, Code Output
+- Claude: Code Execution, File Output
+
 ## Installation
 
 ```bash
{llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/README.md

@@ -28,6 +28,12 @@ The features listed represent the maximum capabilities of each API type supporte
 | Claude | Text, Image, PDF | Thinking, Web Search | Text |
 | Grok | Text, Image | | Text |
 
+#### Planned Features
+
+- OpenAI: Web Search: Citations, Image Output
+- Gemini: Code Execution: Code, Code Output
+- Claude: Code Execution, File Output
+
 ## Installation
 
 ```bash
{llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/gemini/gemini_response_handler.py

@@ -1,11 +1,12 @@
 import base64
+import mimetypes
 from typing import Optional
 
 from google.genai import types
 
 from llm_bridge.client.implementations.gemini.gemini_token_counter import count_gemini_tokens
 from llm_bridge.client.implementations.printing_status import PrintingStatus
-from llm_bridge.type.chat_response import Citation, ChatResponse
+from llm_bridge.type.chat_response import Citation, ChatResponse, File
 
 
 class GeminiResponseHandler:
@@ -18,13 +19,13 @@ class GeminiResponseHandler:
             self,
             response: types.GenerateContentResponse,
     ) -> ChatResponse:
-        text = ""
-        thought = ""
-        code = ""
-        code_output = ""
-        display = None
-        image_base64 = None
-        citations = extract_citations(response)
+        text: str = ""
+        thought: str = ""
+        code: str = ""
+        code_output: str = ""
+        files: list[File] = []
+        display: Optional[str] = None
+        citations: list[Citation] = extract_citations(response)
         input_tokens, stage_output_tokens = await count_gemini_tokens(response)
 
         printing_status = None
@@ -40,15 +41,22 @@ class GeminiResponseHandler:
             elif not part.thought:
                 printing_status = PrintingStatus.Response
                 text += part.text
-            # Code (Causing Error)
-            # if part.executable_code is not None:
-            #     code += part.executable_code.code
+            # Code
+            if part.executable_code is not None:
+                code += part.executable_code.code
             # Code Output
             if part.code_execution_result is not None:
                 code_output += part.code_execution_result.output
-            # Image
+            # File
             if part.inline_data is not None:
-                image_base64 = base64.b64encode(part.inline_data.data).decode('utf-8')
+                mime_type = part.inline_data.mime_type
+                extension = mimetypes.guess_extension(mime_type) or ""
+                file = File(
+                    name=f"generated_file{extension}",
+                    data=base64.b64encode(part.inline_data.data).decode('utf-8'),
+                    type=mime_type,
+                )
+                files.append(file)
 
         # Grounding Sources
         if candidates := response.candidates:
@@ -74,7 +82,7 @@ class GeminiResponseHandler:
             thought=thought,
             code=code,
             code_output=code_output,
-            image=image_base64,
+            files=files,
             display=display,
             citations=citations,
             input_tokens=input_tokens,
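The Gemini handler now wraps each `inline_data` part in a `File` record instead of keeping a single base64 image string, so one response can carry multiple outputs of arbitrary MIME type. A minimal sketch of the filename derivation, using only the stdlib `mimetypes` module as the handler does (the example MIME types are illustrative):

```python
import mimetypes

# guess_extension returns None for unrecognized MIME types,
# so the handler falls back to a bare "generated_file" name.
for mime_type in ("image/png", "text/csv", "application/x-unknown"):
    extension = mimetypes.guess_extension(mime_type) or ""
    print(f"generated_file{extension}")
# generated_file.png
# generated_file.csv
# generated_file
```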
{llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py

@@ -1,6 +1,7 @@
 import logging
 import re
 from pprint import pprint
+from typing import Optional
 
 import httpx
 import openai
@@ -11,7 +12,7 @@ from openai.types.responses import WebSearchToolParam, Response
 from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens, \
     count_openai_output_tokens
 from llm_bridge.client.model_client.openai_client import OpenAIClient
-from llm_bridge.type.chat_response import ChatResponse, Citation
+from llm_bridge.type.chat_response import ChatResponse, Citation, File
 from llm_bridge.type.serializer import serialize
 
 
@@ -22,8 +23,8 @@ def process_openai_responses_non_stream_response(
 
     output_list = response.output
 
-    text = ""
-    image = None
+    text: str = ""
+    files: list[File] = []
     citations: list[Citation] = []
 
     for output in output_list:
@@ -40,15 +41,20 @@ def process_openai_responses_non_stream_response(
         #         url=annotation.url
         #     )
         # )
-        # Unable to test due to organization verification requirement
+        # Image Generation untestable due to organization verification requirement
         # if output.type == "image_generation_call":
-        #     image = output.result
+        #     file = File(
+        #         name="generated_image.png",
+        #         data=output.result,
+        #         type="image/png",
+        #     )
+        #     files.append(file)
 
-    chat_response = ChatResponse(text=text, image=image)
+    chat_response = ChatResponse(text=text, files=files)
    output_tokens = count_openai_output_tokens(chat_response)
     return ChatResponse(
         text=text,
-        image=image,
+        files=files,
         citations=citations,
         input_tokens=input_tokens,
         output_tokens=output_tokens,
{llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/openai/openai_token_couter.py

@@ -35,7 +35,7 @@ def count_openai_responses_input_tokens(messages: list[OpenAIResponsesMessage])
 
 def count_openai_output_tokens(chat_response: ChatResponse) -> int:
     text = chat_response.text
-    file_count = 1 if chat_response.image else 0
+    file_count = len(chat_response.files)
 
     return num_tokens_from_text(text) + file_count * 1000
 
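With `ChatResponse.files` in place, the output-token estimate charges a flat 1000 tokens per attached file rather than 1000 for at most one image. `len(chat_response.files)` assumes `files` is a list; both Responses clients satisfy this by constructing the provisional `ChatResponse` with an explicit (possibly empty) list, since the field defaults to `None`. A worked example (the text token count is illustrative, as it depends on the tiktoken encoding behind `num_tokens_from_text`):

```python
from llm_bridge.type.chat_response import ChatResponse, File

response = ChatResponse(
    text="Hello, world!",
    files=[
        File(name="a.png", data="<base64>", type="image/png"),
        File(name="b.png", data="<base64>", type="image/png"),
    ],
)
# count_openai_output_tokens(response)
#   == num_tokens_from_text("Hello, world!") + 2 * 1000
```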
{llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py

@@ -1,7 +1,7 @@
 import logging
 import re
 from pprint import pprint
-from typing import AsyncGenerator
+from typing import AsyncGenerator, Optional
 
 import httpx
 import openai
@@ -12,12 +12,13 @@ from openai.types.responses import ResponseStreamEvent
 from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens, \
     count_openai_output_tokens
 from llm_bridge.client.model_client.openai_client import OpenAIClient
-from llm_bridge.type.chat_response import ChatResponse, Citation
+from llm_bridge.type.chat_response import ChatResponse, Citation, File
 from llm_bridge.type.serializer import serialize
 
 
 def process_delta(event: ResponseStreamEvent) -> ChatResponse:
-    text = ""
+    text: str = ""
+    files: list[File] = []
     citations: list[Citation] = []
 
     if event.type == "response.output_text.delta":
@@ -25,9 +26,19 @@ def process_delta(event: ResponseStreamEvent) -> ChatResponse:
     # Citation is unavailable in OpenAI Responses API
     if event.type == "response.output_text.annotation.added":
         pass
+    # Image Generation untestable due to organization verification requirement
+    # if event.type == "response.image_generation_call.partial_image":
+    #     file = File(
+    #         name="generated_image.png",
+    #         data=event.partial_image_b64,
+    #         type="image/png",
+    #     )
+    #     files.append(file)
 
     chat_response = ChatResponse(
         text=text,
+        files=files,
+        citations=citations,
     )
     return chat_response
 
@@ -42,6 +53,8 @@ async def generate_chunk(
     output_tokens = count_openai_output_tokens(chat_response)
     yield ChatResponse(
         text=chat_response.text,
+        files=chat_response.files,
+        citations=chat_response.citations,
         input_tokens=input_tokens,
         output_tokens=output_tokens,
     )
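Each stream event is mapped to a per-delta `ChatResponse` that now carries `files` and `citations` alongside the text, and `generate_chunk` forwards all of them. A hypothetical accumulator (not part of LLM-Bridge) showing how a caller might fold the yielded chunks into one response:

```python
from llm_bridge.type.chat_response import ChatResponse


def merge_chunks(chunks: list[ChatResponse]) -> ChatResponse:
    # Start from empty collections; per-delta chunks may carry None fields.
    merged = ChatResponse(text="", files=[], citations=[])
    for chunk in chunks:
        merged.text += chunk.text or ""
        merged.files.extend(chunk.files or [])
        merged.citations.extend(chunk.citations or [])
        # Token counts are per-chunk estimates; keep the last values seen.
        merged.input_tokens = chunk.input_tokens
        merged.output_tokens = chunk.output_tokens
    return merged
```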
{llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/llm_bridge/type/chat_response.py

@@ -2,6 +2,13 @@ from dataclasses import dataclass
 from typing import Optional
 
 
+@dataclass
+class File:
+    name: str
+    data: str
+    type: str
+
+
 @dataclass
 class Citation:
     text: str
@@ -22,7 +29,7 @@ class ChatResponse:
     thought: Optional[str] = None
     code: Optional[str] = None
     code_output: Optional[str] = None
-    image: Optional[str] = None
+    files: Optional[list[File]] = None
     display: Optional[str] = None
     citations: Optional[list[Citation]] = None
     error: Optional[str] = None
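`ChatResponse.image` is replaced by `ChatResponse.files`, a list of the new `File` dataclass, so a response is no longer limited to a single image. A migration sketch (the base64 payload is a placeholder):

```python
from llm_bridge.type.chat_response import ChatResponse, File

image_base64 = "<base64-encoded PNG bytes>"

# 1.9.x: at most one image, as a bare base64 string
# response = ChatResponse(text="done", image=image_base64)

# 1.10.0: any number of typed files
response = ChatResponse(
    text="done",
    files=[File(name="generated_file.png", data=image_base64, type="image/png")],
)
```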
{llm_bridge-1.9.0a0 → llm_bridge-1.10.0}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "LLM-Bridge"
-version = "1.9.0-alpha.0"
+version = "1.10.0"
 authors = [
     {name = "windsnow1025", email = "windsnow1025@gmail.com"}
 ]
@@ -21,10 +21,10 @@ dependencies = [
     "fastapi",
     "httpx",
     "tenacity",
-    "openai==1.99.6",
+    "openai==1.106.1",
     "tiktoken==0.11.0",
     "google-genai==1.28.0",
-    "anthropic==0.62.0",
+    "anthropic==0.66.0",
     "PyMuPDF",
     "docxlatex>=1.1.1",
     "openpyxl",