LLM-Bridge 1.15.0.tar.gz → 1.15.1.tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (74)
  1. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/PKG-INFO +1 -1
  2. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/claude/claude_response_handler.py +84 -15
  3. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +1 -44
  4. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +2 -1
  5. llm_bridge-1.15.1/llm_bridge/logic/chat_generate/media_processor.py +23 -0
  6. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +10 -2
  7. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +2 -2
  8. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +1 -1
  9. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +1 -1
  10. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +1 -1
  11. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/type/chat_response.py +1 -1
  12. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/pyproject.toml +1 -1
  13. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/usage/main.py +8 -9
  14. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/uv.lock +1 -1
  15. llm_bridge-1.15.0/llm_bridge/logic/chat_generate/media_processor.py +0 -20
  16. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/.gitattributes +0 -0
  17. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/.github/workflows/python-publish.yml +0 -0
  18. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/.gitignore +0 -0
  19. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/LICENSE +0 -0
  20. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/MANIFEST.in +0 -0
  21. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/README.md +0 -0
  22. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/__init__.py +0 -0
  23. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/__init__.py +0 -0
  24. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/chat_client.py +0 -0
  25. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/__init__.py +0 -0
  26. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  27. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  28. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  29. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  30. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  31. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  32. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  33. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  34. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  35. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
  36. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  37. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
  38. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  39. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/implementations/printing_status.py +0 -0
  40. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/model_client/__init__.py +0 -0
  41. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/model_client/claude_client.py +0 -0
  42. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/model_client/gemini_client.py +0 -0
  43. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/client/model_client/openai_client.py +0 -0
  44. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/__init__.py +0 -0
  45. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  46. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  47. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  48. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  49. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  50. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +0 -0
  51. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +0 -0
  52. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  53. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/file_fetch.py +0 -0
  54. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  55. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  56. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  57. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  58. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  59. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/logic/model_prices.py +0 -0
  60. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/resources/__init__.py +0 -0
  61. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/resources/model_prices.json +0 -0
  62. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/type/__init__.py +0 -0
  63. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/type/message.py +0 -0
  64. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/type/model_message/__init__.py +0 -0
  65. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/type/model_message/claude_message.py +0 -0
  66. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/type/model_message/gemini_message.py +0 -0
  67. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/type/model_message/openai_message.py +0 -0
  68. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  69. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/llm_bridge/type/serializer.py +0 -0
  70. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/tests/__init__.py +0 -0
  71. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/tests/chat_client_factory_test.py +0 -0
  72. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/tests/message_preprocessor_test.py +0 -0
  73. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/usage/.env.example +0 -0
  74. {llm_bridge-1.15.0 → llm_bridge-1.15.1}/usage/workflow.py +0 -0
--- llm_bridge-1.15.0/PKG-INFO
+++ llm_bridge-1.15.1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.15.0
+Version: 1.15.1
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
--- llm_bridge-1.15.0/llm_bridge/client/implementations/claude/claude_response_handler.py
+++ llm_bridge-1.15.1/llm_bridge/client/implementations/claude/claude_response_handler.py
@@ -1,20 +1,36 @@
 from anthropic import BetaMessageStreamEvent, AsyncAnthropic
+from anthropic._response import AsyncBinaryAPIResponse
 from anthropic.types.beta import BetaRawContentBlockDelta, BetaThinkingDelta, BetaTextDelta, BetaInputJSONDelta, \
     BetaBashCodeExecutionToolResultBlock, \
     BetaTextEditorCodeExecutionToolResultBlock, BetaTextEditorCodeExecutionViewResultBlock, \
     BetaTextEditorCodeExecutionStrReplaceResultBlock, \
-    BetaServerToolUseBlock, BetaBashCodeExecutionResultBlock, BetaTextBlock, BetaThinkingBlock
+    BetaServerToolUseBlock, BetaBashCodeExecutionResultBlock, BetaTextBlock, BetaThinkingBlock, \
+    BetaBashCodeExecutionOutputBlock, BetaMessage, FileMetadata
 from anthropic.types.beta.beta_raw_content_block_start_event import ContentBlock
 
 from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
-from llm_bridge.type.chat_response import ChatResponse
-
+from llm_bridge.logic.chat_generate.media_processor import bytes_to_base64
+from llm_bridge.type.chat_response import ChatResponse, File
+
+
+async def download_claude_file(client: AsyncAnthropic, file_id: str) -> File:
+    file_metadata: FileMetadata = await client.beta.files.retrieve_metadata(file_id)
+    file_content: AsyncBinaryAPIResponse = await client.beta.files.download(file_id)
+    data = await file_content.read()
+    return File(
+        name=file_metadata.filename,
+        data=bytes_to_base64(data),
+        type=file_metadata.mime_type,
+    )
 
-def process_content_block(content_block: ContentBlock) -> ChatResponse:
-    text = ""
-    thought = ""
-    code = ""
-    code_output = ""
+async def process_content_block(
+        content_block: ContentBlock, client: AsyncAnthropic
+) -> ChatResponse:
+    text: str = ""
+    thought: str = ""
+    code: str = ""
+    code_output: str = ""
+    files: list[File] = []
 
     if content_block.type == "text":
         text_block: BetaTextBlock = content_block
@@ -31,23 +47,72 @@ def process_content_block(content_block: ContentBlock) -> ChatResponse:
     elif content_block.type == "bash_code_execution_tool_result":
         bash_code_execution_tool_result_block: BetaBashCodeExecutionToolResultBlock = content_block
         if bash_code_execution_tool_result_block.content.type == "bash_code_execution_result":
-            content: BetaBashCodeExecutionResultBlock = content_block.content
-            code_output += content.stdout
+            result: BetaBashCodeExecutionResultBlock = content_block.content
+            code_output += result.stdout
+            outputs: list[BetaBashCodeExecutionOutputBlock] = result.content
+            file_ids = [output.file_id for output in outputs]
+            for file_id in file_ids:
+                file = await download_claude_file(client, file_id)
+                files.append(file)
 
     elif content_block.type == "text_editor_code_execution_tool_result":
         text_editor_code_execution_tool_result: BetaTextEditorCodeExecutionToolResultBlock = content_block
         if text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_view_result":
-            content: BetaTextEditorCodeExecutionViewResultBlock = content_block.content
-            code_output += content.content
+            result: BetaTextEditorCodeExecutionViewResultBlock = content_block.content
+            code_output += result.content
         elif text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_str_replace_result":
-            content: BetaTextEditorCodeExecutionStrReplaceResultBlock = content_block.content
-            code_output += content.lines
+            result: BetaTextEditorCodeExecutionStrReplaceResultBlock = content_block.content
+            code_output += result.lines
+
+    return ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        files=files,
+    )
+
+
+async def process_claude_non_stream_response(
+        message: BetaMessage,
+        input_tokens: int,
+        client: AsyncAnthropic,
+        model: str,
+) -> ChatResponse:
+    text = ""
+    thought = ""
+    code = ""
+    code_output = ""
+    files: list[File] = []
 
+    for content_block in message.content:
+        content_block_chat_response = await process_content_block(content_block, client)
+        text += content_block_chat_response.text
+        thought += content_block_chat_response.thought
+        code += content_block_chat_response.code
+        code_output += content_block_chat_response.code_output
+        files.extend(content_block_chat_response.files)
+
+    chat_response = ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        files=files,
+    )
+    output_tokens = await count_claude_output_tokens(
+        client=client,
+        model=model,
+        chat_response=chat_response,
+    )
     return ChatResponse(
         text=text,
         thought=thought,
         code=code,
         code_output=code_output,
+        files=files,
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
     )
 
 
@@ -61,6 +126,7 @@ async def process_claude_stream_response(
     thought = ""
     code = ""
    code_output = ""
+    files: list[File] = []
 
     if event.type == "content_block_delta":
         event_delta: BetaRawContentBlockDelta = event.delta
@@ -79,17 +145,19 @@
 
     if event.type == "content_block_start":
         content_block: ContentBlock = event.content_block
-        content_block_chat_response = process_content_block(content_block)
+        content_block_chat_response = await process_content_block(content_block, client)
         text += content_block_chat_response.text
         thought += content_block_chat_response.thought
         code += content_block_chat_response.code
         code_output += content_block_chat_response.code_output
+        files.extend(content_block_chat_response.files)
 
     chat_response = ChatResponse(
         text=text,
         thought=thought,
         code=code,
         code_output=code_output,
+        files=files,
     )
     output_tokens = await count_claude_output_tokens(
         client=client,
@@ -101,6 +169,7 @@ async def process_claude_stream_response(
         thought=thought,
         code=code,
         code_output=code_output,
+        files=files,
         input_tokens=input_tokens,
         output_tokens=output_tokens,
     )
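For orientation, here is a minimal usage sketch of the new download_claude_file helper (not code from the package): the file ID below is a placeholder, and the client is assumed to pick up ANTHROPIC_API_KEY from the environment.

import asyncio

from anthropic import AsyncAnthropic

from llm_bridge.client.implementations.claude.claude_response_handler import download_claude_file


async def main() -> None:
    client = AsyncAnthropic()  # assumes ANTHROPIC_API_KEY is set in the environment
    # "file_abc123" is a placeholder; real IDs arrive in bash_code_execution output blocks
    file = await download_claude_file(client, "file_abc123")
    print(file.name, file.type, len(file.data))  # File.data holds a Base64 string


asyncio.run(main())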
--- llm_bridge-1.15.0/llm_bridge/client/implementations/claude/non_stream_claude_client.py
+++ llm_bridge-1.15.1/llm_bridge/client/implementations/claude/non_stream_claude_client.py
@@ -2,57 +2,14 @@ import logging
 import re
 
 import httpx
-from anthropic import AsyncAnthropic
-from anthropic.types.beta import BetaMessage, BetaBashCodeExecutionToolResultBlock, BetaTextBlock, BetaThinkingBlock, \
-    BetaServerToolUseBlock
 from fastapi import HTTPException
 
-from llm_bridge.client.implementations.claude.claude_response_handler import process_content_block
-from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
+from llm_bridge.client.implementations.claude.claude_response_handler import process_claude_non_stream_response
 from llm_bridge.client.model_client.claude_client import ClaudeClient
 from llm_bridge.type.chat_response import ChatResponse
 from llm_bridge.type.serializer import serialize
 
 
-async def process_claude_non_stream_response(
-        message: BetaMessage,
-        input_tokens: int,
-        client: AsyncAnthropic,
-        model: str,
-) -> ChatResponse:
-    text = ""
-    thought = ""
-    code = ""
-    code_output = ""
-
-    for content_block in message.content:
-        content_block_chat_response = process_content_block(content_block)
-        text += content_block_chat_response.text
-        thought += content_block_chat_response.thought
-        code += content_block_chat_response.code
-        code_output += content_block_chat_response.code_output
-
-    chat_response = ChatResponse(
-        text=text,
-        thought=thought,
-        code=code,
-        code_output=code_output,
-    )
-    output_tokens = await count_claude_output_tokens(
-        client=client,
-        model=model,
-        chat_response=chat_response,
-    )
-    return ChatResponse(
-        text=text,
-        thought=thought,
-        code=code,
-        code_output=code_output,
-        input_tokens=input_tokens,
-        output_tokens=output_tokens,
-    )
-
-
 class NonStreamClaudeClient(ClaudeClient):
     async def generate_non_stream_response(self) -> ChatResponse:
         try:
--- llm_bridge-1.15.0/llm_bridge/client/implementations/gemini/gemini_response_handler.py
+++ llm_bridge-1.15.1/llm_bridge/client/implementations/gemini/gemini_response_handler.py
@@ -7,6 +7,7 @@ from google.genai.types import Part
 
 from llm_bridge.client.implementations.gemini.gemini_token_counter import count_gemini_tokens
 from llm_bridge.client.implementations.printing_status import PrintingStatus
+from llm_bridge.logic.chat_generate.media_processor import bytes_to_base64
 from llm_bridge.type.chat_response import ChatResponse, File
 
 
@@ -57,7 +58,7 @@ class GeminiResponseHandler:
             extension = mimetypes.guess_extension(mime_type) or ""
             file = File(
                 name=f"generated_file{extension}",
-                data=base64.b64encode(part.inline_data.data).decode('utf-8'),
+                data=bytes_to_base64(part.inline_data.data),
                 type=mime_type,
             )
             files.append(file)
--- /dev/null
+++ llm_bridge-1.15.1/llm_bridge/logic/chat_generate/media_processor.py
@@ -0,0 +1,23 @@
+import base64
+
+from llm_bridge.logic.file_fetch import fetch_file_data
+
+
+def bytes_to_base64(data: bytes) -> str:
+    return base64.b64encode(data).decode('utf-8')
+
+
+async def get_bytes_content_from_url(req_url: str) -> tuple[bytes, str]:
+    file_data, media_type = await fetch_file_data(req_url)
+    return file_data, media_type
+
+
+async def get_base64_content_from_url(req_url: str) -> tuple[str, str]:
+    media_data, media_type = await get_bytes_content_from_url(req_url)
+    base64_media = bytes_to_base64(media_data)
+    return base64_media, media_type
+
+
+async def get_openai_image_content_from_url(req_img_url: str) -> str:
+    base64_image, media_type = await get_base64_content_from_url(req_img_url)
+    return f"data:{media_type};base64,{base64_image}"
--- llm_bridge-1.15.0/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py
+++ llm_bridge-1.15.1/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py
@@ -3,7 +3,8 @@ from typing import Any
 import anthropic
 from anthropic import Omit, transform_schema
 from anthropic.types import ThinkingConfigEnabledParam, AnthropicBetaParam
-from anthropic.types.beta import BetaWebSearchTool20250305Param, BetaToolUnionParam, BetaCodeExecutionTool20250825Param
+from anthropic.types.beta import BetaWebSearchTool20250305Param, BetaToolUnionParam, BetaCodeExecutionTool20250825Param, \
+    BetaToolBash20250124Param
 
 from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_input_tokens
 from llm_bridge.client.implementations.claude.non_stream_claude_client import NonStreamClaudeClient
@@ -61,7 +62,8 @@ async def create_claude_client(
         "context-1m-2025-08-07",
         "output-128k-2025-02-19",
         "code-execution-2025-08-25",
-        "structured-outputs-2025-11-13"
+        "files-api-2025-04-14",
+        "structured-outputs-2025-11-13",
     ]
     tools: list[BetaToolUnionParam] = []
     tools.append(
@@ -71,6 +73,12 @@ async def create_claude_client(
         )
     )
     if code_execution:
+        tools.append(
+            BetaToolBash20250124Param(
+                type="bash_20250124",
+                name="bash",
+            )
+        )
         tools.append(
             BetaCodeExecutionTool20250825Param(
                 type="code_execution_20250825",
--- llm_bridge-1.15.0/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py
+++ llm_bridge-1.15.1/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py
@@ -25,7 +25,7 @@ async def convert_message_to_claude(message: Message) -> ClaudeMessage:
             file_url = content_item.data
             file_type, sub_type = await get_file_type(file_url)
             if file_type == "image":
-                base64_image, media_type = await media_processor.get_encoded_content_from_url(file_url)
+                base64_image, media_type = await media_processor.get_base64_content_from_url(file_url)
                 image_content = ImageBlockParam(
                     type="image",
                     source=Base64ImageSourceParam(
@@ -36,7 +36,7 @@ async def convert_message_to_claude(message: Message) -> ClaudeMessage:
                 )
                 claude_content.append(image_content)
             elif sub_type == "pdf":
-                file_data, media_type = await media_processor.get_encoded_content_from_url(file_url)
+                file_data, media_type = await media_processor.get_base64_content_from_url(file_url)
                 pdf_content = DocumentBlockParam(
                     type="document",
                     source=Base64PDFSourceParam(
--- llm_bridge-1.15.0/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py
+++ llm_bridge-1.15.1/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py
@@ -23,7 +23,7 @@ async def convert_message_to_gemini(message: Message) -> GeminiMessage:
             file_url = content_item.data
             file_type, sub_type = await get_file_type(file_url)
             if sub_type == "pdf" or file_type in ("image", "video", "audio"):
-                file_data, media_type = await media_processor.get_raw_content_from_url(file_url)
+                file_data, media_type = await media_processor.get_bytes_content_from_url(file_url)
                 if media_type == 'video/webm':
                     media_type = 'audio/webm'
                 parts.append(types.Part.from_bytes(data=file_data, mime_type=media_type))
--- llm_bridge-1.15.0/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py
+++ llm_bridge-1.15.1/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py
@@ -28,7 +28,7 @@ async def convert_message_to_openai(message: Message) -> OpenAIMessage:
                 )
                 content.append(image_content)
             elif file_type == "audio":
-                encoded_string, _ = await media_processor.get_encoded_content_from_url(file_url)
+                encoded_string, _ = await media_processor.get_base64_content_from_url(file_url)
                 audio_content = ChatCompletionContentPartInputAudioParam(
                     type="input_audio",
                     input_audio=InputAudio(data=encoded_string, format=sub_type)
--- llm_bridge-1.15.0/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py
+++ llm_bridge-1.15.1/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py
@@ -30,7 +30,7 @@ async def convert_message_to_openai_responses(message: Message) -> OpenAIRespons
                 )
                 content.append(image_content)
             elif sub_type == "pdf":
-                file_data, _ = await media_processor.get_encoded_content_from_url(file_url)
+                file_data, _ = await media_processor.get_base64_content_from_url(file_url)
                 pdf_content = ResponseInputFileParam(
                     type="input_file",
                     filename=get_file_name(file_url),
--- llm_bridge-1.15.0/llm_bridge/type/chat_response.py
+++ llm_bridge-1.15.1/llm_bridge/type/chat_response.py
@@ -5,7 +5,7 @@ from typing import Optional
 @dataclass
 class File:
     name: str
-    data: str
+    data: str  # Base64
     type: str
 
 
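With File.data now documented as Base64, a consumer can decode it back to bytes; the save_file helper below is a hypothetical sketch, not part of the package:

import base64
import os

from llm_bridge.type.chat_response import File


def save_file(file: File, directory: str = ".") -> str:
    # Decode the Base64 payload and write it to disk under the file's own name
    path = os.path.join(directory, file.name)
    with open(path, "wb") as f:
        f.write(base64.b64decode(file.data))
    return path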
--- llm_bridge-1.15.0/pyproject.toml
+++ llm_bridge-1.15.1/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "LLM-Bridge"
-version = "1.15.0"
+version = "1.15.1"
 dependencies = [
     "fastapi",
     "httpx",
--- llm_bridge-1.15.0/usage/main.py
+++ llm_bridge-1.15.1/usage/main.py
@@ -98,7 +98,7 @@ messages = [
     # Content(type=ContentType.Text, data="Explain the concept of Occam's Razor and provide a simple, everyday example."),
 
     # Web Search
-    Content(type=ContentType.Text, data="What's the weather in NYC today?"),
+    # Content(type=ContentType.Text, data="What's the weather in NYC today?"),
 
     # Image Understanding
     # Content(type=ContentType.File, data="https://www.gstatic.com/webp/gallery3/1.png"),
@@ -114,8 +114,7 @@ messages = [
     # Content(type=ContentType.Text, data="What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
 
     # File Output
-    # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1758384216123-script.py"),
-    # Content(type=ContentType.Text, data="Please implement a minimum example of Neural Network in `script.py`"),
+    Content(type=ContentType.Text, data="Create a matplotlib visualization and save it as output.png"),
 
     # Structured Output
     # Content(type=ContentType.Text, data="Please generate a product."),
@@ -139,23 +138,23 @@ messages = [
 # model = "gpt-5-pro"
 # model = "gpt-5"
 # model = "gpt-4.1"
-model = "gemini-3-pro-preview"
+# model = "gemini-3-pro-preview"
 # model = "gemini-3-pro-image-preview"
 # model = "gemini-3-flash-preview"
 # model = "grok-4-1-fast-reasoning"
-# model = "claude-sonnet-4-5"
+model = "claude-sonnet-4-5"
 # model = "claude-opus-4-5"
-api_type = "Gemini-Vertex"
+# api_type = "Gemini-Vertex"
 # api_type = "Gemini-Free"
 # api_type = "Gemini-Paid"
 # api_type = "OpenAI"
 # api_type = "OpenAI-Azure"
 # api_type = "OpenAI-GitHub"
-# api_type = "Claude"
+api_type = "Claude"
 # api_type = "Grok"
 temperature = 0
-# stream = True
-stream = False
+stream = True
+# stream = False
 thought = True
 # thought = False
 code_execution = True
--- llm_bridge-1.15.0/uv.lock
+++ llm_bridge-1.15.1/uv.lock
@@ -357,7 +357,7 @@ wheels = [
 
 [[package]]
 name = "llm-bridge"
-version = "1.15.0a0"
+version = "1.15.1"
 source = { editable = "." }
 dependencies = [
     { name = "anthropic" },
--- llm_bridge-1.15.0/llm_bridge/logic/chat_generate/media_processor.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import base64
-
-from llm_bridge.logic.file_fetch import fetch_file_data
-
-
-async def get_raw_content_from_url(req_url: str) -> tuple[bytes, str]:
-    file_data, media_type = await fetch_file_data(req_url)
-    return file_data, media_type
-
-
-# Base64 Encoded
-async def get_encoded_content_from_url(req_url: str) -> tuple[str, str]:
-    media_data, media_type = await get_raw_content_from_url(req_url)
-    base64_media = base64.b64encode(media_data).decode('utf-8')
-    return base64_media, media_type
-
-
-async def get_openai_image_content_from_url(req_img_url: str) -> str:
-    base64_image, media_type = await get_encoded_content_from_url(req_img_url)
-    return f"data:{media_type};base64,{base64_image}"