LLM-Bridge 1.9.0a1__tar.gz → 1.10.0a1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1/LLM_Bridge.egg-info}/PKG-INFO +1 -1
- {llm_bridge-1.9.0a1/LLM_Bridge.egg-info → llm_bridge-1.10.0a1}/PKG-INFO +1 -1
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +5 -4
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +4 -4
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/openai/openai_token_couter.py +1 -1
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +4 -4
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/type/chat_response.py +1 -1
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/pyproject.toml +1 -1
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/LICENSE +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/LLM_Bridge.egg-info/SOURCES.txt +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/LLM_Bridge.egg-info/dependency_links.txt +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/LLM_Bridge.egg-info/requires.txt +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/LLM_Bridge.egg-info/top_level.txt +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/MANIFEST.in +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/README.md +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/chat_client.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/claude/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/claude/claude_stream_response_handler.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/openai/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/printing_status.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/model_client/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/model_client/claude_client.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/model_client/gemini_client.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/model_client/openai_client.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/chat_generate/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/file_fetch.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/logic/model_prices.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/resources/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/resources/model_prices.json +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/type/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/type/message.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/type/model_message/__init__.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/type/model_message/claude_message.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/type/model_message/gemini_message.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/type/model_message/openai_message.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/type/serializer.py +0 -0
- {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/setup.cfg +0 -0
{llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/gemini/gemini_response_handler.py (+5 -4)

```diff
@@ -22,7 +22,7 @@ class GeminiResponseHandler:
         thought: str = ""
         code: str = ""
         code_output: str = ""
-
+        files: list[str] = []
         display: Optional[str] = None
         citations: list[Citation] = extract_citations(response)
         input_tokens, stage_output_tokens = await count_gemini_tokens(response)
@@ -46,9 +46,10 @@ class GeminiResponseHandler:
             # Code Output
             if part.code_execution_result is not None:
                 code_output += part.code_execution_result.output
-            #
+            # File
            if part.inline_data is not None:
-
+                file = base64.b64encode(part.inline_data.data).decode('utf-8')
+                files.append(file)

            # Grounding Sources
            if candidates := response.candidates:
@@ -74,7 +75,7 @@ class GeminiResponseHandler:
            thought=thought,
            code=code,
            code_output=code_output,
-
+            files=files,
            display=display,
            citations=citations,
            input_tokens=input_tokens,
```
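The new Gemini logic base64-encodes any `inline_data` part (e.g. an image produced by code execution) into the `files` list, so binary payloads can travel inside a JSON-serializable response. A minimal sketch of that encoding step, using a hypothetical stand-in object rather than the real `google-genai` part type:

```python
import base64


class FakeInlineData:
    """Hypothetical stand-in for a Gemini part's inline_data (raw bytes)."""
    def __init__(self, data: bytes):
        self.data = data


files: list[str] = []
inline_data = FakeInlineData(b"\x89PNG\r\n\x1a\n...")  # e.g. raw image bytes

# Mirrors the handler's new branch: bytes -> base64 -> UTF-8 string.
file = base64.b64encode(inline_data.data).decode("utf-8")
files.append(file)

# A consumer can recover the original bytes losslessly:
assert base64.b64decode(files[0]) == inline_data.data
```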
{llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py (+4 -4)

```diff
@@ -24,7 +24,7 @@ def process_openai_responses_non_stream_response(
     output_list = response.output

     text: str = ""
-
+    files: list[str] = []
     citations: list[Citation] = []

     for output in output_list:
@@ -43,13 +43,13 @@ def process_openai_responses_non_stream_response(
         # )
         # Image Generation untestable due to organization verification requirement
         # if output.type == "image_generation_call":
-        #
+        #     files.append(output.result)

-    chat_response = ChatResponse(text=text,
+    chat_response = ChatResponse(text=text, files=files)
     output_tokens = count_openai_output_tokens(chat_response)
     return ChatResponse(
         text=text,
-
+        files=files,
         citations=citations,
         input_tokens=input_tokens,
         output_tokens=output_tokens,
```
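In the non-stream client, an interim `ChatResponse` carrying `text` and `files` is built first so `count_openai_output_tokens` can price the output, and the final response is then rebuilt with the token counts attached. A simplified sketch of that two-step flow, using trimmed stand-ins for the library's `ChatResponse` and token counter:

```python
from dataclasses import dataclass, field


@dataclass
class ChatResponse:
    """Trimmed stand-in for llm_bridge.type.chat_response.ChatResponse."""
    text: str = ""
    files: list[str] = field(default_factory=list)
    output_tokens: int = 0


def count_openai_output_tokens(resp: ChatResponse) -> int:
    # Stand-in counter: word count plus a flat charge per file
    # (mirrors the heuristic in openai_token_couter.py).
    return len(resp.text.split()) + len(resp.files) * 1000


text, files = "hello world", ["aGVsbG8="]  # files hold base64 strings
interim = ChatResponse(text=text, files=files)  # built only for counting
final = ChatResponse(
    text=text,
    files=files,
    output_tokens=count_openai_output_tokens(interim),
)
print(final.output_tokens)  # 1002
```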
{llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/openai/openai_token_couter.py (+1 -1)

```diff
@@ -35,7 +35,7 @@ def count_openai_responses_input_tokens(messages: list[OpenAIResponsesMessage])

 def count_openai_output_tokens(chat_response: ChatResponse) -> int:
     text = chat_response.text
-    file_count =
+    file_count = len(chat_response.files)

     return num_tokens_from_text(text) + file_count * 1000

```
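`count_openai_output_tokens` now charges a flat 1,000 tokens per returned file on top of the text tokens. A rough illustration of the heuristic; `num_tokens_from_text` here is a crude whitespace stand-in for the library's tokenizer-based helper:

```python
def num_tokens_from_text(text: str) -> int:
    # Crude stand-in: the real helper uses a proper tokenizer.
    return len(text.split())


def count_output_tokens(text: str, files: list[str]) -> int:
    # Flat 1000-token charge per generated file, as in the new code.
    return num_tokens_from_text(text) + len(files) * 1000


print(count_output_tokens("two words", []))        # 2
print(count_output_tokens("two words", ["aGk="]))  # 1002
```

The flat per-file charge is presumably an approximation, since a base64 image payload has no meaningful text tokenization.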
{llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py (+4 -4)

```diff
@@ -18,7 +18,7 @@ from llm_bridge.type.serializer import serialize

 def process_delta(event: ResponseStreamEvent) -> ChatResponse:
     text: str = ""
-
+    files: list[str] = []
     citations: list[Citation] = []

     if event.type == "response.output_text.delta":
@@ -28,11 +28,11 @@ def process_delta(event: ResponseStreamEvent) -> ChatResponse:
         pass
     # Image Generation untestable due to organization verification requirement
     # if event.type == "response.image_generation_call.partial_image":
-    #
+    #     files.append(event.partial_image_b64)

     chat_response = ChatResponse(
         text=text,
-
+        files=files,
         citations=citations,
     )
     return chat_response
@@ -48,7 +48,7 @@ async def generate_chunk(
     output_tokens = count_openai_output_tokens(chat_response)
     yield ChatResponse(
         text=chat_response.text,
-
+        files=chat_response.files,
         citations=chat_response.citations,
         input_tokens=input_tokens,
         output_tokens=output_tokens,
```
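On the streaming side every delta event is mapped to its own `ChatResponse`, so the new `files` field has to be copied onto each yielded chunk as well. A minimal async sketch of that shape, with hypothetical dict events standing in for the OpenAI SDK's `ResponseStreamEvent` objects:

```python
import asyncio
from dataclasses import dataclass, field


@dataclass
class ChatResponse:
    """Trimmed stand-in with only the fields this sketch needs."""
    text: str = ""
    files: list[str] = field(default_factory=list)


def process_delta(event: dict) -> ChatResponse:
    # Text deltas populate `text`; image partials would populate `files`.
    if event["type"] == "response.output_text.delta":
        return ChatResponse(text=event["delta"])
    return ChatResponse()


async def generate_chunks(events):
    for event in events:
        resp = process_delta(event)
        # Re-emit each delta, carrying files along with the text.
        yield ChatResponse(text=resp.text, files=resp.files)


async def main():
    events = [
        {"type": "response.output_text.delta", "delta": "Hi"},
        {"type": "response.completed"},
    ]
    async for chunk in generate_chunks(events):
        print(chunk)


asyncio.run(main())
```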
{llm_bridge-1.9.0a1 → llm_bridge-1.10.0a1}/llm_bridge/type/chat_response.py (+1 -1)

```diff
@@ -22,7 +22,7 @@ class ChatResponse:
     thought: Optional[str] = None
     code: Optional[str] = None
     code_output: Optional[str] = None
-
+    files: Optional[list[str]] = None
     display: Optional[str] = None
     citations: Optional[list[Citation]] = None
     error: Optional[str] = None
```
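With the new optional `files` field on `ChatResponse`, consumers can detect and decode returned attachments regardless of which backend produced them. A short usage sketch, assuming `ChatResponse` is a dataclass (the `Optional[...] = None` defaults suggest this, but the diff does not show the decorator):

```python
import base64
from dataclasses import dataclass
from typing import Optional


@dataclass
class ChatResponse:
    """Abbreviated to the fields visible in the diff."""
    text: Optional[str] = None
    files: Optional[list[str]] = None
    error: Optional[str] = None


resp = ChatResponse(text="done", files=["aGVsbG8="])

# Decode each base64 payload back to bytes and persist it.
for i, b64 in enumerate(resp.files or []):
    with open(f"output_{i}.bin", "wb") as fh:
        fh.write(base64.b64decode(b64))
```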