LLM-Bridge 1.11.3.tar.gz → 1.11.3b0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0/LLM_Bridge.egg-info}/PKG-INFO +1 -1
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/LLM_Bridge.egg-info/SOURCES.txt +1 -1
- {llm_bridge-1.11.3/LLM_Bridge.egg-info → llm_bridge-1.11.3b0}/PKG-INFO +1 -1
- llm_bridge-1.11.3b0/llm_bridge/client/implementations/claude/claude_stream_response_handler.py +70 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +21 -6
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/claude/stream_claude_client.py +3 -2
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/pyproject.toml +1 -1
- llm_bridge-1.11.3/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -106
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/LICENSE +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/LLM_Bridge.egg-info/dependency_links.txt +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/LLM_Bridge.egg-info/requires.txt +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/LLM_Bridge.egg-info/top_level.txt +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/MANIFEST.in +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/README.md +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/chat_client.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/claude/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/openai/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/printing_status.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/model_client/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/model_client/claude_client.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/model_client/gemini_client.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/model_client/openai_client.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/chat_generate/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/file_fetch.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/logic/model_prices.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/resources/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/resources/model_prices.json +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/type/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/type/chat_response.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/type/message.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/type/model_message/__init__.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/type/model_message/claude_message.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/type/model_message/gemini_message.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/type/model_message/openai_message.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/type/serializer.py +0 -0
- {llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/setup.cfg +0 -0
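In short, 1.11.3b0 deletes the module-level claude_response_handler.py, adds a class-based claude_stream_response_handler.py, inlines the content-block handling in non_stream_claude_client.py, and points stream_claude_client.py at the new handler class.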
{llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/LLM_Bridge.egg-info/SOURCES.txt
@@ -13,7 +13,7 @@ llm_bridge/client/chat_client.py
 llm_bridge/client/implementations/__init__.py
 llm_bridge/client/implementations/printing_status.py
 llm_bridge/client/implementations/claude/__init__.py
-llm_bridge/client/implementations/claude/claude_response_handler.py
+llm_bridge/client/implementations/claude/claude_stream_response_handler.py
 llm_bridge/client/implementations/claude/claude_token_counter.py
 llm_bridge/client/implementations/claude/non_stream_claude_client.py
 llm_bridge/client/implementations/claude/stream_claude_client.py
llm_bridge-1.11.3b0/llm_bridge/client/implementations/claude/claude_stream_response_handler.py
ADDED
@@ -0,0 +1,70 @@
+from anthropic import BetaMessageStreamEvent, AsyncAnthropic
+from anthropic.types.beta import BetaRawContentBlockDelta, BetaThinkingDelta, BetaTextDelta, BetaInputJSONDelta, \
+    BetaBashCodeExecutionToolResultBlock, \
+    BetaTextEditorCodeExecutionToolResultBlock
+from anthropic.types.beta.beta_raw_content_block_start_event import ContentBlock
+
+from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
+from llm_bridge.type.chat_response import ChatResponse
+
+
+class ClaudeStreamResponseHandler:
+    async def process_claude_stream_response(
+            self,
+            event: BetaMessageStreamEvent,
+            input_tokens: int,
+            client: AsyncAnthropic,
+            model: str,
+    ) -> ChatResponse:
+        text = ""
+        thought = ""
+        code = ""
+        code_output = ""
+
+        if event.type == "content_block_delta":
+            event_delta: BetaRawContentBlockDelta = event.delta
+
+            if event_delta.type == "text_delta":
+                text_delta: BetaTextDelta = event_delta
+                text += text_delta.text
+
+            elif event_delta.type == "thinking_delta":
+                thinking_delta: BetaThinkingDelta = event_delta
+                thought += thinking_delta.thinking
+
+            elif event_delta.type == "input_json_delta":
+                input_json_delta: BetaInputJSONDelta = event_delta
+                code += input_json_delta.partial_json
+
+        if event.type == "content_block_start":
+            event_content_block: ContentBlock = event.content_block
+
+            if event_content_block.type == "bash_code_execution_tool_result":
+                bash_code_execution_tool_result_block: BetaBashCodeExecutionToolResultBlock = event_content_block
+                if bash_code_execution_tool_result_block.content.type == "bash_code_execution_result":
+                    code_output += event_content_block.content.stdout
+
+            elif event_content_block.type == "text_editor_code_execution_tool_result":
+                text_editor_code_execution_tool_result: BetaTextEditorCodeExecutionToolResultBlock = event_content_block
+                if text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_view_result":
+                    code_output += event_content_block.content.content
+
+        chat_response = ChatResponse(
+            text=text,
+            thought=thought,
+            code=code,
+            code_output=code_output,
+        )
+        output_tokens = await count_claude_output_tokens(
+            client=client,
+            model=model,
+            chat_response=chat_response,
+        )
+        return ChatResponse(
+            text=text,
+            thought=thought,
+            code=code,
+            code_output=code_output,
+            input_tokens=input_tokens,
+            output_tokens=output_tokens,
+        )
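The new handler consumes raw BetaMessageStreamEvent objects and returns an accumulated ChatResponse per event. As a rough standalone sketch of how it is driven (the model name and message payload below are illustrative assumptions, not taken from the package; the package's own wiring is shown in the stream_claude_client.py diff further down):

    import asyncio

    from anthropic import AsyncAnthropic

    from llm_bridge.client.implementations.claude.claude_stream_response_handler import ClaudeStreamResponseHandler


    async def main() -> None:
        client = AsyncAnthropic()  # reads ANTHROPIC_API_KEY from the environment
        model = "claude-sonnet-4-20250514"  # illustrative model name
        handler = ClaudeStreamResponseHandler()

        async with client.beta.messages.stream(
            model=model,
            max_tokens=1024,
            messages=[{"role": "user", "content": "Hello"}],
        ) as stream:
            async for event in stream:
                # Each call inspects one stream event and returns a ChatResponse
                # carrying any text/thought/code deltas plus token counts.
                chat_response = await handler.process_claude_stream_response(
                    event=event,
                    input_tokens=0,  # the real client passes its precomputed count
                    client=client,
                    model=model,
                )
                print(chat_response.text, end="")


    asyncio.run(main())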
{llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/claude/non_stream_claude_client.py
@@ -7,7 +7,6 @@ from anthropic.types.beta import BetaMessage, BetaBashCodeExecutionToolResultBlo
     BetaServerToolUseBlock
 from fastapi import HTTPException
 
-from llm_bridge.client.implementations.claude.claude_response_handler import process_content_block
 from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
 from llm_bridge.client.model_client.claude_client import ClaudeClient
 from llm_bridge.type.chat_response import ChatResponse
@@ -26,11 +25,27 @@ async def process_claude_non_stream_response(
     code_output = ""
 
     for content_block in message.content:
-        content_block_chat_response = process_content_block(content_block)
-        text += content_block_chat_response.text
-        thought += content_block_chat_response.thought
-        code += content_block_chat_response.code
-        code_output += content_block_chat_response.code_output
+        if content_block.type == "text":
+            text_block: BetaTextBlock = content_block
+            text += text_block.text
+
+        elif content_block.type == "thinking":
+            thinking_block: BetaThinkingBlock = content_block
+            thought += thinking_block.thinking
+
+        elif content_block.type == "server_tool_use":
+            server_tool_use_block: BetaServerToolUseBlock = content_block
+            code += server_tool_use_block.input
+
+        elif content_block.type == "bash_code_execution_tool_result":
+            bash_code_execution_tool_result_block: BetaBashCodeExecutionToolResultBlock = content_block
+            if bash_code_execution_tool_result_block.content.type == "bash_code_execution_result":
+                code_output += bash_code_execution_tool_result_block.content.stdout
+
+        elif content_block.type == "text_editor_code_execution_tool_result":
+            text_editor_code_execution_tool_result: BetaBashCodeExecutionToolResultBlock = content_block
+            if text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_view_result":
+                code_output += content_block.content.content
 
     chat_response = ChatResponse(
         text=text,
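For comparison with the inlined dispatch above, the non-stream path receives a complete BetaMessage and walks its typed content blocks once. A minimal sketch under the same assumptions (illustrative model name and payload; the package's client also threads tools, betas, and HTTP error handling):

    import asyncio

    from anthropic import AsyncAnthropic


    async def main() -> None:
        client = AsyncAnthropic()
        message = await client.beta.messages.create(
            model="claude-sonnet-4-20250514",  # illustrative model name
            max_tokens=1024,
            messages=[{"role": "user", "content": "Hello"}],
        )

        # message.content is a list of typed blocks; dispatching on .type is how
        # process_claude_non_stream_response fills text/thought/code/code_output.
        text = ""
        for content_block in message.content:
            if content_block.type == "text":
                text += content_block.text
        print(text)


    asyncio.run(main())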
{llm_bridge-1.11.3 → llm_bridge-1.11.3b0}/llm_bridge/client/implementations/claude/stream_claude_client.py
@@ -5,7 +5,7 @@ from typing import AsyncGenerator
 import httpx
 from fastapi import HTTPException
 
-from llm_bridge.client.implementations.claude.claude_response_handler import process_claude_stream_response
+from llm_bridge.client.implementations.claude.claude_stream_response_handler import ClaudeStreamResponseHandler
 from llm_bridge.client.model_client.claude_client import ClaudeClient
 from llm_bridge.type.chat_response import ChatResponse
 from llm_bridge.type.serializer import serialize
@@ -26,8 +26,9 @@ class StreamClaudeClient(ClaudeClient):
             betas=self.betas,
             tools=self.tools,
         ) as stream:
+            stream_response_handler = ClaudeStreamResponseHandler()
             async for event in stream:
-                yield await process_claude_stream_response(
+                yield await stream_response_handler.process_claude_stream_response(
                     event=event,
                     input_tokens=self.input_tokens,
                     client=self.client,
llm_bridge-1.11.3/llm_bridge/client/implementations/claude/claude_response_handler.py
DELETED
@@ -1,106 +0,0 @@
-from anthropic import BetaMessageStreamEvent, AsyncAnthropic
-from anthropic.types.beta import BetaRawContentBlockDelta, BetaThinkingDelta, BetaTextDelta, BetaInputJSONDelta, \
-    BetaBashCodeExecutionToolResultBlock, \
-    BetaTextEditorCodeExecutionToolResultBlock, BetaTextEditorCodeExecutionViewResultBlock, \
-    BetaTextEditorCodeExecutionStrReplaceResultBlock, \
-    BetaServerToolUseBlock, BetaBashCodeExecutionResultBlock, BetaTextBlock, BetaThinkingBlock
-from anthropic.types.beta.beta_raw_content_block_start_event import ContentBlock
-
-from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
-from llm_bridge.type.chat_response import ChatResponse
-
-
-def process_content_block(content_block: ContentBlock) -> ChatResponse:
-    text = ""
-    thought = ""
-    code = ""
-    code_output = ""
-
-    if content_block.type == "text":
-        text_block: BetaTextBlock = content_block
-        text += text_block.text
-
-    elif content_block.type == "thinking":
-        thinking_block: BetaThinkingBlock = content_block
-        thought += thinking_block.thinking
-
-    elif content_block.type == "server_tool_use":
-        server_tool_use_block: BetaServerToolUseBlock = content_block
-        code += str(server_tool_use_block.input)
-
-    elif content_block.type == "bash_code_execution_tool_result":
-        bash_code_execution_tool_result_block: BetaBashCodeExecutionToolResultBlock = content_block
-        if bash_code_execution_tool_result_block.content.type == "bash_code_execution_result":
-            content: BetaBashCodeExecutionResultBlock = content_block.content
-            code_output += content.stdout
-
-    elif content_block.type == "text_editor_code_execution_tool_result":
-        text_editor_code_execution_tool_result: BetaTextEditorCodeExecutionToolResultBlock = content_block
-        if text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_view_result":
-            content: BetaTextEditorCodeExecutionViewResultBlock = content_block.content
-            code_output += content.content
-        elif text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_str_replace_result":
-            content: BetaTextEditorCodeExecutionStrReplaceResultBlock = content_block.content
-            code_output += content.lines
-
-    return ChatResponse(
-        text=text,
-        thought=thought,
-        code=code,
-        code_output=code_output,
-    )
-
-
-async def process_claude_stream_response(
-        event: BetaMessageStreamEvent,
-        input_tokens: int,
-        client: AsyncAnthropic,
-        model: str,
-) -> ChatResponse:
-    text = ""
-    thought = ""
-    code = ""
-    code_output = ""
-
-    if event.type == "content_block_delta":
-        event_delta: BetaRawContentBlockDelta = event.delta
-
-        if event_delta.type == "text_delta":
-            text_delta: BetaTextDelta = event_delta
-            text += text_delta.text
-
-        elif event_delta.type == "thinking_delta":
-            thinking_delta: BetaThinkingDelta = event_delta
-            thought += thinking_delta.thinking
-
-        elif event_delta.type == "input_json_delta":
-            input_json_delta: BetaInputJSONDelta = event_delta
-            code += input_json_delta.partial_json
-
-    if event.type == "content_block_start":
-        content_block: ContentBlock = event.content_block
-        content_block_chat_response = process_content_block(content_block)
-        text += content_block_chat_response.text
-        thought += content_block_chat_response.thought
-        code += content_block_chat_response.code
-        code_output += content_block_chat_response.code_output
-
-    chat_response = ChatResponse(
-        text=text,
-        thought=thought,
-        code=code,
-        code_output=code_output,
-    )
-    output_tokens = await count_claude_output_tokens(
-        client=client,
-        model=model,
-        chat_response=chat_response,
-    )
-    return ChatResponse(
-        text=text,
-        thought=thought,
-        code=code,
-        code_output=code_output,
-        input_tokens=input_tokens,
-        output_tokens=output_tokens,
-    )