LLM-Bridge 1.11.2-py3-none-any.whl → 1.11.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llm_bridge/client/implementations/claude/claude_response_handler.py (new file)
@@ -0,0 +1,106 @@
+from anthropic import BetaMessageStreamEvent, AsyncAnthropic
+from anthropic.types.beta import BetaRawContentBlockDelta, BetaThinkingDelta, BetaTextDelta, BetaInputJSONDelta, \
+    BetaBashCodeExecutionToolResultBlock, \
+    BetaTextEditorCodeExecutionToolResultBlock, BetaTextEditorCodeExecutionViewResultBlock, \
+    BetaTextEditorCodeExecutionStrReplaceResultBlock, \
+    BetaServerToolUseBlock, BetaBashCodeExecutionResultBlock, BetaTextBlock, BetaThinkingBlock
+from anthropic.types.beta.beta_raw_content_block_start_event import ContentBlock
+
+from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
+from llm_bridge.type.chat_response import ChatResponse
+
+
+def process_content_block(content_block: ContentBlock) -> ChatResponse:
+    text = ""
+    thought = ""
+    code = ""
+    code_output = ""
+
+    if content_block.type == "text":
+        text_block: BetaTextBlock = content_block
+        text += text_block.text
+
+    elif content_block.type == "thinking":
+        thinking_block: BetaThinkingBlock = content_block
+        thought += thinking_block.thinking
+
+    elif content_block.type == "server_tool_use":
+        server_tool_use_block: BetaServerToolUseBlock = content_block
+        code += str(server_tool_use_block.input)
+
+    elif content_block.type == "bash_code_execution_tool_result":
+        bash_code_execution_tool_result_block: BetaBashCodeExecutionToolResultBlock = content_block
+        if bash_code_execution_tool_result_block.content.type == "bash_code_execution_result":
+            content: BetaBashCodeExecutionResultBlock = content_block.content
+            code_output += content.stdout
+
+    elif content_block.type == "text_editor_code_execution_tool_result":
+        text_editor_code_execution_tool_result: BetaTextEditorCodeExecutionToolResultBlock = content_block
+        if text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_view_result":
+            content: BetaTextEditorCodeExecutionViewResultBlock = content_block.content
+            code_output += content.content
+        elif text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_str_replace_result":
+            content: BetaTextEditorCodeExecutionStrReplaceResultBlock = content_block.content
+            code_output += content.lines
+
+    return ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+    )
+
+
+async def process_claude_stream_response(
+        event: BetaMessageStreamEvent,
+        input_tokens: int,
+        client: AsyncAnthropic,
+        model: str,
+) -> ChatResponse:
+    text = ""
+    thought = ""
+    code = ""
+    code_output = ""
+
+    if event.type == "content_block_delta":
+        event_delta: BetaRawContentBlockDelta = event.delta
+
+        if event_delta.type == "text_delta":
+            text_delta: BetaTextDelta = event_delta
+            text += text_delta.text
+
+        elif event_delta.type == "thinking_delta":
+            thinking_delta: BetaThinkingDelta = event_delta
+            thought += thinking_delta.thinking
+
+        elif event_delta.type == "input_json_delta":
+            input_json_delta: BetaInputJSONDelta = event_delta
+            code += input_json_delta.partial_json
+
+    if event.type == "content_block_start":
+        content_block: ContentBlock = event.content_block
+        content_block_chat_response = process_content_block(content_block)
+        text += content_block_chat_response.text
+        thought += content_block_chat_response.thought
+        code += content_block_chat_response.code
+        code_output += content_block_chat_response.code_output
+
+    chat_response = ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+    )
+    output_tokens = await count_claude_output_tokens(
+        client=client,
+        model=model,
+        chat_response=chat_response,
+    )
+    return ChatResponse(
+        text=text,
+        thought=thought,
+        code=code,
+        code_output=code_output,
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
+    )
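
The new claude_response_handler.py above centralizes Claude content-block handling: process_content_block maps each beta block type onto the four ChatResponse fields (text, thought, code, code_output), and process_claude_stream_response reuses it for content_block_start events while handling the per-delta cases itself. A minimal usage sketch (illustration only; it assumes the anthropic SDK's BetaTextBlock pydantic model validates with just type and text):

    from anthropic.types.beta import BetaTextBlock
    from llm_bridge.client.implementations.claude.claude_response_handler import process_content_block

    # A plain text block lands in ChatResponse.text; the other fields stay empty.
    block = BetaTextBlock(type="text", text="Hello from Claude")
    response = process_content_block(block)
    assert response.text == "Hello from Claude"
    assert response.thought == "" and response.code == "" and response.code_output == ""
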
llm_bridge/client/implementations/claude/claude_token_counter.py
@@ -28,7 +28,11 @@ async def count_claude_output_tokens(
         model: str,
         chat_response: ChatResponse,
 ) -> int:
-    text = chat_response.text.strip()
+    text = ""
+    text += chat_response.text.strip()
+    text += chat_response.thought.strip()
+    text += chat_response.code.strip()
+    text += chat_response.code_output.strip()
     if text == "":
         return 0
 
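
With this change the output-token counter measures everything the handlers can now emit, not just visible text. Note that the stripped fields are concatenated with no separator before counting; a small illustration with hypothetical values:

    from llm_bridge.type.chat_response import ChatResponse

    response = ChatResponse(text="Done.", thought="", code="print(1)", code_output="1\n")
    # What 1.11.3 submits for counting: "Done." + "" + "print(1)" + "1"
    combined = (response.text.strip() + response.thought.strip()
                + response.code.strip() + response.code_output.strip())
    assert combined == "Done.print(1)1"
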
llm_bridge/client/implementations/claude/non_stream_claude_client.py
@@ -3,9 +3,11 @@ import re
 
 import httpx
 from anthropic import AsyncAnthropic
-from anthropic.types.beta import BetaMessage
+from anthropic.types.beta import BetaMessage, BetaBashCodeExecutionToolResultBlock, BetaTextBlock, BetaThinkingBlock, \
+    BetaServerToolUseBlock
 from fastapi import HTTPException
 
+from llm_bridge.client.implementations.claude.claude_response_handler import process_content_block
 from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
 from llm_bridge.client.model_client.claude_client import ClaudeClient
 from llm_bridge.type.chat_response import ChatResponse
@@ -20,16 +22,21 @@ async def process_claude_non_stream_response(
 ) -> ChatResponse:
     text = ""
     thought = ""
+    code = ""
+    code_output = ""
 
-    for content in message.content:
-        if content.type == "thinking":
-            thought += content.thinking
-        if content.type == "text":
-            text += content.text
+    for content_block in message.content:
+        content_block_chat_response = process_content_block(content_block)
+        text += content_block_chat_response.text
+        thought += content_block_chat_response.thought
+        code += content_block_chat_response.code
+        code_output += content_block_chat_response.code_output
 
     chat_response = ChatResponse(
         text=text,
         thought=thought,
+        code=code,
+        code_output=code_output,
     )
     output_tokens = await count_claude_output_tokens(
         client=client,
@@ -39,6 +46,8 @@ async def process_claude_non_stream_response(
     return ChatResponse(
         text=text,
         thought=thought,
+        code=code,
+        code_output=code_output,
         input_tokens=input_tokens,
         output_tokens=output_tokens,
     )
llm_bridge/client/implementations/claude/stream_claude_client.py
@@ -5,7 +5,7 @@ from typing import AsyncGenerator
 import httpx
 from fastapi import HTTPException
 
-from llm_bridge.client.implementations.claude.claude_stream_response_handler import ClaudeStreamResponseHandler
+from llm_bridge.client.implementations.claude.claude_response_handler import process_claude_stream_response
 from llm_bridge.client.model_client.claude_client import ClaudeClient
 from llm_bridge.type.chat_response import ChatResponse
 from llm_bridge.type.serializer import serialize
@@ -26,9 +26,8 @@ class StreamClaudeClient(ClaudeClient):
                 betas=self.betas,
                 tools=self.tools,
             ) as stream:
-                stream_response_handler = ClaudeStreamResponseHandler()
                 async for event in stream:
-                    yield await stream_response_handler.process_claude_stream_response(
+                    yield await process_claude_stream_response(
                         event=event,
                         input_tokens=self.input_tokens,
                         client=self.client,
llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py
@@ -1,6 +1,6 @@
 import anthropic
 from anthropic.types import ThinkingConfigEnabledParam, AnthropicBetaParam
-from anthropic.types.beta import BetaCodeExecutionTool20250825Param, BetaWebSearchTool20250305Param
+from anthropic.types.beta import BetaWebSearchTool20250305Param, BetaToolUnionParam, BetaCodeExecutionTool20250825Param
 
 from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_input_tokens
 from llm_bridge.client.implementations.claude.non_stream_claude_client import NonStreamClaudeClient
@@ -34,23 +34,29 @@ async def create_claude_client(
         messages=claude_messages,
     )
 
-    max_tokens = min(32000, 200000 - input_tokens)
+    max_tokens = min(
+        32_000,  # Max output: Claude 4.5 64K; Claude 4.1 32K
+        200_000 - input_tokens  # Context window: Claude Sonnet 4.5 beta: 1M; otherwise 200K
+    )
     thinking = ThinkingConfigEnabledParam(
         type="enabled",
-        budget_tokens=16000
+        budget_tokens=min(32_000, max_tokens) // 2
     )
     temperature = 1
-    betas: list[AnthropicBetaParam] = ["output-128k-2025-02-19", "code-execution-2025-08-25"]
-    tools = [
+    betas: list[AnthropicBetaParam] = [
+        "context-1m-2025-08-07",
+        "output-128k-2025-02-19",
+        "code-execution-2025-08-25",
+    ]
+    tools: list[BetaToolUnionParam] = [
         BetaWebSearchTool20250305Param(
             type="web_search_20250305",
             name="web_search",
         ),
-        # Code Execution is unavailable in Claude
-        # BetaCodeExecutionTool20250825Param(
-        #     type="code_execution_20250825",
-        #     name="code_execution",
-        # )
+        BetaCodeExecutionTool20250825Param(
+            type="code_execution_20250825",
+            name="code_execution",
+        )
     ]
 
     if stream:
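
The thinking budget is now derived from the remaining output window instead of the old fixed budget_tokens=16000. A worked example with hypothetical prompt sizes:

    # Short prompt: the full 32K output budget is available.
    input_tokens = 4_000
    max_tokens = min(32_000, 200_000 - input_tokens)  # 32_000
    budget_tokens = min(32_000, max_tokens) // 2      # 16_000, same as the old fixed value

    # Long prompt: the output window shrinks, and the thinking budget shrinks with it.
    input_tokens = 190_000
    max_tokens = min(32_000, 200_000 - input_tokens)  # 10_000
    budget_tokens = min(32_000, max_tokens) // 2      # 5_000
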
@@ -79,5 +85,3 @@ async def create_claude_client(
         input_tokens=input_tokens,
         tools=tools,
     )
-
-
llm_bridge/resources/model_prices.json
@@ -152,8 +152,8 @@
     {
         "apiType": "Claude",
         "model": "claude-sonnet-4-5",
-        "input": 3,
-        "output": 15
+        "input": 6,
+        "output": 22.5
     },
     {
         "apiType": "Claude",
llm_bridge-1.11.3.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.11.2
+Version: 1.11.3
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
llm_bridge-1.11.3.dist-info/RECORD
@@ -4,10 +4,10 @@ llm_bridge/client/chat_client.py,sha256=XISF2BM-WkZJNbnvcLfMcbSzlrE0XMDulyE_VG9z
 llm_bridge/client/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/printing_status.py,sha256=ok3ihBRIEan3qMbc62HeXTThDx1L6Zbs_IT0HPLPspI,102
 llm_bridge/client/implementations/claude/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/client/implementations/claude/claude_stream_response_handler.py,sha256=qNy0w3ObKopYp3NBWaz25tGm_bEO9RnEk6qi94W1WIs,1190
-llm_bridge/client/implementations/claude/claude_token_counter.py,sha256=g8M7BFY2zM0jrLFyfGPW-4KYetib3x098XKvEdbZh30,1182
-llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=xnge1J-j_Er4K4L1UxhjuxAs_Pl6vralxTKk9yItwjI,2500
-llm_bridge/client/implementations/claude/stream_claude_client.py,sha256=q4w1UYc1yZJw5UFOtnxCoeg8MFp5soc1d57YiCTCCGE,2109
+llm_bridge/client/implementations/claude/claude_response_handler.py,sha256=d5e1rlxfao_rjhgT1Rky-xlCRJIK2M-e9LKNUATOczc,4143
+llm_bridge/client/implementations/claude/claude_token_counter.py,sha256=m_aoLJkFPJqSBA3Thzv5vg3GnaucZh41SAgT28sLeBA,1324
+llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=1khCk0vJkCQ09Q8wuCqX1ZUV54qcwtTGa21ij8ziyak,2990
+llm_bridge/client/implementations/claude/stream_claude_client.py,sha256=gOvdoSa_pNAbZ882pG4NAOOwNtjth-X4M3Gt34orXww,2005
 llm_bridge/client/implementations/gemini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/gemini/gemini_response_handler.py,sha256=LADXq5h_bbuCclp4RTf70YtQ0_9JFRRzo7j4V5Ts7y4,4388
 llm_bridge/client/implementations/gemini/gemini_token_counter.py,sha256=GdnwJWPhGZMB_xC0fz88zQRparIHzTemkQoqfDcxVEA,687
@@ -31,7 +31,7 @@ llm_bridge/logic/chat_generate/chat_client_factory.py,sha256=H0rcRHytSfYKz_mwRfJ
 llm_bridge/logic/chat_generate/chat_message_converter.py,sha256=40VTBOPXg_ocrEZMdt1ObYlm-mhRL35zWzzxv8m2xRc,1538
 llm_bridge/logic/chat_generate/media_processor.py,sha256=ZR8G24EHwZZr2T9iFDRmScDGyJ_kvThApABzSzK0CL0,702
 llm_bridge/logic/chat_generate/model_client_factory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py,sha256=j8RwLDul_zdZIIZfzrJji3VmqnYVAV61Xjrbp4NC69k,2603
+llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py,sha256=unSrPGhQ4wO4xeMnXOGlCfd6BZE7NNYs6mYVcchXOvc,2800
 llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py,sha256=ms0v1TnVA_JJFKhOkbF_qHeRJEAZ3SH2QOYUi2w_FBI,3614
 llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py,sha256=uSDkNcUKdyzfJBE_KPq9Uqpt_DpDulluGjUT-iq8li0,4363
 llm_bridge/logic/chat_generate/model_message_converter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -45,7 +45,7 @@ llm_bridge/logic/message_preprocess/document_processor.py,sha256=IsVqoFgWNa9i8cR
 llm_bridge/logic/message_preprocess/file_type_checker.py,sha256=nkrVki1a2udCeVqUnfIVi7Wxx8OMKbBuHw3FOlm17uo,1603
 llm_bridge/logic/message_preprocess/message_preprocessor.py,sha256=ERws57Dsu-f5LpWKqJ_SEP7omNWXeGoJaocX91P6QDQ,1907
 llm_bridge/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/resources/model_prices.json,sha256=-6KjPzsOtqC6EYL-WFTnszoVzrP5GOFJhPX943BNYZU,3440
+llm_bridge/resources/model_prices.json,sha256=JFCzCLulZ6z9qKKMjMuvZPk_d8Z9rM1GF0szkvfypv0,3442
 llm_bridge/type/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/type/chat_response.py,sha256=zEw-my_I0-7msmlTySdBGE2vWUIPILex0UrUPqTJiYY,754
 llm_bridge/type/message.py,sha256=NyWmSSrciFfvF81aBwAH8qFpo5IpRhh8QXMselbYen8,370
@@ -55,8 +55,8 @@ llm_bridge/type/model_message/claude_message.py,sha256=gYJUTbLUeifQMva3Axarc-VFe
 llm_bridge/type/model_message/gemini_message.py,sha256=mh8pf929g7_NkBzSOwnLXyrwSzTT4yt2FmyX7NZn0sM,4302
 llm_bridge/type/model_message/openai_message.py,sha256=xFaLY-cZoSwNd7E9BSWQjBNcRfCVH11X9s2yxXlctR0,453
 llm_bridge/type/model_message/openai_responses_message.py,sha256=be1q2euA0ybjj4NO6NxOGIRB9eJuXSb4ssUm_bM4Ocs,1529
-llm_bridge-1.11.2.dist-info/licenses/LICENSE,sha256=m6uon-6P_CaiqcBfApMfjG9YRtDxcr40Z52JcqUCEAE,1069
-llm_bridge-1.11.2.dist-info/METADATA,sha256=GeH0Z_IgD9zRenLpq-p7OFYc5JGazD4Lqr5xvBNz9AM,7849
-llm_bridge-1.11.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-llm_bridge-1.11.2.dist-info/top_level.txt,sha256=PtxyrgNX1lSa1Ab_qswg0sekSXejG5zrS6b_v3Po05g,11
-llm_bridge-1.11.2.dist-info/RECORD,,
+llm_bridge-1.11.3.dist-info/licenses/LICENSE,sha256=m6uon-6P_CaiqcBfApMfjG9YRtDxcr40Z52JcqUCEAE,1069
+llm_bridge-1.11.3.dist-info/METADATA,sha256=iyJlwk0I2O3Qm6hEwEGhTFpsYxKK7GUJZE8X9Zw9Z9c,7849
+llm_bridge-1.11.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llm_bridge-1.11.3.dist-info/top_level.txt,sha256=PtxyrgNX1lSa1Ab_qswg0sekSXejG5zrS6b_v3Po05g,11
+llm_bridge-1.11.3.dist-info/RECORD,,
llm_bridge/client/implementations/claude/claude_stream_response_handler.py (deleted)
@@ -1,38 +0,0 @@
-from anthropic import BetaMessageStreamEvent, AsyncAnthropic
-
-from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
-from llm_bridge.type.chat_response import ChatResponse
-
-
-class ClaudeStreamResponseHandler:
-    async def process_claude_stream_response(
-            self,
-            event: BetaMessageStreamEvent,
-            input_tokens: int,
-            client: AsyncAnthropic,
-            model: str,
-    ) -> ChatResponse:
-        text = ""
-        thought = ""
-
-        if event.type == "content_block_delta":
-            if event.delta.type == "thinking_delta":
-                thought += event.delta.thinking
-            elif event.delta.type == "text_delta":
-                text += event.delta.text
-
-        chat_response = ChatResponse(
-            text=text,
-            thought=thought,
-        )
-        output_tokens = await count_claude_output_tokens(
-            client=client,
-            model=model,
-            chat_response=chat_response,
-        )
-        return ChatResponse(
-            text=text,
-            thought=thought,
-            input_tokens=input_tokens,
-            output_tokens=output_tokens,
-        )