LLM-Bridge 1.11.3b0.tar.gz → 1.11.4.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4/LLM_Bridge.egg-info}/PKG-INFO +1 -1
  2. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/LLM_Bridge.egg-info/SOURCES.txt +1 -1
  3. {llm_bridge-1.11.3b0/LLM_Bridge.egg-info → llm_bridge-1.11.4}/PKG-INFO +1 -1
  4. llm_bridge-1.11.4/llm_bridge/client/implementations/claude/claude_response_handler.py +106 -0
  5. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +6 -21
  6. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/claude/stream_claude_client.py +2 -3
  7. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +33 -28
  8. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/pyproject.toml +1 -1
  9. llm_bridge-1.11.3b0/llm_bridge/client/implementations/claude/claude_stream_response_handler.py +0 -70
  10. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/LICENSE +0 -0
  11. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/LLM_Bridge.egg-info/dependency_links.txt +0 -0
  12. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/LLM_Bridge.egg-info/requires.txt +0 -0
  13. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/LLM_Bridge.egg-info/top_level.txt +0 -0
  14. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/MANIFEST.in +0 -0
  15. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/README.md +0 -0
  16. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/__init__.py +0 -0
  17. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/__init__.py +0 -0
  18. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/chat_client.py +0 -0
  19. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/__init__.py +0 -0
  20. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  21. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  22. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  23. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  24. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  25. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  26. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  27. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  28. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
  29. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  30. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
  31. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  32. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/printing_status.py +0 -0
  33. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/model_client/__init__.py +0 -0
  34. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/model_client/claude_client.py +0 -0
  35. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/model_client/gemini_client.py +0 -0
  36. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/model_client/openai_client.py +0 -0
  37. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/__init__.py +0 -0
  38. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  39. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  40. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  41. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  42. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  43. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  44. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  45. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +0 -0
  46. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  47. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  48. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  49. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  50. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  51. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/file_fetch.py +0 -0
  52. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  53. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  54. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  55. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  56. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  57. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/logic/model_prices.py +0 -0
  58. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/resources/__init__.py +0 -0
  59. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/resources/model_prices.json +0 -0
  60. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/type/__init__.py +0 -0
  61. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/type/chat_response.py +0 -0
  62. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/type/message.py +0 -0
  63. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/type/model_message/__init__.py +0 -0
  64. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/type/model_message/claude_message.py +0 -0
  65. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/type/model_message/gemini_message.py +0 -0
  66. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/type/model_message/openai_message.py +0 -0
  67. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  68. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/type/serializer.py +0 -0
  69. {llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/setup.cfg +0 -0
{llm_bridge-1.11.3b0 → llm_bridge-1.11.4/LLM_Bridge.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: LLM-Bridge
- Version: 1.11.3b0
+ Version: 1.11.4
  Summary: A Bridge for LLMs
  Author-email: windsnow1025 <windsnow1025@gmail.com>
  License-Expression: MIT
{llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/LLM_Bridge.egg-info/SOURCES.txt
@@ -13,7 +13,7 @@ llm_bridge/client/chat_client.py
  llm_bridge/client/implementations/__init__.py
  llm_bridge/client/implementations/printing_status.py
  llm_bridge/client/implementations/claude/__init__.py
- llm_bridge/client/implementations/claude/claude_stream_response_handler.py
+ llm_bridge/client/implementations/claude/claude_response_handler.py
  llm_bridge/client/implementations/claude/claude_token_counter.py
  llm_bridge/client/implementations/claude/non_stream_claude_client.py
  llm_bridge/client/implementations/claude/stream_claude_client.py
{llm_bridge-1.11.3b0/LLM_Bridge.egg-info → llm_bridge-1.11.4}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: LLM-Bridge
- Version: 1.11.3b0
+ Version: 1.11.4
  Summary: A Bridge for LLMs
  Author-email: windsnow1025 <windsnow1025@gmail.com>
  License-Expression: MIT
llm_bridge-1.11.4/llm_bridge/client/implementations/claude/claude_response_handler.py
@@ -0,0 +1,106 @@
+ from anthropic import BetaMessageStreamEvent, AsyncAnthropic
+ from anthropic.types.beta import BetaRawContentBlockDelta, BetaThinkingDelta, BetaTextDelta, BetaInputJSONDelta, \
+     BetaBashCodeExecutionToolResultBlock, \
+     BetaTextEditorCodeExecutionToolResultBlock, BetaTextEditorCodeExecutionViewResultBlock, \
+     BetaTextEditorCodeExecutionStrReplaceResultBlock, \
+     BetaServerToolUseBlock, BetaBashCodeExecutionResultBlock, BetaTextBlock, BetaThinkingBlock
+ from anthropic.types.beta.beta_raw_content_block_start_event import ContentBlock
+
+ from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
+ from llm_bridge.type.chat_response import ChatResponse
+
+
+ def process_content_block(content_block: ContentBlock) -> ChatResponse:
+     text = ""
+     thought = ""
+     code = ""
+     code_output = ""
+
+     if content_block.type == "text":
+         text_block: BetaTextBlock = content_block
+         text += text_block.text
+
+     elif content_block.type == "thinking":
+         thinking_block: BetaThinkingBlock = content_block
+         thought += thinking_block.thinking
+
+     elif content_block.type == "server_tool_use":
+         server_tool_use_block: BetaServerToolUseBlock = content_block
+         code += str(server_tool_use_block.input)
+
+     elif content_block.type == "bash_code_execution_tool_result":
+         bash_code_execution_tool_result_block: BetaBashCodeExecutionToolResultBlock = content_block
+         if bash_code_execution_tool_result_block.content.type == "bash_code_execution_result":
+             content: BetaBashCodeExecutionResultBlock = content_block.content
+             code_output += content.stdout
+
+     elif content_block.type == "text_editor_code_execution_tool_result":
+         text_editor_code_execution_tool_result: BetaTextEditorCodeExecutionToolResultBlock = content_block
+         if text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_view_result":
+             content: BetaTextEditorCodeExecutionViewResultBlock = content_block.content
+             code_output += content.content
+         elif text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_str_replace_result":
+             content: BetaTextEditorCodeExecutionStrReplaceResultBlock = content_block.content
+             code_output += content.lines
+
+     return ChatResponse(
+         text=text,
+         thought=thought,
+         code=code,
+         code_output=code_output,
+     )
+
+
+ async def process_claude_stream_response(
+         event: BetaMessageStreamEvent,
+         input_tokens: int,
+         client: AsyncAnthropic,
+         model: str,
+ ) -> ChatResponse:
+     text = ""
+     thought = ""
+     code = ""
+     code_output = ""
+
+     if event.type == "content_block_delta":
+         event_delta: BetaRawContentBlockDelta = event.delta
+
+         if event_delta.type == "text_delta":
+             text_delta: BetaTextDelta = event_delta
+             text += text_delta.text
+
+         elif event_delta.type == "thinking_delta":
+             thinking_delta: BetaThinkingDelta = event_delta
+             thought += thinking_delta.thinking
+
+         elif event_delta.type == "input_json_delta":
+             input_json_delta: BetaInputJSONDelta = event_delta
+             code += input_json_delta.partial_json
+
+     if event.type == "content_block_start":
+         content_block: ContentBlock = event.content_block
+         content_block_chat_response = process_content_block(content_block)
+         text += content_block_chat_response.text
+         thought += content_block_chat_response.thought
+         code += content_block_chat_response.code
+         code_output += content_block_chat_response.code_output
+
+     chat_response = ChatResponse(
+         text=text,
+         thought=thought,
+         code=code,
+         code_output=code_output,
+     )
+     output_tokens = await count_claude_output_tokens(
+         client=client,
+         model=model,
+         chat_response=chat_response,
+     )
+     return ChatResponse(
+         text=text,
+         thought=thought,
+         code=code,
+         code_output=code_output,
+         input_tokens=input_tokens,
+         output_tokens=output_tokens,
+     )
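For readers tracing the refactor: the ClaudeStreamResponseHandler class deleted at the end of this diff is replaced by the module-level process_claude_stream_response above. A minimal consumption sketch follows, assuming an ANTHROPIC_API_KEY in the environment; the model name is illustrative, and none of this is part of the published package:

import asyncio

from anthropic import AsyncAnthropic

from llm_bridge.client.implementations.claude.claude_response_handler import process_claude_stream_response


async def main() -> None:
    client = AsyncAnthropic()  # reads ANTHROPIC_API_KEY from the environment
    model = "claude-sonnet-4-20250514"  # hypothetical model name for illustration
    async with client.beta.messages.stream(
        model=model,
        max_tokens=1024,
        messages=[{"role": "user", "content": "Hello"}],
    ) as stream:
        async for event in stream:
            # Each event folds into a ChatResponse; text, thinking, and partial
            # tool-input JSON arrive via content_block_delta, while completed
            # tool-result blocks arrive via content_block_start.
            chat_response = await process_claude_stream_response(
                event=event,
                input_tokens=0,  # placeholder; StreamClaudeClient precomputes this
                client=client,
                model=model,
            )
            print(chat_response.text, end="", flush=True)


asyncio.run(main())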
{llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/claude/non_stream_claude_client.py
@@ -7,6 +7,7 @@ from anthropic.types.beta import BetaMessage, BetaBashCodeExecutionToolResultBlo
      BetaServerToolUseBlock
  from fastapi import HTTPException

+ from llm_bridge.client.implementations.claude.claude_response_handler import process_content_block
  from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
  from llm_bridge.client.model_client.claude_client import ClaudeClient
  from llm_bridge.type.chat_response import ChatResponse
@@ -25,27 +26,11 @@ async def process_claude_non_stream_response(
      code_output = ""

      for content_block in message.content:
-         if content_block.type == "text":
-             text_block: BetaTextBlock = content_block
-             text += text_block.text
-
-         elif content_block.type == "thinking":
-             thinking_block: BetaThinkingBlock = content_block
-             thought += thinking_block.thinking
-
-         elif content_block.type == "server_tool_use":
-             server_tool_use_block: BetaServerToolUseBlock = content_block
-             code += server_tool_use_block.input
-
-         elif content_block.type == "bash_code_execution_tool_result":
-             bash_code_execution_tool_result_block: BetaBashCodeExecutionToolResultBlock = content_block
-             if bash_code_execution_tool_result_block.content.type == "bash_code_execution_result":
-                 code_output += bash_code_execution_tool_result_block.content.stdout
-
-         elif content_block.type == "text_editor_code_execution_tool_result":
-             text_editor_code_execution_tool_result: BetaBashCodeExecutionToolResultBlock = content_block
-             if text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_view_result":
-                 code_output += content_block.content.content
+         content_block_chat_response = process_content_block(content_block)
+         text += content_block_chat_response.text
+         thought += content_block_chat_response.thought
+         code += content_block_chat_response.code
+         code_output += content_block_chat_response.code_output

      chat_response = ChatResponse(
          text=text,
{llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/claude/stream_claude_client.py
@@ -5,7 +5,7 @@ from typing import AsyncGenerator
  import httpx
  from fastapi import HTTPException

- from llm_bridge.client.implementations.claude.claude_stream_response_handler import ClaudeStreamResponseHandler
+ from llm_bridge.client.implementations.claude.claude_response_handler import process_claude_stream_response
  from llm_bridge.client.model_client.claude_client import ClaudeClient
  from llm_bridge.type.chat_response import ChatResponse
  from llm_bridge.type.serializer import serialize
@@ -26,9 +26,8 @@ class StreamClaudeClient(ClaudeClient):
              betas=self.betas,
              tools=self.tools,
          ) as stream:
-             stream_response_handler = ClaudeStreamResponseHandler()
              async for event in stream:
-                 yield await stream_response_handler.process_claude_stream_response(
+                 yield await process_claude_stream_response(
                      event=event,
                      input_tokens=self.input_tokens,
                      client=self.client,
{llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/llm_bridge/client/implementations/gemini/gemini_response_handler.py
@@ -3,6 +3,7 @@ import mimetypes
  from typing import Optional

  from google.genai import types
+ from google.genai.types import Part

  from llm_bridge.client.implementations.gemini.gemini_token_counter import count_gemini_tokens
  from llm_bridge.client.implementations.printing_status import PrintingStatus
@@ -28,35 +29,39 @@ class GeminiResponseHandler:
          citations: list[Citation] = extract_citations(response)
          input_tokens, stage_output_tokens = await count_gemini_tokens(response)

-         printing_status = None
+         parts: list[Part] = []
          if candidates := response.candidates:
-             if candidates[0].content.parts:
-                 for part in response.candidates[0].content.parts:
-                     if part.text is not None:
-                         # Thought
-                         if part.thought:
-                             printing_status = PrintingStatus.Thought
-                             thought += part.text
-                         # Text
-                         elif not part.thought:
-                             printing_status = PrintingStatus.Response
-                             text += part.text
-                     # Code
-                     if part.executable_code is not None:
-                         code += part.executable_code.code
-                     # Code Output
-                     if part.code_execution_result is not None:
-                         code_output += part.code_execution_result.output
-                     # File
-                     if part.inline_data is not None:
-                         mime_type = part.inline_data.mime_type
-                         extension = mimetypes.guess_extension(mime_type) or ""
-                         file = File(
-                             name=f"generated_file{extension}",
-                             data=base64.b64encode(part.inline_data.data).decode('utf-8'),
-                             type=mime_type,
-                         )
-                         files.append(file)
+             if content := candidates[0].content:
+                 if content.parts:
+                     parts = content.parts
+
+         printing_status: PrintingStatus | None = None
+         for part in parts:
+             if part.text is not None:
+                 # Thought
+                 if part.thought:
+                     printing_status = PrintingStatus.Thought
+                     thought += part.text
+                 # Text
+                 elif not part.thought:
+                     printing_status = PrintingStatus.Response
+                     text += part.text
+             # Code
+             if part.executable_code is not None:
+                 code += part.executable_code.code
+             # Code Output
+             if part.code_execution_result is not None:
+                 code_output += part.code_execution_result.output
+             # File
+             if part.inline_data is not None:
+                 mime_type = part.inline_data.mime_type
+                 extension = mimetypes.guess_extension(mime_type) or ""
+                 file = File(
+                     name=f"generated_file{extension}",
+                     data=base64.b64encode(part.inline_data.data).decode('utf-8'),
+                     type=mime_type,
+                 )
+                 files.append(file)

          # Grounding Sources
          if candidates := response.candidates:
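The Gemini change above swaps nested attribute access for None-safe walrus guards that collect the candidate parts once before a flat loop, so a response with no candidates or empty content no longer raises. A minimal standalone sketch of that guard pattern, assuming a GenerateContentResponse from google-genai; the collect_parts name is hypothetical:

from google.genai import types
from google.genai.types import Part


def collect_parts(response: types.GenerateContentResponse) -> list[Part]:
    # Guard each optional layer (candidates, content, parts) before use,
    # so a missing or empty layer yields [] instead of an AttributeError.
    parts: list[Part] = []
    if candidates := response.candidates:
        if content := candidates[0].content:
            if content.parts:
                parts = content.parts
    return parts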
{llm_bridge-1.11.3b0 → llm_bridge-1.11.4}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "LLM-Bridge"
- version = "1.11.3-beta.0"
+ version = "1.11.4"
  authors = [
      {name = "windsnow1025", email = "windsnow1025@gmail.com"}
  ]
llm_bridge-1.11.3b0/llm_bridge/client/implementations/claude/claude_stream_response_handler.py
@@ -1,70 +0,0 @@
- from anthropic import BetaMessageStreamEvent, AsyncAnthropic
- from anthropic.types.beta import BetaRawContentBlockDelta, BetaThinkingDelta, BetaTextDelta, BetaInputJSONDelta, \
-     BetaBashCodeExecutionToolResultBlock, \
-     BetaTextEditorCodeExecutionToolResultBlock
- from anthropic.types.beta.beta_raw_content_block_start_event import ContentBlock
-
- from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
- from llm_bridge.type.chat_response import ChatResponse
-
-
- class ClaudeStreamResponseHandler:
-     async def process_claude_stream_response(
-             self,
-             event: BetaMessageStreamEvent,
-             input_tokens: int,
-             client: AsyncAnthropic,
-             model: str,
-     ) -> ChatResponse:
-         text = ""
-         thought = ""
-         code = ""
-         code_output = ""
-
-         if event.type == "content_block_delta":
-             event_delta: BetaRawContentBlockDelta = event.delta
-
-             if event_delta.type == "text_delta":
-                 text_delta: BetaTextDelta = event_delta
-                 text += text_delta.text
-
-             elif event_delta.type == "thinking_delta":
-                 thinking_delta: BetaThinkingDelta = event_delta
-                 thought += thinking_delta.thinking
-
-             elif event_delta.type == "input_json_delta":
-                 input_json_delta: BetaInputJSONDelta = event_delta
-                 code += input_json_delta.partial_json
-
-         if event.type == "content_block_start":
-             event_content_block: ContentBlock = event.content_block
-
-             if event_content_block.type == "bash_code_execution_tool_result":
-                 bash_code_execution_tool_result_block: BetaBashCodeExecutionToolResultBlock = event_content_block
-                 if bash_code_execution_tool_result_block.content.type == "bash_code_execution_result":
-                     code_output += event_content_block.content.stdout
-
-             elif event_content_block.type == "text_editor_code_execution_tool_result":
-                 text_editor_code_execution_tool_result: BetaTextEditorCodeExecutionToolResultBlock = event_content_block
-                 if text_editor_code_execution_tool_result.content.type == "text_editor_code_execution_view_result":
-                     code_output += event_content_block.content.content
-
-         chat_response = ChatResponse(
-             text=text,
-             thought=thought,
-             code=code,
-             code_output=code_output,
-         )
-         output_tokens = await count_claude_output_tokens(
-             client=client,
-             model=model,
-             chat_response=chat_response,
-         )
-         return ChatResponse(
-             text=text,
-             thought=thought,
-             code=code,
-             code_output=code_output,
-             input_tokens=input_tokens,
-             output_tokens=output_tokens,
-         )