LLM-Bridge: llm_bridge-1.7.20-py3-none-any.whl → llm_bridge-1.8.0a0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/llm_bridge/client/implementations/claude/claude_stream_response_handler.py
+++ b/llm_bridge/client/implementations/claude/claude_stream_response_handler.py
@@ -1,14 +1,10 @@
 from anthropic import BetaMessageStreamEvent, AsyncAnthropic
 
 from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
-from llm_bridge.client.implementations.printing_status import PrintingStatus
 from llm_bridge.type.chat_response import ChatResponse
 
 
 class ClaudeStreamResponseHandler:
-    def __init__(self):
-        self.printing_status = None
-
     async def process_claude_stream_response(
             self,
             event: BetaMessageStreamEvent,
@@ -17,21 +13,21 @@ class ClaudeStreamResponseHandler:
             model: str,
     ) -> ChatResponse:
         text = ""
+        thought = ""
+
         if event.type == "content_block_delta":
             if event.delta.type == "thinking_delta":
-                if not self.printing_status:
-                    text += "# Model Thought:\n\n"
-                    self.printing_status = PrintingStatus.Thought
-                text += event.delta.thinking
+                thought += event.delta.thinking
             elif event.delta.type == "text_delta":
-                if self.printing_status == PrintingStatus.Thought:
-                    text += "\n\n# Model Response:\n\n"
-                    self.printing_status = PrintingStatus.Response
                 text += event.delta.text
         elif event.type == "citation":
             citation = event.citation
             text += f"([{citation.title}]({citation.url})) "
-        chat_response = ChatResponse(text=text)
+
+        chat_response = ChatResponse(
+            text=text,
+            thought=thought,
+        )
         output_tokens = await count_claude_output_tokens(
             client=client,
             model=model,
@@ -39,6 +35,7 @@
         )
         return ChatResponse(
             text=text,
+            thought=thought,
             input_tokens=input_tokens,
             output_tokens=output_tokens,
         )
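Net effect of this refactor: the stream handler no longer interleaves "# Model Thought:" / "# Model Response:" headings into a single text string; each event now yields a ChatResponse whose thought and text fields are filled separately. A minimal consumer-side sketch of that contract (the stub stream and all names here are illustrative stand-ins, not LLM-Bridge API):

import asyncio
from dataclasses import dataclass
from typing import AsyncGenerator, Optional


@dataclass
class ChatResponse:  # stand-in for llm_bridge.type.chat_response.ChatResponse
    text: Optional[str] = None
    thought: Optional[str] = None


async def fake_event_stream() -> AsyncGenerator[ChatResponse, None]:
    # Stand-in for the per-event responses process_claude_stream_response
    # now produces: thinking deltas land in `thought`, answer deltas in `text`.
    for chunk in (
        ChatResponse(thought="The user wants a one-word answer. "),
        ChatResponse(text="Paris"),
        ChatResponse(text="."),
    ):
        yield chunk


async def main() -> None:
    thought_parts: list[str] = []
    text_parts: list[str] = []
    async for chunk in fake_event_stream():
        if chunk.thought:
            thought_parts.append(chunk.thought)
        if chunk.text:
            text_parts.append(chunk.text)
    print("thought:", "".join(thought_parts))
    print("answer: ", "".join(text_parts))


asyncio.run(main())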
--- a/llm_bridge/client/implementations/claude/non_stream_claude_client.py
+++ b/llm_bridge/client/implementations/claude/non_stream_claude_client.py
@@ -19,27 +19,30 @@ async def process_claude_non_stream_response(
         model: str,
 ) -> ChatResponse:
     text = ""
+    thought = ""
 
     for content in message.content:
         if content.type == "thinking":
-            text += "# Model Thought:\n\n"
-            text += content.thinking
+            thought += content.thinking
         if content.type == "text":
-            text += "\n\n# Model Response:\n\n"
             text += content.text
-            # Unable to test since streaming Claude is currently not allowed
+            # Unable to test: non-streaming Claude is currently not allowed
            if citations := content.citations:
                 for citation in citations:
                     text += f"([{citation.title}]({citation.url})) "
 
-    chat_response = ChatResponse(text=text)
+    chat_response = ChatResponse(
+        text=text,
+        thought=thought,
+    )
     output_tokens = await count_claude_output_tokens(
         client=client,
         model=model,
         chat_response=chat_response,
     )
     return ChatResponse(
-        text=content,
+        text=text,
+        thought=thought,
         input_tokens=input_tokens,
         output_tokens=output_tokens,
     )
--- a/llm_bridge/client/implementations/claude/stream_claude_client.py
+++ b/llm_bridge/client/implementations/claude/stream_claude_client.py
@@ -3,13 +3,9 @@ import re
 from typing import AsyncGenerator
 
 import httpx
-from anthropic import AsyncAnthropic, BetaMessageStreamEvent
-from anthropic.types.beta import BetaMessage
 from fastapi import HTTPException
 
 from llm_bridge.client.implementations.claude.claude_stream_response_handler import ClaudeStreamResponseHandler
-from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_input_tokens, \
-    count_claude_output_tokens
 from llm_bridge.client.model_client.claude_client import ClaudeClient
 from llm_bridge.type.chat_response import ChatResponse
 from llm_bridge.type.serializer import serialize
--- a/llm_bridge/client/implementations/gemini/gemini_response_handler.py
+++ b/llm_bridge/client/implementations/gemini/gemini_response_handler.py
@@ -1,4 +1,5 @@
 import base64
+from typing import Optional
 
 from google.genai import types
 
@@ -9,32 +10,33 @@ from llm_bridge.type.chat_response import Citation, ChatResponse
 
 class GeminiResponseHandler:
     def __init__(self):
-        self.printing_status = None
-        self.prev_output_tokens = 0
-        self.prev_printing_status = None
+        self.printing_status: Optional[PrintingStatus] = None
+        self.prev_output_tokens: int = 0
+        self.prev_printing_status: Optional[PrintingStatus] = None
 
     async def process_gemini_response(
             self,
             response: types.GenerateContentResponse,
     ) -> ChatResponse:
         text = ""
+        thought = ""
         display = None
         image_base64 = None
         citations = extract_citations(response)
         input_tokens, stage_output_tokens = await count_gemini_tokens(response)
 
+        printing_status = None
         if candidates := response.candidates:
             if candidates[0].content.parts:
                 for part in response.candidates[0].content.parts:
                     # Thought Output
                     if part.text:
-                        if part.thought and not self.printing_status:
-                            text += "# Model Thought:\n\n"
-                            self.printing_status = PrintingStatus.Thought
-                        elif not part.thought and self.printing_status == PrintingStatus.Thought:
-                            text += f"\n\n# Model Response:\n\n"
-                            self.printing_status = PrintingStatus.Response
-                        text += part.text
+                        if part.thought:
+                            printing_status = PrintingStatus.Thought
+                            thought += part.text
+                        elif not part.thought:
+                            printing_status = PrintingStatus.Response
+                            text += part.text
                     # Image Output
                     elif part.inline_data:
                         image_base64 = base64.b64encode(part.inline_data.data).decode('utf-8')
@@ -50,16 +52,17 @@ class GeminiResponseHandler:
                 if chunk.web:
                     text += f"{i}. [{chunk.web.title}]({chunk.web.uri})\n"
 
-        if self.printing_status == self.prev_printing_status:
+        if printing_status == self.prev_printing_status and printing_status == PrintingStatus.Response:
             output_tokens = stage_output_tokens - self.prev_output_tokens
         else:
             output_tokens = stage_output_tokens
 
         self.prev_output_tokens = stage_output_tokens
-        self.prev_printing_status = self.printing_status
+        self.prev_printing_status = printing_status
 
         return ChatResponse(
             text=text,
+            thought=thought,
             image=image_base64,
             display=display,
             citations=citations,
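The token bookkeeping above is the subtle part of the Gemini change: stage_output_tokens appears to be a cumulative per-stage count, and the handler now bills only the delta when this chunk and the previous one are both Response-phase, otherwise the full stage count. A standalone sketch of that rule (the PrintingStatus values and the cumulative-counter reading are assumptions; the branch itself mirrors the diff):

from enum import Enum
from typing import Optional


class PrintingStatus(Enum):  # assumed shape of llm_bridge's PrintingStatus
    Thought = "thought"
    Response = "response"


def chunk_output_tokens(
        stage_output_tokens: int,
        prev_output_tokens: int,
        printing_status: Optional[PrintingStatus],
        prev_printing_status: Optional[PrintingStatus],
) -> int:
    # Same branch as the handler: only bill the delta when two consecutive
    # chunks are both Response-phase output; on any phase change, the stage
    # counter is taken at face value.
    if printing_status == prev_printing_status == PrintingStatus.Response:
        return stage_output_tokens - prev_output_tokens
    return stage_output_tokens


# Two consecutive response chunks: cumulative 12 then 20 -> 8 new tokens.
assert chunk_output_tokens(20, 12, PrintingStatus.Response, PrintingStatus.Response) == 8
# Phase just switched from Thought to Response: bill the full stage count.
assert chunk_output_tokens(5, 12, PrintingStatus.Response, PrintingStatus.Thought) == 5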
--- a/llm_bridge/type/chat_response.py
+++ b/llm_bridge/type/chat_response.py
@@ -11,6 +11,7 @@ class Citation:
 @dataclass
 class ChatResponse:
     text: Optional[str] = None
+    thought: Optional[str] = None
     image: Optional[str] = None
     display: Optional[str] = None
     citations: Optional[list[Citation]] = None
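Taken together, the visible shape of ChatResponse after this release is sketched below; Citation's fields and the two token fields are inferred from call sites elsewhere in this diff rather than shown directly, so treat this as an approximation:

from dataclasses import dataclass
from typing import Optional


@dataclass
class Citation:
    # Inferred from the f-strings above: citation.title / citation.url.
    title: Optional[str] = None
    url: Optional[str] = None


@dataclass
class ChatResponse:
    text: Optional[str] = None
    thought: Optional[str] = None          # new in 1.8.0a0
    image: Optional[str] = None            # base64-encoded image data
    display: Optional[str] = None
    citations: Optional[list[Citation]] = None
    input_tokens: Optional[int] = None     # inferred from constructor calls
    output_tokens: Optional[int] = None    # inferred from constructor calls


# Callers that previously parsed "# Model Thought:" headings out of `text`
# can now read the two channels directly:
resp = ChatResponse(text="Paris.", thought="Capital-city question; answer briefly.")
if resp.thought:
    print(f"[thinking]\n{resp.thought}\n")
print(resp.text)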
--- a/llm_bridge-1.7.20.dist-info/METADATA
+++ b/llm_bridge-1.8.0a0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.7.20
+Version: 1.8.0a0
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
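Note that 1.8.0a0 is an alpha pre-release: pip skips it during normal dependency resolution, so it is only selected with an exact pin (pip install LLM-Bridge==1.8.0a0) or with the --pre flag.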
--- a/llm_bridge-1.7.20.dist-info/RECORD
+++ b/llm_bridge-1.8.0a0.dist-info/RECORD
@@ -4,12 +4,12 @@ llm_bridge/client/chat_client.py,sha256=XISF2BM-WkZJNbnvcLfMcbSzlrE0XMDulyE_VG9z
 llm_bridge/client/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/printing_status.py,sha256=ok3ihBRIEan3qMbc62HeXTThDx1L6Zbs_IT0HPLPspI,102
 llm_bridge/client/implementations/claude/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/client/implementations/claude/claude_stream_response_handler.py,sha256=y7iwW5rk-zdqady-NfDajQlQP-BNtIs5a2v6kIUEMvw,1711
+llm_bridge/client/implementations/claude/claude_stream_response_handler.py,sha256=alBobZZLaCUSf3O09IrwT8NSrpgQJVS893ViPRK-khI,1328
 llm_bridge/client/implementations/claude/claude_token_counter.py,sha256=g8M7BFY2zM0jrLFyfGPW-4KYetib3x098XKvEdbZh30,1182
-llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=d3vNeUn3YTYKAcn_zxeyIIltTNfWPL3oqqftzG6kAKo,2745
-llm_bridge/client/implementations/claude/stream_claude_client.py,sha256=K1HaLlXC5Y2h1f4jg_vH0iFJ0w0KxWZNn01ed8-e-XM,2349
+llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=Zv0QmpIpEL0rXvy9f1EB_WC4YkhtUBc7IadsTZlWIec,2735
+llm_bridge/client/implementations/claude/stream_claude_client.py,sha256=q4w1UYc1yZJw5UFOtnxCoeg8MFp5soc1d57YiCTCCGE,2109
 llm_bridge/client/implementations/gemini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/client/implementations/gemini/gemini_response_handler.py,sha256=Iv4NRqKeEppwidFcOy8ipF3nQUBKDyl2VYdF32f_wJs,3468
+llm_bridge/client/implementations/gemini/gemini_response_handler.py,sha256=qIEc4vC0vUfwkOgDjzq6sewQChpKuuayud1svvRU6Os,3510
 llm_bridge/client/implementations/gemini/gemini_token_counter.py,sha256=7vWdqv00enbQ1C4z2e-b3Hw2VxgScaf2mNnAEgjruoQ,453
 llm_bridge/client/implementations/gemini/non_stream_gemini_client.py,sha256=JGNNpeln42SoXg2vGIC9xG5GGlBh6dIhz4BzYIkgraA,1302
 llm_bridge/client/implementations/gemini/stream_gemini_client.py,sha256=GiLCZkl9mYtTP0RNqhFkYIuGT5UBJKw9ycWH5q49SHU,1577
@@ -47,7 +47,7 @@ llm_bridge/logic/message_preprocess/message_preprocessor.py,sha256=ERws57Dsu-f5L
 llm_bridge/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/resources/model_prices.json,sha256=_2ZXKjnMDa6YSKfnWEPR_vUtuMw3cEi1d2L3IZ2kVxs,2707
 llm_bridge/type/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/type/chat_response.py,sha256=nNAgcWeMBV14VKTMTxWuChgv4zYqy-mVi4Xq9tqV7uE,410
+llm_bridge/type/chat_response.py,sha256=pIiSVjW9SLVnV-3xaGjqZMfRVd0hP1Rz2dbq1m_mCWQ,444
 llm_bridge/type/message.py,sha256=NyWmSSrciFfvF81aBwAH8qFpo5IpRhh8QXMselbYen8,370
 llm_bridge/type/serializer.py,sha256=moCL9y_HTO2CFg2w_jc5MljDxKgHiCo_qiz-o4l2jYU,515
 llm_bridge/type/model_message/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -55,8 +55,8 @@ llm_bridge/type/model_message/claude_message.py,sha256=gYJUTbLUeifQMva3Axarc-VFe
 llm_bridge/type/model_message/gemini_message.py,sha256=mh8pf929g7_NkBzSOwnLXyrwSzTT4yt2FmyX7NZn0sM,4302
 llm_bridge/type/model_message/openai_message.py,sha256=xFaLY-cZoSwNd7E9BSWQjBNcRfCVH11X9s2yxXlctR0,453
 llm_bridge/type/model_message/openai_responses_message.py,sha256=be1q2euA0ybjj4NO6NxOGIRB9eJuXSb4ssUm_bM4Ocs,1529
-llm_bridge-1.7.20.dist-info/licenses/LICENSE,sha256=m6uon-6P_CaiqcBfApMfjG9YRtDxcr40Z52JcqUCEAE,1069
-llm_bridge-1.7.20.dist-info/METADATA,sha256=0Ot79AXFh8xDFB1OeI29IdOHhjzWRtM_daNYDMp6ejg,7505
-llm_bridge-1.7.20.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-llm_bridge-1.7.20.dist-info/top_level.txt,sha256=PtxyrgNX1lSa1Ab_qswg0sekSXejG5zrS6b_v3Po05g,11
-llm_bridge-1.7.20.dist-info/RECORD,,
+llm_bridge-1.8.0a0.dist-info/licenses/LICENSE,sha256=m6uon-6P_CaiqcBfApMfjG9YRtDxcr40Z52JcqUCEAE,1069
+llm_bridge-1.8.0a0.dist-info/METADATA,sha256=YQ72-UyYTit8-1KCubR6-aSefNwlMTggIDk0yKCl9Lc,7506
+llm_bridge-1.8.0a0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llm_bridge-1.8.0a0.dist-info/top_level.txt,sha256=PtxyrgNX1lSa1Ab_qswg0sekSXejG5zrS6b_v3Po05g,11
+llm_bridge-1.8.0a0.dist-info/RECORD,,