LLM-Bridge 1.7.21__tar.gz → 1.8.0a0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0/LLM_Bridge.egg-info}/PKG-INFO +1 -1
  2. {llm_bridge-1.7.21/LLM_Bridge.egg-info → llm_bridge-1.8.0a0}/PKG-INFO +1 -1
  3. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/claude/claude_stream_response_handler.py +9 -12
  4. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +8 -5
  5. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -4
  6. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +15 -12
  7. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/type/chat_response.py +1 -0
  8. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/pyproject.toml +1 -1
  9. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/LICENSE +0 -0
  10. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/LLM_Bridge.egg-info/SOURCES.txt +0 -0
  11. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/LLM_Bridge.egg-info/dependency_links.txt +0 -0
  12. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/LLM_Bridge.egg-info/requires.txt +0 -0
  13. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/LLM_Bridge.egg-info/top_level.txt +0 -0
  14. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/MANIFEST.in +0 -0
  15. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/README.md +0 -0
  16. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/__init__.py +0 -0
  17. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/__init__.py +0 -0
  18. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/chat_client.py +0 -0
  19. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/__init__.py +0 -0
  20. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  21. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  22. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  23. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  24. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  25. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  26. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  27. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  28. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
  29. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  30. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
  31. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  32. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/implementations/printing_status.py +0 -0
  33. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/model_client/__init__.py +0 -0
  34. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/model_client/claude_client.py +0 -0
  35. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/model_client/gemini_client.py +0 -0
  36. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/client/model_client/openai_client.py +0 -0
  37. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/__init__.py +0 -0
  38. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  39. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  40. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  41. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  42. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  43. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  44. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  45. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +0 -0
  46. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  47. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  48. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  49. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  50. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  51. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/file_fetch.py +0 -0
  52. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  53. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  54. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  55. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  56. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  57. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/logic/model_prices.py +0 -0
  58. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/resources/__init__.py +0 -0
  59. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/resources/model_prices.json +0 -0
  60. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/type/__init__.py +0 -0
  61. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/type/message.py +0 -0
  62. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/type/model_message/__init__.py +0 -0
  63. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/type/model_message/claude_message.py +0 -0
  64. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/type/model_message/gemini_message.py +0 -0
  65. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/type/model_message/openai_message.py +0 -0
  66. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  67. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/llm_bridge/type/serializer.py +0 -0
  68. {llm_bridge-1.7.21 → llm_bridge-1.8.0a0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: LLM-Bridge
3
- Version: 1.7.21
3
+ Version: 1.8.0a0
4
4
  Summary: A Bridge for LLMs
5
5
  Author-email: windsnow1025 <windsnow1025@gmail.com>
6
6
  License-Expression: MIT
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: LLM-Bridge
3
- Version: 1.7.21
3
+ Version: 1.8.0a0
4
4
  Summary: A Bridge for LLMs
5
5
  Author-email: windsnow1025 <windsnow1025@gmail.com>
6
6
  License-Expression: MIT
@@ -1,14 +1,10 @@
1
1
  from anthropic import BetaMessageStreamEvent, AsyncAnthropic
2
2
 
3
3
  from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_output_tokens
4
- from llm_bridge.client.implementations.printing_status import PrintingStatus
5
4
  from llm_bridge.type.chat_response import ChatResponse
6
5
 
7
6
 
8
7
  class ClaudeStreamResponseHandler:
9
- def __init__(self):
10
- self.printing_status = None
11
-
12
8
  async def process_claude_stream_response(
13
9
  self,
14
10
  event: BetaMessageStreamEvent,
@@ -17,21 +13,21 @@ class ClaudeStreamResponseHandler:
17
13
  model: str,
18
14
  ) -> ChatResponse:
19
15
  text = ""
16
+ thought = ""
17
+
20
18
  if event.type == "content_block_delta":
21
19
  if event.delta.type == "thinking_delta":
22
- if not self.printing_status:
23
- text += "# Model Thought:\n\n"
24
- self.printing_status = PrintingStatus.Thought
25
- text += event.delta.thinking
20
+ thought += event.delta.thinking
26
21
  elif event.delta.type == "text_delta":
27
- if self.printing_status == PrintingStatus.Thought:
28
- text += "\n\n# Model Response:\n\n"
29
- self.printing_status = PrintingStatus.Response
30
22
  text += event.delta.text
31
23
  elif event.type == "citation":
32
24
  citation = event.citation
33
25
  text += f"([{citation.title}]({citation.url})) "
34
- chat_response = ChatResponse(text=text)
26
+
27
+ chat_response = ChatResponse(
28
+ text=text,
29
+ thought=thought,
30
+ )
35
31
  output_tokens = await count_claude_output_tokens(
36
32
  client=client,
37
33
  model=model,
@@ -39,6 +35,7 @@ class ClaudeStreamResponseHandler:
39
35
  )
40
36
  return ChatResponse(
41
37
  text=text,
38
+ thought=thought,
42
39
  input_tokens=input_tokens,
43
40
  output_tokens=output_tokens,
44
41
  )
@@ -19,20 +19,22 @@ async def process_claude_non_stream_response(
19
19
  model: str,
20
20
  ) -> ChatResponse:
21
21
  text = ""
22
+ thought = ""
22
23
 
23
24
  for content in message.content:
24
25
  if content.type == "thinking":
25
- text += "# Model Thought:\n\n"
26
- text += content.thinking
26
+ thought += content.thinking
27
27
  if content.type == "text":
28
- text += "\n\n# Model Response:\n\n"
29
28
  text += content.text
30
- # Unable to test since streaming Claude is currently not allowed
29
+ # Unable to test: non-streaming Claude is currently not allowed
31
30
  if citations := content.citations:
32
31
  for citation in citations:
33
32
  text += f"([{citation.title}]({citation.url})) "
34
33
 
35
- chat_response = ChatResponse(text=text)
34
+ chat_response = ChatResponse(
35
+ text=text,
36
+ thought=thought,
37
+ )
36
38
  output_tokens = await count_claude_output_tokens(
37
39
  client=client,
38
40
  model=model,
@@ -40,6 +42,7 @@ async def process_claude_non_stream_response(
40
42
  )
41
43
  return ChatResponse(
42
44
  text=text,
45
+ thought=thought,
43
46
  input_tokens=input_tokens,
44
47
  output_tokens=output_tokens,
45
48
  )
@@ -3,13 +3,9 @@ import re
3
3
  from typing import AsyncGenerator
4
4
 
5
5
  import httpx
6
- from anthropic import AsyncAnthropic, BetaMessageStreamEvent
7
- from anthropic.types.beta import BetaMessage
8
6
  from fastapi import HTTPException
9
7
 
10
8
  from llm_bridge.client.implementations.claude.claude_stream_response_handler import ClaudeStreamResponseHandler
11
- from llm_bridge.client.implementations.claude.claude_token_counter import count_claude_input_tokens, \
12
- count_claude_output_tokens
13
9
  from llm_bridge.client.model_client.claude_client import ClaudeClient
14
10
  from llm_bridge.type.chat_response import ChatResponse
15
11
  from llm_bridge.type.serializer import serialize
@@ -1,4 +1,5 @@
1
1
  import base64
2
+ from typing import Optional
2
3
 
3
4
  from google.genai import types
4
5
 
@@ -9,32 +10,33 @@ from llm_bridge.type.chat_response import Citation, ChatResponse
9
10
 
10
11
  class GeminiResponseHandler:
11
12
  def __init__(self):
12
- self.printing_status = None
13
- self.prev_output_tokens = 0
14
- self.prev_printing_status = None
13
+ self.printing_status: Optional[PrintingStatus] = None
14
+ self.prev_output_tokens: int = 0
15
+ self.prev_printing_status: Optional[PrintingStatus] = None
15
16
 
16
17
  async def process_gemini_response(
17
18
  self,
18
19
  response: types.GenerateContentResponse,
19
20
  ) -> ChatResponse:
20
21
  text = ""
22
+ thought = ""
21
23
  display = None
22
24
  image_base64 = None
23
25
  citations = extract_citations(response)
24
26
  input_tokens, stage_output_tokens = await count_gemini_tokens(response)
25
27
 
28
+ printing_status = None
26
29
  if candidates := response.candidates:
27
30
  if candidates[0].content.parts:
28
31
  for part in response.candidates[0].content.parts:
29
32
  # Thought Output
30
33
  if part.text:
31
- if part.thought and not self.printing_status:
32
- text += "# Model Thought:\n\n"
33
- self.printing_status = PrintingStatus.Thought
34
- elif not part.thought and self.printing_status == PrintingStatus.Thought:
35
- text += f"\n\n# Model Response:\n\n"
36
- self.printing_status = PrintingStatus.Response
37
- text += part.text
34
+ if part.thought:
35
+ printing_status = PrintingStatus.Thought
36
+ thought += part.text
37
+ elif not part.thought:
38
+ printing_status = PrintingStatus.Response
39
+ text += part.text
38
40
  # Image Output
39
41
  elif part.inline_data:
40
42
  image_base64 = base64.b64encode(part.inline_data.data).decode('utf-8')
@@ -50,16 +52,17 @@ class GeminiResponseHandler:
50
52
  if chunk.web:
51
53
  text += f"{i}. [{chunk.web.title}]({chunk.web.uri})\n"
52
54
 
53
- if self.printing_status == self.prev_printing_status:
55
+ if printing_status == self.prev_printing_status and printing_status == PrintingStatus.Response:
54
56
  output_tokens = stage_output_tokens - self.prev_output_tokens
55
57
  else:
56
58
  output_tokens = stage_output_tokens
57
59
 
58
60
  self.prev_output_tokens = stage_output_tokens
59
- self.prev_printing_status = self.printing_status
61
+ self.prev_printing_status = printing_status
60
62
 
61
63
  return ChatResponse(
62
64
  text=text,
65
+ thought=thought,
63
66
  image=image_base64,
64
67
  display=display,
65
68
  citations=citations,
@@ -11,6 +11,7 @@ class Citation:
11
11
  @dataclass
12
12
  class ChatResponse:
13
13
  text: Optional[str] = None
14
+ thought: Optional[str] = None
14
15
  image: Optional[str] = None
15
16
  display: Optional[str] = None
16
17
  citations: Optional[list[Citation]] = None
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "LLM-Bridge"
7
- version = "1.7.21"
7
+ version = "1.8.0-alpha.0"
8
8
  authors = [
9
9
  {name = "windsnow1025", email = "windsnow1025@gmail.com"}
10
10
  ]
File without changes
File without changes
File without changes
File without changes