LLM-Bridge 1.9.0a1__tar.gz → 1.10.0a0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0/LLM_Bridge.egg-info}/PKG-INFO +1 -1
  2. {llm_bridge-1.9.0a1/LLM_Bridge.egg-info → llm_bridge-1.10.0a0}/PKG-INFO +1 -1
  3. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +4 -4
  4. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +2 -2
  5. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/openai/openai_token_couter.py +1 -1
  6. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +2 -2
  7. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/type/chat_response.py +1 -1
  8. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/pyproject.toml +1 -1
  9. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/LICENSE +0 -0
  10. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/LLM_Bridge.egg-info/SOURCES.txt +0 -0
  11. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/LLM_Bridge.egg-info/dependency_links.txt +0 -0
  12. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/LLM_Bridge.egg-info/requires.txt +0 -0
  13. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/LLM_Bridge.egg-info/top_level.txt +0 -0
  14. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/MANIFEST.in +0 -0
  15. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/README.md +0 -0
  16. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/__init__.py +0 -0
  17. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/__init__.py +0 -0
  18. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/chat_client.py +0 -0
  19. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/__init__.py +0 -0
  20. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  21. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/claude/claude_stream_response_handler.py +0 -0
  22. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  23. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  24. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  25. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  26. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  27. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  28. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  29. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  30. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  31. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  32. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/implementations/printing_status.py +0 -0
  33. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/model_client/__init__.py +0 -0
  34. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/model_client/claude_client.py +0 -0
  35. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/model_client/gemini_client.py +0 -0
  36. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/client/model_client/openai_client.py +0 -0
  37. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/__init__.py +0 -0
  38. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  39. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  40. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  41. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  42. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  43. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  44. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  45. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +0 -0
  46. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  47. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  48. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  49. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  50. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  51. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/file_fetch.py +0 -0
  52. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  53. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  54. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  55. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  56. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  57. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/logic/model_prices.py +0 -0
  58. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/resources/__init__.py +0 -0
  59. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/resources/model_prices.json +0 -0
  60. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/type/__init__.py +0 -0
  61. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/type/message.py +0 -0
  62. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/type/model_message/__init__.py +0 -0
  63. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/type/model_message/claude_message.py +0 -0
  64. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/type/model_message/gemini_message.py +0 -0
  65. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/type/model_message/openai_message.py +0 -0
  66. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  67. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/llm_bridge/type/serializer.py +0 -0
  68. {llm_bridge-1.9.0a1 → llm_bridge-1.10.0a0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: LLM-Bridge
3
- Version: 1.9.0a1
3
+ Version: 1.10.0a0
4
4
  Summary: A Bridge for LLMs
5
5
  Author-email: windsnow1025 <windsnow1025@gmail.com>
6
6
  License-Expression: MIT
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: LLM-Bridge
3
- Version: 1.9.0a1
3
+ Version: 1.10.0a0
4
4
  Summary: A Bridge for LLMs
5
5
  Author-email: windsnow1025 <windsnow1025@gmail.com>
6
6
  License-Expression: MIT
@@ -22,7 +22,7 @@ class GeminiResponseHandler:
22
22
  thought: str = ""
23
23
  code: str = ""
24
24
  code_output: str = ""
25
- image: Optional[str] = None
25
+ file: Optional[str] = None
26
26
  display: Optional[str] = None
27
27
  citations: list[Citation] = extract_citations(response)
28
28
  input_tokens, stage_output_tokens = await count_gemini_tokens(response)
@@ -46,9 +46,9 @@ class GeminiResponseHandler:
46
46
  # Code Output
47
47
  if part.code_execution_result is not None:
48
48
  code_output += part.code_execution_result.output
49
- # Image
49
+ # File
50
50
  if part.inline_data is not None:
51
- image = base64.b64encode(part.inline_data.data).decode('utf-8')
51
+ file = base64.b64encode(part.inline_data.data).decode('utf-8')
52
52
 
53
53
  # Grounding Sources
54
54
  if candidates := response.candidates:
@@ -74,7 +74,7 @@ class GeminiResponseHandler:
74
74
  thought=thought,
75
75
  code=code,
76
76
  code_output=code_output,
77
- image=image,
77
+ file=file,
78
78
  display=display,
79
79
  citations=citations,
80
80
  input_tokens=input_tokens,
@@ -45,11 +45,11 @@ def process_openai_responses_non_stream_response(
45
45
  # if output.type == "image_generation_call":
46
46
  # image = output.result
47
47
 
48
- chat_response = ChatResponse(text=text, image=image)
48
+ chat_response = ChatResponse(text=text, file=image)
49
49
  output_tokens = count_openai_output_tokens(chat_response)
50
50
  return ChatResponse(
51
51
  text=text,
52
- image=image,
52
+ file=image,
53
53
  citations=citations,
54
54
  input_tokens=input_tokens,
55
55
  output_tokens=output_tokens,
@@ -35,7 +35,7 @@ def count_openai_responses_input_tokens(messages: list[OpenAIResponsesMessage])
35
35
 
36
36
  def count_openai_output_tokens(chat_response: ChatResponse) -> int:
37
37
  text = chat_response.text
38
- file_count = 1 if chat_response.image else 0
38
+ file_count = 1 if chat_response.file else 0
39
39
 
40
40
  return num_tokens_from_text(text) + file_count * 1000
41
41
 
@@ -32,7 +32,7 @@ def process_delta(event: ResponseStreamEvent) -> ChatResponse:
32
32
 
33
33
  chat_response = ChatResponse(
34
34
  text=text,
35
- image=image,
35
+ file=image,
36
36
  citations=citations,
37
37
  )
38
38
  return chat_response
@@ -48,7 +48,7 @@ async def generate_chunk(
48
48
  output_tokens = count_openai_output_tokens(chat_response)
49
49
  yield ChatResponse(
50
50
  text=chat_response.text,
51
- image=chat_response.image,
51
+ file=chat_response.file,
52
52
  citations=chat_response.citations,
53
53
  input_tokens=input_tokens,
54
54
  output_tokens=output_tokens,
@@ -22,7 +22,7 @@ class ChatResponse:
22
22
  thought: Optional[str] = None
23
23
  code: Optional[str] = None
24
24
  code_output: Optional[str] = None
25
- image: Optional[str] = None
25
+ file: Optional[str] = None
26
26
  display: Optional[str] = None
27
27
  citations: Optional[list[Citation]] = None
28
28
  error: Optional[str] = None
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "LLM-Bridge"
7
- version = "1.9.0-alpha.1"
7
+ version = "1.10.0-alpha.0"
8
8
  authors = [
9
9
  {name = "windsnow1025", email = "windsnow1025@gmail.com"}
10
10
  ]
File without changes
File without changes
File without changes
File without changes