LLM-Bridge 1.15.4__tar.gz → 1.15.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/.github/workflows/python-publish.yml +2 -0
  2. llm_bridge-1.15.6/.github/workflows/python-test.yaml +31 -0
  3. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/PKG-INFO +1 -1
  4. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +2 -2
  5. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/message_preprocess/file_type_checker.py +8 -5
  6. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/message_preprocess/message_preprocessor.py +3 -3
  7. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/model_prices.py +9 -5
  8. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/pyproject.toml +1 -1
  9. llm_bridge-1.15.6/tests/file_type_checker_test.py +17 -0
  10. llm_bridge-1.15.6/tests/model_prices_test.py +10 -0
  11. llm_bridge-1.15.6/usage/config.py +129 -0
  12. llm_bridge-1.15.6/usage/main.py +100 -0
  13. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/uv.lock +1 -1
  14. llm_bridge-1.15.4/tests/chat_client_factory_test.py +0 -20
  15. llm_bridge-1.15.4/tests/message_preprocessor_test.py +0 -26
  16. llm_bridge-1.15.4/usage/main.py +0 -226
  17. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/.gitattributes +0 -0
  18. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/.gitignore +0 -0
  19. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/LICENSE +0 -0
  20. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/MANIFEST.in +0 -0
  21. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/README.md +0 -0
  22. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/__init__.py +0 -0
  23. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/__init__.py +0 -0
  24. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/chat_client.py +0 -0
  25. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/__init__.py +0 -0
  26. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  27. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
  28. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  29. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  30. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  31. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  32. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
  33. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  34. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  35. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  36. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  37. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  38. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
  39. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/openai/openai_responses_response_handler.py +0 -0
  40. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  41. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
  42. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  43. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/implementations/printing_status.py +0 -0
  44. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/model_client/__init__.py +0 -0
  45. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/model_client/claude_client.py +0 -0
  46. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/model_client/gemini_client.py +0 -0
  47. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/client/model_client/openai_client.py +0 -0
  48. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/__init__.py +0 -0
  49. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  50. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  51. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  52. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  53. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  54. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  55. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  56. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +0 -0
  57. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +0 -0
  58. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  59. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  60. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  61. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  62. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/file_fetch.py +0 -0
  63. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  64. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  65. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  66. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/resources/__init__.py +0 -0
  67. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/resources/model_prices.json +0 -0
  68. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/type/__init__.py +0 -0
  69. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/type/chat_response.py +0 -0
  70. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/type/message.py +0 -0
  71. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/type/model_message/__init__.py +0 -0
  72. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/type/model_message/claude_message.py +0 -0
  73. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/type/model_message/gemini_message.py +0 -0
  74. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/type/model_message/openai_message.py +0 -0
  75. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  76. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/llm_bridge/type/serializer.py +0 -0
  77. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/tests/__init__.py +0 -0
  78. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/usage/.env.example +0 -0
  79. {llm_bridge-1.15.4 → llm_bridge-1.15.6}/usage/workflow.py +0 -0
--- llm_bridge-1.15.4/.github/workflows/python-publish.yml
+++ llm_bridge-1.15.6/.github/workflows/python-publish.yml
@@ -1,3 +1,5 @@
+# https://docs.astral.sh/uv/guides/integration/github/
+
 name: "Publish"
 
 on:
--- /dev/null
+++ llm_bridge-1.15.6/.github/workflows/python-test.yaml
@@ -0,0 +1,31 @@
+# https://docs.astral.sh/uv/guides/integration/github/
+
+name: Python test
+
+permissions:
+  contents: read
+
+on:
+  push:
+    branches: [ "main" ]
+  pull_request:
+    branches: [ "main" ]
+
+jobs:
+  uv:
+    name: python
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v5
+
+      - name: Enable caching
+        uses: astral-sh/setup-uv@v7
+        with:
+          enable-cache: true
+
+      - name: Install the project
+        run: uv sync --locked --all-extras --dev
+
+      - name: Run tests
+        run: uv run pytest tests
--- llm_bridge-1.15.4/PKG-INFO
+++ llm_bridge-1.15.6/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: LLM-Bridge
-Version: 1.15.4
+Version: 1.15.6
 Summary: A Bridge for LLMs
 Author-email: windsnow1025 <windsnow1025@gmail.com>
 License-Expression: MIT
--- llm_bridge-1.15.4/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py
+++ llm_bridge-1.15.6/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py
@@ -2,7 +2,7 @@ from openai.types.responses import ResponseInputTextParam, ResponseInputImagePar
     ResponseInputContentParam, EasyInputMessageParam, ResponseOutputMessageParam, ResponseInputFileParam
 
 from llm_bridge.logic.chat_generate import media_processor
-from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type, get_file_name
+from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type, get_filename_without_timestamp
 from llm_bridge.type.message import Message, ContentType
 from llm_bridge.type.model_message.openai_responses_message import OpenAIResponsesMessage
 
@@ -33,7 +33,7 @@ async def convert_message_to_openai_responses(message: Message) -> OpenAIRespons
         file_data, _ = await media_processor.get_base64_content_from_url(file_url)
         pdf_content = ResponseInputFileParam(
             type="input_file",
-            filename=get_file_name(file_url),
+            filename=get_filename_without_timestamp(file_url),
             file_data=f"data:application/pdf;base64,{file_data}",
         )
         content.append(pdf_content)
--- llm_bridge-1.15.4/llm_bridge/logic/message_preprocess/file_type_checker.py
+++ llm_bridge-1.15.6/llm_bridge/logic/message_preprocess/file_type_checker.py
@@ -1,6 +1,7 @@
 import mimetypes
 import os
 import re
+from pathlib import PurePosixPath
 
 from llm_bridge.logic.file_fetch import fetch_file_data
 from llm_bridge.logic.message_preprocess.code_file_extensions import code_file_extensions
@@ -14,9 +15,12 @@ def is_file_type_supported(file_name: str) -> bool:
 
 
 async def get_file_type(file_url: str) -> tuple[str, str]:
-    file_name = get_file_name(file_url)
+    file_name = get_filename_without_timestamp(file_url)
+
+    # Treat filenames without an extension as their own extension
+    suffix = PurePosixPath(file_name).suffix.lower()
+    file_extension = suffix if suffix else '.' + file_name.lower()
 
-    file_extension = '.' + file_name.split('.')[-1].lower()  # Treat filenames without an extension as their own extension
     if file_extension in code_file_extensions:
         return 'text', 'code'
     if file_extension == '.pdf':
@@ -41,9 +45,8 @@ async def get_file_type(file_url: str) -> tuple[str, str]:
     return 'unknown', 'unknown'
 
 
-# Without Timestamp
-def get_file_name(file_url: str) -> str:
-    base_name = os.path.basename(file_url)
+def get_filename_without_timestamp(file_url: str) -> str:
+    base_name = PurePosixPath(file_url).name
     match = re.search(r'-(.+)', base_name)
     if match:
         return match.group(1)
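
Note: the get_file_type rewrite above delegates suffix parsing to PurePosixPath and keeps the fallback that treats an extensionless filename as its own extension, so an upload named 1767243600000-Dockerfile resolves to ".dockerfile" (the new tests further down exercise exactly this). A minimal, self-contained sketch of that behavior; the code_file_extensions set here is a stand-in for the real one in code_file_extensions.py:

import re
from pathlib import PurePosixPath

code_file_extensions = {'.py', '.dockerfile'}  # stand-in; the real set lives in code_file_extensions.py

def filename_without_timestamp(file_url: str) -> str:
    # Uploads are named "<epoch-millis>-<original name>"; drop the prefix.
    base_name = PurePosixPath(file_url).name
    match = re.search(r'-(.+)', base_name)
    return match.group(1) if match else base_name

name = filename_without_timestamp("https://example.com/1767243600000-Dockerfile")
suffix = PurePosixPath(name).suffix.lower()
file_extension = suffix if suffix else '.' + name.lower()  # extensionless name becomes the extension
assert file_extension == '.dockerfile'
assert file_extension in code_file_extensions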
--- llm_bridge-1.15.4/llm_bridge/logic/message_preprocess/message_preprocessor.py
+++ llm_bridge-1.15.6/llm_bridge/logic/message_preprocess/message_preprocessor.py
@@ -1,5 +1,5 @@
 from llm_bridge.logic.message_preprocess import document_processor
-from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type, get_file_name
+from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type, get_filename_without_timestamp
 from llm_bridge.type.message import Message, Role, Content, ContentType
 
 
@@ -24,12 +24,12 @@ async def extract_text_files_to_message(message: Message, api_type: str) -> None
         if sub_type == "pdf" and api_type in ("OpenAI", "OpenAI-Azure", "Gemini-Vertex", "Gemini-Free", "Gemini-Paid", "Claude"):
             continue
 
-        filename = get_file_name(file_url)
+        filename = get_filename_without_timestamp(file_url)
         file_text = await document_processor.extract_text_from_file(file_url)
 
         message.contents[i] = Content(
             type=ContentType.Text,
-            data=f"{filename}: \n{file_text}\n"
+            data=f"<file name=\"{filename}\">\n{file_text}\n</file>"
         )
 
 
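Note: the new <file name="..."> wrapper delimits both ends of an inlined document, so the model can tell where one file stops and the next begins; the old "filename: \n..." framing had no closing marker. A rough before/after illustration with made-up values:

filename = "notes.txt"  # hypothetical
file_text = "first line\nsecond line"

old_framing = f"{filename}: \n{file_text}\n"
new_framing = f"<file name=\"{filename}\">\n{file_text}\n</file>"
# new_framing renders as:
# <file name="notes.txt">
# first line
# second line
# </file>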
--- llm_bridge-1.15.4/llm_bridge/logic/model_prices.py
+++ llm_bridge-1.15.6/llm_bridge/logic/model_prices.py
@@ -1,5 +1,5 @@
-import importlib.resources
 import json
+from importlib.resources import files
 from typing import TypedDict
 
 from fastapi import HTTPException
@@ -12,13 +12,17 @@ class ModelPrice(TypedDict):
     output: float
 
 
-def load_json_file(package, filename):
-    with importlib.resources.open_text(package, filename) as f:
-        return json.load(f)
+def load_json_file(package: str, filename: str):
+    content = files(package).joinpath(filename).read_text(encoding="utf-8")
+    return json.loads(content)
 
 
 def get_model_prices() -> list[ModelPrice]:
-    return load_json_file("llm_bridge.resources", "model_prices.json")
+    prices = load_json_file("llm_bridge.resources", "model_prices.json")
+    for price in prices:
+        price["input"] = float(price["input"])
+        price["output"] = float(price["output"])
+    return prices
 
 
 def find_model_prices(api_type: str, model: str) -> ModelPrice | None:
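
Note: the loader now uses the importlib.resources.files() API, which current importlib.resources documentation recommends over the legacy open_text(package, filename) call style, and get_model_prices coerces prices to float so that values stored as JSON integers (e.g. a free tier priced at 0) still satisfy the isinstance checks in the new model_prices_test.py below. A standalone sketch of the same pattern:

import json
from importlib.resources import files

def load_json_resource(package: str, filename: str):
    # files(...) returns a Traversable; joinpath + read_text replaces
    # the legacy open_text(package, filename) context manager.
    content = files(package).joinpath(filename).read_text(encoding="utf-8")
    return json.loads(content)

prices = load_json_resource("llm_bridge.resources", "model_prices.json")
for price in prices:
    # JSON parses 0 as int, which would fail isinstance(price["input"], float).
    price["input"] = float(price["input"])
    price["output"] = float(price["output"])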
--- llm_bridge-1.15.4/pyproject.toml
+++ llm_bridge-1.15.6/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "LLM-Bridge"
-version = "1.15.4"
+version = "1.15.6"
 dependencies = [
     "fastapi",
     "httpx",
--- /dev/null
+++ llm_bridge-1.15.6/tests/file_type_checker_test.py
@@ -0,0 +1,17 @@
+import pytest
+
+from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type
+
+
+@pytest.mark.asyncio
+async def test_get_file_type_with_extension():
+    file_type, sub_type = await get_file_type("https://example.com/1767243600000-document.pdf")
+    assert file_type == "text"
+    assert sub_type == "pdf"
+
+
+@pytest.mark.asyncio
+async def test_get_file_type_without_extension():
+    file_type, sub_type = await get_file_type("https://example.com/1767243600000-Dockerfile")
+    assert file_type == "text"
+    assert sub_type == "code"
--- /dev/null
+++ llm_bridge-1.15.6/tests/model_prices_test.py
@@ -0,0 +1,10 @@
+from llm_bridge.logic.model_prices import get_model_prices
+
+
+def test_get_model_prices_types():
+    result = get_model_prices()
+    for model_price in result:
+        assert isinstance(model_price["apiType"], str)
+        assert isinstance(model_price["model"], str)
+        assert isinstance(model_price["input"], float)
+        assert isinstance(model_price["output"], float)
--- /dev/null
+++ llm_bridge-1.15.6/usage/config.py
@@ -0,0 +1,129 @@
+from llm_bridge import *
+
+structured_output_schema = {
+    "$schema": "https://json-schema.org/draft/2020-12/schema",
+    "$id": "https://example.com/product.schema.json",
+    "title": "Product",
+    "description": "A product from Acme's catalog",
+    "type": "object",
+    "properties": {
+        "productId": {
+            "description": "The unique identifier for a product",
+            "type": "integer"
+        },
+        "productName": {
+            "description": "Name of the product",
+            "type": "string"
+        },
+        "price": {
+            "description": "The price of the product",
+            "type": "number",
+            "exclusiveMinimum": 0
+        },
+        "tags": {
+            "description": "Tags for the product",
+            "type": "array",
+            "items": {
+                "type": "string"
+            },
+            "minItems": 1,
+            "uniqueItems": True
+        }
+    },
+    "required": [
+        "productId",
+        "productName",
+        "price"
+    ]
+}
+structured_output_schema = None
+
+messages = [
+    Message(
+        role=Role.System,
+        contents=[
+            Content(type=ContentType.Text, data="You are a helpful assistant.")
+        ]
+    ),
+    Message(
+        role=Role.User,
+        contents=[
+            Content(type=ContentType.Text, data="Hello")
+        ]
+    ),
+    Message(
+        role=Role.Assistant,
+        contents=[
+            Content(type=ContentType.Text, data="Hello! How can I assist you today?")
+        ]
+    ),
+    # Message(
+    #     role=Role.User,
+    #     contents=[
+    #         # Thinking
+    #         # Content(type=ContentType.Text, data="Explain the concept of Occam's Razor and provide a simple, everyday example."),
+    #
+    #         # Web Search
+    #         # Content(type=ContentType.Text, data="What's the weather in NYC today?"),
+    #
+    #         # Image Understanding
+    #         # Content(type=ContentType.File, data="https://www.gstatic.com/webp/gallery3/1.png"),
+    #         # Content(type=ContentType.Text, data="What is in this image?"),
+    #
+    #         # Image Generation
+    #         # Content(type=ContentType.Text, data="Please generate an image of a cat."),
+    #
+    #         # URL Context
+    #         # Content(type=ContentType.Text, data="What is in https://www.windsnow1025.com/"),
+    #
+    #         # Code Execution
+    #         # Content(type=ContentType.Text, data="What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
+    #
+    #         # File Output
+    #         # Content(type=ContentType.Text, data="Create a matplotlib visualization and save it as output.png"),
+    #
+    #         # Structured Output
+    #         # Content(type=ContentType.Text, data="Please generate a product."),
+    #     ]
+    # ),
+    Message(
+        role=Role.User,
+        contents=[
+            Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1769429581512-Test.txt"),
+            # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746208707489-image.png"),
+            # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746209841847-A%20Tutorial%20on%20Spectral%20Clustering.pdf"),
+            # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212253473-file_example_MP3_700KB.mp3"),
+            # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212980820-file_example_MP4_480_1_5MG.mp4"),
+            # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1753804900037-Calculus.docx"),
+            Content(type=ContentType.Text, data="What's this?"),
+        ]
+    ),
+]
+# See /llm_bridge/resources/model_prices.json for available models
+# model = "gpt-5.2"
+# model = "gpt-5.1"
+# model = "gpt-5-pro"
+# model = "gpt-5"
+# model = "gpt-4.1"
+# model = "gpt-5-codex"
+# model = "gemini-3-pro-preview"
+# model = "gemini-3-pro-image-preview"
+model = "gemini-3-flash-preview"
+# model = "grok-4-1-fast-reasoning"
+# model = "claude-sonnet-4-5"
+# model = "claude-opus-4-5"
+api_type = "Gemini-Vertex"
+# api_type = "Gemini-Free"
+# api_type = "Gemini-Paid"
+# api_type = "OpenAI"
+# api_type = "OpenAI-Azure"
+# api_type = "OpenAI-GitHub"
+# api_type = "Claude"
+# api_type = "Grok"
+temperature = 0
+stream = True
+# stream = False
+thought = True
+# thought = False
+code_execution = True
+# code_execution = False
--- /dev/null
+++ llm_bridge-1.15.6/usage/main.py
@@ -0,0 +1,100 @@
+import asyncio
+import logging
+import os
+import sys
+from pathlib import Path
+from pprint import pprint
+
+from dotenv import load_dotenv
+
+from usage.config import *
+from usage.workflow import workflow
+
+script_dir = Path(__file__).parent.resolve()
+
+# Env
+load_dotenv(script_dir / ".env")
+
+# Logging Output File
+output_path = script_dir / "output.log"
+output_path.parent.mkdir(parents=True, exist_ok=True)
+output_file = output_path.open("w", encoding="utf-8")
+sys.stdout = output_file
+
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(levelname)s - %(message)s',
+    stream=output_file
+)
+
+api_keys = {
+    "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY"),
+    "AZURE_API_KEY": os.environ.get("AZURE_API_KEY"),
+    "AZURE_API_BASE": os.environ.get("AZURE_API_BASE"),
+    "GITHUB_API_KEY": os.environ.get("GITHUB_API_KEY"),
+    "GEMINI_FREE_API_KEY": os.environ.get("GEMINI_FREE_API_KEY"),
+    "GEMINI_PAID_API_KEY": os.environ.get("GEMINI_PAID_API_KEY"),
+    "GEMINI_VERTEX_API_KEY": os.environ.get("GEMINI_VERTEX_API_KEY"),
+    "ANTHROPIC_API_KEY": os.environ.get("ANTHROPIC_API_KEY"),
+    "XAI_API_KEY": os.environ.get("XAI_API_KEY"),
+}
+
+
+async def main():
+    input_tokens = 0
+    output_tokens = 0
+    response = await workflow(
+        api_keys,
+        messages,
+        model,
+        api_type,
+        temperature,
+        stream,
+        thought,
+        code_execution,
+        structured_output_schema,
+    )
+    text = ""
+    thought_text = ""
+    code_text = ""
+    code_output_text = ""
+    files = []
+
+    if stream:
+        async for chunk in response:
+            pprint(chunk)
+            if chunk.text:
+                text += chunk.text
+            if chunk.thought:
+                thought_text += chunk.thought
+            if chunk.input_tokens:
+                input_tokens = chunk.input_tokens
+            if chunk.output_tokens:
+                output_tokens += chunk.output_tokens
+            if chunk.code:
+                code_text += chunk.code
+            if chunk.code_output:
+                code_output_text += chunk.code_output
+            if chunk.files:
+                files.extend(chunk.files)
+    else:
+        pprint(response)
+        text = response.text
+        thought_text = response.thought
+        code_text = response.code
+        code_output_text = response.code_output
+        input_tokens = response.input_tokens
+        output_tokens = response.output_tokens
+        files = response.files
+    total_cost = calculate_chat_cost(api_type, model, input_tokens, output_tokens)
+    print(f"Thought:\n{thought_text}\n")
+    print(f"Code:\n{code_text}\n")
+    print(f"Code Output:\n{code_output_text}\n")
+    print(f"Text:\n{text}\n")
+    print(f"Files:\n{files}\n")
+    print(f'Input tokens: {input_tokens}, Output tokens: {output_tokens}, Total cost: ${total_cost}')
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+    output_file.close()
--- llm_bridge-1.15.4/uv.lock
+++ llm_bridge-1.15.6/uv.lock
@@ -357,7 +357,7 @@ wheels = [
 
 [[package]]
 name = "llm-bridge"
-version = "1.15.4"
+version = "1.15.6"
 source = { editable = "." }
 dependencies = [
     { name = "anthropic" },
@@ -1,20 +0,0 @@
1
- import pytest
2
-
3
- from llm_bridge.type.message import Message, Role, Content, ContentType
4
-
5
-
6
- @pytest.fixture
7
- def sample_messages():
8
- return [
9
- Message(role=Role.System, contents=[
10
- Content(type=ContentType.Text, data="You are a helpful assistant.")
11
- ]),
12
- Message(role=Role.User, contents=[
13
- Content(type=ContentType.Text, data="Hello")
14
- ])
15
- ]
16
-
17
-
18
- @pytest.mark.asyncio
19
- async def test_placeholder():
20
- assert True
@@ -1,26 +0,0 @@
1
- import pytest
2
-
3
- from llm_bridge.logic.message_preprocess.message_preprocessor import extract_system_messages
4
- from llm_bridge.type.message import Message, Role, Content, ContentType
5
-
6
-
7
- @pytest.fixture
8
- def sample_messages():
9
- return [
10
- Message(role=Role.System, contents=[
11
- Content(type=ContentType.Text, data="You are a helpful assistant.")
12
- ]),
13
- Message(role=Role.User, contents=[
14
- Content(type=ContentType.Text, data="Hello")
15
- ])
16
- ]
17
-
18
- def test_extract_system_messages(sample_messages):
19
- extracted_text = extract_system_messages(sample_messages)
20
-
21
- assert extracted_text == "You are a helpful assistant.\n"
22
-
23
- assert len(sample_messages) == 1
24
- assert sample_messages[0].role == Role.User
25
- assert sample_messages[0].contents[0].type == ContentType.Text
26
- assert sample_messages[0].contents[0].data == "Hello"
@@ -1,226 +0,0 @@
1
- import asyncio
2
- import logging
3
- import os
4
- import sys
5
- from pprint import pprint
6
-
7
- from dotenv import load_dotenv
8
-
9
- from llm_bridge import *
10
- from usage.workflow import workflow
11
-
12
- output_file = open("./usage/output.log", "w", encoding="utf-8")
13
- sys.stdout = output_file
14
-
15
- logging.basicConfig(
16
- level=logging.INFO,
17
- format='%(asctime)s - %(levelname)s - %(message)s',
18
- stream=output_file
19
- )
20
-
21
- script_dir = os.path.dirname(os.path.abspath(__file__))
22
- env_path = os.path.join(script_dir, ".env")
23
- load_dotenv(env_path)
24
-
25
- api_keys = {
26
- "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY"),
27
- "AZURE_API_KEY": os.environ.get("AZURE_API_KEY"),
28
- "AZURE_API_BASE": os.environ.get("AZURE_API_BASE"),
29
- "GITHUB_API_KEY": os.environ.get("GITHUB_API_KEY"),
30
- "GEMINI_FREE_API_KEY": os.environ.get("GEMINI_FREE_API_KEY"),
31
- "GEMINI_PAID_API_KEY": os.environ.get("GEMINI_PAID_API_KEY"),
32
- "GEMINI_VERTEX_API_KEY": os.environ.get("GEMINI_VERTEX_API_KEY"),
33
- "ANTHROPIC_API_KEY": os.environ.get("ANTHROPIC_API_KEY"),
34
- "XAI_API_KEY": os.environ.get("XAI_API_KEY"),
35
- }
36
-
37
- structured_output_schema = {
38
- "$schema": "https://json-schema.org/draft/2020-12/schema",
39
- "$id": "https://example.com/product.schema.json",
40
- "title": "Product",
41
- "description": "A product from Acme's catalog",
42
- "type": "object",
43
- "properties": {
44
- "productId": {
45
- "description": "The unique identifier for a product",
46
- "type": "integer"
47
- },
48
- "productName": {
49
- "description": "Name of the product",
50
- "type": "string"
51
- },
52
- "price": {
53
- "description": "The price of the product",
54
- "type": "number",
55
- "exclusiveMinimum": 0
56
- },
57
- "tags": {
58
- "description": "Tags for the product",
59
- "type": "array",
60
- "items": {
61
- "type": "string"
62
- },
63
- "minItems": 1,
64
- "uniqueItems": True
65
- }
66
- },
67
- "required": [
68
- "productId",
69
- "productName",
70
- "price"
71
- ]
72
- }
73
- structured_output_schema = None
74
-
75
- messages = [
76
- Message(
77
- role=Role.System,
78
- contents=[
79
- Content(type=ContentType.Text, data="You are a helpful assistant.")
80
- ]
81
- ),
82
- Message(
83
- role=Role.User,
84
- contents=[
85
- Content(type=ContentType.Text, data="Hello")
86
- ]
87
- ),
88
- Message(
89
- role=Role.Assistant,
90
- contents=[
91
- Content(type=ContentType.Text, data="Hello! How can I assist you today?")
92
- ]
93
- ),
94
- Message(
95
- role=Role.User,
96
- contents=[
97
- # Thinking
98
- # Content(type=ContentType.Text, data="Explain the concept of Occam's Razor and provide a simple, everyday example."),
99
-
100
- # Web Search
101
- # Content(type=ContentType.Text, data="What's the weather in NYC today?"),
102
-
103
- # Image Understanding
104
- # Content(type=ContentType.File, data="https://www.gstatic.com/webp/gallery3/1.png"),
105
- # Content(type=ContentType.Text, data="What is in this image?"),
106
-
107
- # Image Generation
108
- # Content(type=ContentType.Text, data="Please generate an image of a cat."),
109
-
110
- # URL Context
111
- # Content(type=ContentType.Text, data="What is in https://www.windsnow1025.com/"),
112
-
113
- # Code Execution
114
- # Content(type=ContentType.Text, data="What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
115
-
116
- # File Output
117
- Content(type=ContentType.Text, data="Create a matplotlib visualization and save it as output.png"),
118
-
119
- # Structured Output
120
- # Content(type=ContentType.Text, data="Please generate a product."),
121
- ]
122
- ),
123
- # Message(
124
- # role=Role.User,
125
- # contents=[
126
- # # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746208707489-image.png"),
127
- # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746209841847-A%20Tutorial%20on%20Spectral%20Clustering.pdf"),
128
- # # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212253473-file_example_MP3_700KB.mp3"),
129
- # # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212980820-file_example_MP4_480_1_5MG.mp4"),
130
- # # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1753804900037-Calculus.docx"),
131
- # Content(type=ContentType.Text, data="What's this?"),
132
- # ]
133
- # ),
134
- ]
135
- # See /llm_bridge/resources/model_prices.json for available models
136
- model = "gpt-5.2"
137
- # model = "gpt-5.1"
138
- # model = "gpt-5-pro"
139
- # model = "gpt-5"
140
- # model = "gpt-4.1"
141
- # model = "gpt-5-codex"
142
- # model = "gemini-3-pro-preview"
143
- # model = "gemini-3-pro-image-preview"
144
- # model = "gemini-3-flash-preview"
145
- # model = "grok-4-1-fast-reasoning"
146
- # model = "claude-sonnet-4-5"
147
- # model = "claude-opus-4-5"
148
- # api_type = "Gemini-Vertex"
149
- # api_type = "Gemini-Free"
150
- # api_type = "Gemini-Paid"
151
- api_type = "OpenAI"
152
- # api_type = "OpenAI-Azure"
153
- # api_type = "OpenAI-GitHub"
154
- # api_type = "Claude"
155
- # api_type = "Grok"
156
- temperature = 0
157
- stream = True
158
- # stream = False
159
- thought = True
160
- # thought = False
161
- code_execution = True
162
- # code_execution = False
163
-
164
-
165
- async def main():
166
- model_prices = get_model_prices()
167
- pprint(model_prices)
168
- print(structured_output_schema)
169
-
170
- input_tokens = 0
171
- output_tokens = 0
172
- response = await workflow(
173
- api_keys,
174
- messages,
175
- model,
176
- api_type,
177
- temperature,
178
- stream,
179
- thought,
180
- code_execution,
181
- structured_output_schema,
182
- )
183
- text = ""
184
- thought_text = ""
185
- code_text = ""
186
- code_output_text = ""
187
- files = []
188
-
189
- if stream:
190
- async for chunk in response:
191
- pprint(chunk)
192
- if chunk.text:
193
- text += chunk.text
194
- if chunk.thought:
195
- thought_text += chunk.thought
196
- if chunk.input_tokens:
197
- input_tokens = chunk.input_tokens
198
- if chunk.output_tokens:
199
- output_tokens += chunk.output_tokens
200
- if chunk.code:
201
- code_text += chunk.code
202
- if chunk.code_output:
203
- code_output_text += chunk.code_output
204
- if chunk.files:
205
- files.extend(chunk.files)
206
- else:
207
- pprint(response)
208
- text = response.text
209
- thought_text = response.thought
210
- code_text = response.code
211
- code_output_text = response.code_output
212
- input_tokens = response.input_tokens
213
- output_tokens = response.output_tokens
214
- files = response.files
215
- total_cost = calculate_chat_cost(api_type, model, input_tokens, output_tokens)
216
- print(f"Thought:\n{thought_text}\n")
217
- print(f"Code:\n{code_text}\n")
218
- print(f"Code Output:\n{code_output_text}\n")
219
- print(f"Text:\n{text}\n")
220
- print(f"Files:\n{files}\n")
221
- print(f'Input tokens: {input_tokens}, Output tokens: {output_tokens}, Total cost: ${total_cost}')
222
-
223
-
224
- if __name__ == "__main__":
225
- asyncio.run(main())
226
- output_file.close()