LLM-Bridge 1.14.0a0__tar.gz → 1.14.0a1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. llm_bridge-1.14.0a1/.gitattributes +2 -0
  2. llm_bridge-1.14.0a1/.github/workflows/python-publish.yml +32 -0
  3. llm_bridge-1.14.0a1/.gitignore +160 -0
  4. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/PKG-INFO +10 -11
  5. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/pyproject.toml +16 -20
  6. llm_bridge-1.14.0a1/tests/__init__.py +0 -0
  7. llm_bridge-1.14.0a1/tests/chat_client_factory_test.py +20 -0
  8. llm_bridge-1.14.0a1/tests/message_preprocessor_test.py +26 -0
  9. llm_bridge-1.14.0a1/usage/.env.example +9 -0
  10. llm_bridge-1.14.0a1/usage/main.py +225 -0
  11. llm_bridge-1.14.0a1/usage/workflow.py +34 -0
  12. llm_bridge-1.14.0a1/uv.lock +1025 -0
  13. llm_bridge-1.14.0a0/LLM_Bridge.egg-info/PKG-INFO +0 -102
  14. llm_bridge-1.14.0a0/LLM_Bridge.egg-info/SOURCES.txt +0 -67
  15. llm_bridge-1.14.0a0/LLM_Bridge.egg-info/dependency_links.txt +0 -1
  16. llm_bridge-1.14.0a0/LLM_Bridge.egg-info/requires.txt +0 -11
  17. llm_bridge-1.14.0a0/LLM_Bridge.egg-info/top_level.txt +0 -1
  18. llm_bridge-1.14.0a0/setup.cfg +0 -4
  19. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/LICENSE +0 -0
  20. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/MANIFEST.in +0 -0
  21. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/README.md +0 -0
  22. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/__init__.py +0 -0
  23. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/__init__.py +0 -0
  24. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/chat_client.py +0 -0
  25. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/__init__.py +0 -0
  26. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  27. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
  28. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  29. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  30. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  31. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  32. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
  33. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  34. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  35. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  36. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  37. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  38. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
  39. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  40. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
  41. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  42. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/implementations/printing_status.py +0 -0
  43. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/model_client/__init__.py +0 -0
  44. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/model_client/claude_client.py +0 -0
  45. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/model_client/gemini_client.py +0 -0
  46. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/client/model_client/openai_client.py +0 -0
  47. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/__init__.py +0 -0
  48. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  49. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  50. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  51. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  52. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  53. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  54. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  55. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +0 -0
  56. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +0 -0
  57. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  58. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  59. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  60. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  61. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  62. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/file_fetch.py +0 -0
  63. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  64. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  65. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  66. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  67. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  68. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/logic/model_prices.py +0 -0
  69. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/resources/__init__.py +0 -0
  70. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/resources/model_prices.json +0 -0
  71. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/type/__init__.py +0 -0
  72. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/type/chat_response.py +0 -0
  73. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/type/message.py +0 -0
  74. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/type/model_message/__init__.py +0 -0
  75. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/type/model_message/claude_message.py +0 -0
  76. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/type/model_message/gemini_message.py +0 -0
  77. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/type/model_message/openai_message.py +0 -0
  78. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  79. {llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/llm_bridge/type/serializer.py +0 -0
llm_bridge-1.14.0a1/.gitattributes
@@ -0,0 +1,2 @@
+ # Auto detect text files and perform LF normalization
+ * text=auto
llm_bridge-1.14.0a1/.github/workflows/python-publish.yml
@@ -0,0 +1,32 @@
+ name: "Publish"
+
+ on:
+   push:
+     tags:
+       # Publish on any tag starting with a `v`, e.g., v0.1.0
+       - v*
+
+ jobs:
+   run:
+     runs-on: ubuntu-latest
+     environment:
+       name: pypi
+     permissions:
+       id-token: write
+       contents: read
+     steps:
+       - name: Checkout
+         uses: actions/checkout@v5
+       - name: Install uv
+         uses: astral-sh/setup-uv@v7
+       - name: Install Python 3.12
+         run: uv python install 3.12
+       - name: Build
+         run: uv build
+       # Check that basic features work and we didn't miss to include crucial files
+       # - name: Smoke test (wheel)
+       #   run: uv run --isolated --no-project --with dist/*.whl tests/smoke_test.py
+       # - name: Smoke test (source distribution)
+       #   run: uv run --isolated --no-project --with dist/*.tar.gz tests/smoke_test.py
+       - name: Publish
+         run: uv publish
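
Note that the smoke-test steps are commented out and no tests/smoke_test.py ships in this release. Purely as an assumption, a minimal sketch of the import-and-construct check those steps would run might look like:

# Hypothetical tests/smoke_test.py -- not part of this package. It only
# verifies that the built distribution installs and its public names import,
# using the same types the tests in this diff already use.
from llm_bridge import Message, Role, Content, ContentType, get_model_prices

message = Message(role=Role.User, contents=[
    Content(type=ContentType.Text, data="ping")
])
assert message.role == Role.User
assert get_model_prices()  # the bundled model_prices.json should load
print("smoke test passed")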
llm_bridge-1.14.0a1/.gitignore
@@ -0,0 +1,160 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ .idea/
{llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/PKG-INFO
@@ -1,27 +1,26 @@
  Metadata-Version: 2.4
  Name: LLM-Bridge
- Version: 1.14.0a0
+ Version: 1.14.0a1
  Summary: A Bridge for LLMs
  Author-email: windsnow1025 <windsnow1025@gmail.com>
  License-Expression: MIT
- Keywords: llm,ai
+ License-File: LICENSE
+ Keywords: ai,llm
  Classifier: Framework :: FastAPI
  Classifier: Programming Language :: Python :: 3
  Requires-Python: >=3.12
- Description-Content-Type: text/markdown
- License-File: LICENSE
+ Requires-Dist: anthropic==0.75.0
+ Requires-Dist: docxlatex>=1.1.1
  Requires-Dist: fastapi
+ Requires-Dist: google-genai==1.46.0
  Requires-Dist: httpx
- Requires-Dist: tenacity
  Requires-Dist: openai==2.9.0
- Requires-Dist: tiktoken==0.11.0
- Requires-Dist: google-genai==1.46.0
- Requires-Dist: anthropic==0.75.0
- Requires-Dist: PyMuPDF
- Requires-Dist: docxlatex>=1.1.1
  Requires-Dist: openpyxl
+ Requires-Dist: pymupdf
  Requires-Dist: python-pptx
- Dynamic: license-file
+ Requires-Dist: tenacity
+ Requires-Dist: tiktoken==0.11.0
+ Description-Content-Type: text/markdown

  # LLM Bridge

{llm_bridge-1.14.0a0 → llm_bridge-1.14.0a1}/pyproject.toml
@@ -1,22 +1,10 @@
  [build-system]
- requires = ["setuptools"]
- build-backend = "setuptools.build_meta"
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"

  [project]
  name = "LLM-Bridge"
- version = "1.14.0-alpha.0"
- authors = [
-     {name = "windsnow1025", email = "windsnow1025@gmail.com"}
- ]
- description = "A Bridge for LLMs"
- readme = "README.md"
- requires-python = ">=3.12"
- keywords = ["llm", "ai"]
- license = "MIT"
- classifiers = [
-     "Framework :: FastAPI",
-     "Programming Language :: Python :: 3",
- ]
+ version = "1.14.0-alpha.1"
  dependencies = [
      "fastapi",
      "httpx",
@@ -30,6 +18,18 @@ dependencies = [
      "openpyxl",
      "python-pptx", # pptx
  ]
+ requires-python = ">=3.12"
+ authors = [
+     {name = "windsnow1025", email = "windsnow1025@gmail.com"}
+ ]
+ description = "A Bridge for LLMs"
+ readme = "README.md"
+ license = "MIT"
+ keywords = ["llm", "ai"]
+ classifiers = [
+     "Framework :: FastAPI",
+     "Programming Language :: Python :: 3",
+ ]

  [dependency-groups]
  dev = [
@@ -38,9 +38,5 @@ dev = [
      "python-dotenv", #dotenv
  ]

- [tool.setuptools.packages.find]
- where = ["."]
- include = ["llm_bridge*"]
-
  [tool.pytest.ini_options]
- asyncio_default_fixture_loop_scope = "function"
+ asyncio_default_fixture_loop_scope = "function"
llm_bridge-1.14.0a1/tests/__init__.py
File without changes
llm_bridge-1.14.0a1/tests/chat_client_factory_test.py
@@ -0,0 +1,20 @@
+ import pytest
+
+ from llm_bridge.type.message import Message, Role, Content, ContentType
+
+
+ @pytest.fixture
+ def sample_messages():
+     return [
+         Message(role=Role.System, contents=[
+             Content(type=ContentType.Text, data="You are a helpful assistant.")
+         ]),
+         Message(role=Role.User, contents=[
+             Content(type=ContentType.Text, data="Hello")
+         ])
+     ]
+
+
+ @pytest.mark.asyncio
+ async def test_placeholder():
+     assert True
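
test_placeholder above asserts only True and leaves the sample_messages fixture unused. A hedged sketch of the factory test it could grow into, assuming (as the call in usage/workflow.py suggests) that create_chat_client constructs a client object without a live API call and a dummy key suffices:

# Hypothetical extension of the placeholder test -- not in the package.
from llm_bridge import create_chat_client


@pytest.mark.asyncio
async def test_create_chat_client(sample_messages):
    chat_client = await create_chat_client(
        api_keys={"OPENAI_API_KEY": "sk-test"},  # dummy key, assumed sufficient
        messages=sample_messages,
        model="gpt-4.1",
        api_type="OpenAI",
        temperature=0,
        stream=False,
        thought=False,
        code_execution=False,
        structured_output_schema=None,
    )
    assert chat_client is not None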
llm_bridge-1.14.0a1/tests/message_preprocessor_test.py
@@ -0,0 +1,26 @@
+ import pytest
+
+ from llm_bridge.logic.message_preprocess.message_preprocessor import extract_system_messages
+ from llm_bridge.type.message import Message, Role, Content, ContentType
+
+
+ @pytest.fixture
+ def sample_messages():
+     return [
+         Message(role=Role.System, contents=[
+             Content(type=ContentType.Text, data="You are a helpful assistant.")
+         ]),
+         Message(role=Role.User, contents=[
+             Content(type=ContentType.Text, data="Hello")
+         ])
+     ]
+
+ def test_extract_system_messages(sample_messages):
+     extracted_text = extract_system_messages(sample_messages)
+
+     assert extracted_text == "You are a helpful assistant.\n"
+
+     assert len(sample_messages) == 1
+     assert sample_messages[0].role == Role.User
+     assert sample_messages[0].contents[0].type == ContentType.Text
+     assert sample_messages[0].contents[0].data == "Hello"
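
The assertions above pin down the contract of extract_system_messages: it returns the concatenated system text with a trailing newline and removes the system messages from the list in place, leaving only the user message. The shipped implementation is not part of this diff, so the following is only an illustrative sketch of that contract:

from llm_bridge.type.message import Message, Role, ContentType


# Illustrative only -- the real implementation lives in
# llm_bridge/logic/message_preprocess/message_preprocessor.py.
def extract_system_messages(messages: list[Message]) -> str:
    system_text = ""
    for message in messages:
        if message.role == Role.System:
            for content in message.contents:
                if content.type == ContentType.Text:
                    system_text += content.data + "\n"
    # Mutate the caller's list so only non-system messages remain.
    messages[:] = [m for m in messages if m.role != Role.System]
    return system_text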
llm_bridge-1.14.0a1/usage/.env.example
@@ -0,0 +1,9 @@
+ OPENAI_API_KEY=
+ GEMINI_FREE_API_KEY=
+ GEMINI_PAID_API_KEY=
+ GEMINI_VERTEX_API_KEY=
+ ANTHROPIC_API_KEY=
+ AZURE_API_KEY=
+ AZURE_API_BASE=
+ GITHUB_API_KEY=
+ XAI_API_KEY=
llm_bridge-1.14.0a1/usage/main.py
@@ -0,0 +1,225 @@
+ import asyncio
+ import logging
+ import os
+ import sys
+ from pprint import pprint
+
+ from dotenv import load_dotenv
+
+ from llm_bridge import *
+ from usage.workflow import workflow
+
+ output_file = open("./usage/output.log", "w", encoding="utf-8")
+ sys.stdout = output_file
+
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(levelname)s - %(message)s',
+     stream=output_file
+ )
+
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ env_path = os.path.join(script_dir, ".env")
+ load_dotenv(env_path)
+
+ api_keys = {
+     "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY"),
+     "AZURE_API_KEY": os.environ.get("AZURE_API_KEY"),
+     "AZURE_API_BASE": os.environ.get("AZURE_API_BASE"),
+     "GITHUB_API_KEY": os.environ.get("GITHUB_API_KEY"),
+     "GEMINI_FREE_API_KEY": os.environ.get("GEMINI_FREE_API_KEY"),
+     "GEMINI_PAID_API_KEY": os.environ.get("GEMINI_PAID_API_KEY"),
+     "GEMINI_VERTEX_API_KEY": os.environ.get("GEMINI_VERTEX_API_KEY"),
+     "ANTHROPIC_API_KEY": os.environ.get("ANTHROPIC_API_KEY"),
+     "XAI_API_KEY": os.environ.get("XAI_API_KEY"),
+ }
+
+ structured_output_schema = {
+     "$schema": "https://json-schema.org/draft/2020-12/schema",
+     "$id": "https://example.com/product.schema.json",
+     "title": "Product",
+     "description": "A product from Acme's catalog",
+     "type": "object",
+     "properties": {
+         "productId": {
+             "description": "The unique identifier for a product",
+             "type": "integer"
+         },
+         "productName": {
+             "description": "Name of the product",
+             "type": "string"
+         },
+         "price": {
+             "description": "The price of the product",
+             "type": "number",
+             "exclusiveMinimum": 0
+         },
+         "tags": {
+             "description": "Tags for the product",
+             "type": "array",
+             "items": {
+                 "type": "string"
+             },
+             "minItems": 1,
+             "uniqueItems": True
+         }
+     },
+     "required": [
+         "productId",
+         "productName",
+         "price"
+     ]
+ }
+ # structured_output_schema = None
+
+ messages = [
+     Message(
+         role=Role.System,
+         contents=[
+             Content(type=ContentType.Text, data="You are a helpful assistant.")
+         ]
+     ),
+     Message(
+         role=Role.User,
+         contents=[
+             Content(type=ContentType.Text, data="Hello")
+         ]
+     ),
+     Message(
+         role=Role.Assistant,
+         contents=[
+             Content(type=ContentType.Text, data="Hello! How can I assist you today?")
+         ]
+     ),
+     Message(
+         role=Role.User,
+         contents=[
+             # Thinking
+             # Content(type=ContentType.Text, data="Explain the concept of Occam's Razor and provide a simple, everyday example."),
+
+             # Web Search
+             # Content(type=ContentType.Text, data="What's the weather in NYC today?"),
+
+             # Image Understanding
+             # Content(type=ContentType.File, data="https://www.gstatic.com/webp/gallery3/1.png"),
+             # Content(type=ContentType.Text, data="What is in this image?"),
+
+             # Image Generation
+             # Content(type=ContentType.Text, data="Please generate an image of a cat."),
+
+             # URL Context
+             # Content(type=ContentType.Text, data="What is in https://www.windsnow1025.com/"),
+
+             # Code Execution
+             # Content(type=ContentType.Text, data="What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
+
+             # File Output
+             # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1758384216123-script.py"),
+             # Content(type=ContentType.Text, data="Please implement a minimum example of Neural Network in `script.py`"),
+
+             # Structured Output
+             Content(type=ContentType.Text, data="Please generate a product."),
+         ]
+     ),
+     # Message(
+     #     role=Role.User,
+     #     contents=[
+     #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746208707489-image.png"),
+     #         Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746209841847-A%20Tutorial%20on%20Spectral%20Clustering.pdf"),
+     #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212253473-file_example_MP3_700KB.mp3"),
+     #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212980820-file_example_MP4_480_1_5MG.mp4"),
+     #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1753804900037-Calculus.docx"),
+     #         Content(type=ContentType.Text, data="What's this?"),
+     #     ]
+     # ),
+ ]
+ # See /llm_bridge/resources/model_prices.json for available models
+ # model = "gpt-5.1"
+ # model = "gpt-5-pro"
+ # model = "gpt-5"
+ # model = "gpt-4.1"
+ # model = "gemini-3-pro-preview"
+ # model = "gemini-3-pro-image-preview"
+ # model = "gemini-flash-latest"
+ # model = "grok-4-1-fast-reasoning"
+ model = "claude-sonnet-4-5"
+ # model = "claude-opus-4-5"
+ # api_type = "Gemini-Vertex"
+ # api_type = "Gemini-Free"
+ # api_type = "Gemini-Paid"
+ # api_type = "OpenAI"
+ # api_type = "OpenAI-Azure"
+ # api_type = "OpenAI-GitHub"
+ api_type = "Claude"
+ # api_type = "Grok"
+ temperature = 0
+ stream = True
+ # stream = False
+ thought = True
+ # thought = False
+ code_execution = True
+ # code_execution = False
+
+
+ async def main():
+     model_prices = get_model_prices()
+     pprint(model_prices)
+     print(structured_output_schema)
+
+     input_tokens = 0
+     output_tokens = 0
+     response = await workflow(
+         api_keys,
+         messages,
+         model,
+         api_type,
+         temperature,
+         stream,
+         thought,
+         code_execution,
+         structured_output_schema,
+     )
+     text = ""
+     thought_text = ""
+     code_text = ""
+     code_output_text = ""
+     files = []
+
+     if stream:
+         async for chunk in response:
+             pprint(chunk)
+             if chunk.text:
+                 text += chunk.text
+             if chunk.thought:
+                 thought_text += chunk.thought
+             if chunk.input_tokens:
+                 input_tokens = chunk.input_tokens
+             if chunk.output_tokens:
+                 output_tokens += chunk.output_tokens
+             if chunk.code:
+                 code_text += chunk.code
+             if chunk.code_output:
+                 code_output_text += chunk.code_output
+             if chunk.files:
+                 files.extend(chunk.files)
+     else:
+         pprint(response)
+         text = response.text
+         thought_text = response.thought
+         code_text = response.code
+         code_output_text = response.code_output
+         input_tokens = response.input_tokens
+         output_tokens = response.output_tokens
+         files = response.files
+     total_cost = calculate_chat_cost(api_type, model, input_tokens, output_tokens)
+     print(f"Thought:\n{thought_text}\n")
+     print(f"Code:\n{code_text}\n")
+     print(f"Code Output:\n{code_output_text}\n")
+     print(f"Text:\n{text}\n")
+     print(f"Files:\n{files}\n")
+     print(f'Input tokens: {input_tokens}, Output tokens: {output_tokens}, Total cost: ${total_cost}')
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
+     output_file.close()
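
For reference, a model response satisfying the structured_output_schema defined above would be a JSON object along these lines (values are illustrative):

# Illustrative instance of the Product schema from usage/main.py.
product = {
    "productId": 42,                  # integer, required
    "productName": "Acme Anvil",      # string, required
    "price": 19.99,                   # number > 0 (exclusiveMinimum), required
    "tags": ["hardware", "classic"],  # optional: unique strings, at least one
}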
llm_bridge-1.14.0a1/usage/workflow.py
@@ -0,0 +1,34 @@
+ from typing import AsyncGenerator, Any
+
+ from llm_bridge import *
+
+
+ async def workflow(
+         api_keys: dict[str, str],
+         messages: list[Message],
+         model: str,
+         api_type: str,
+         temperature: float,
+         stream: bool,
+         thought: bool,
+         code_execution: bool,
+         structured_output_schema: dict[str, Any] | None,
+ ) -> ChatResponse | AsyncGenerator[ChatResponse, None]:
+     await preprocess_messages(messages, api_type)
+
+     chat_client = await create_chat_client(
+         api_keys=api_keys,
+         messages=messages,
+         model=model,
+         api_type=api_type,
+         temperature=temperature,
+         stream=stream,
+         thought=thought,
+         code_execution=code_execution,
+         structured_output_schema=structured_output_schema,
+     )
+
+     if stream:
+         return chat_client.generate_stream_response()
+     else:
+         return await chat_client.generate_non_stream_response()
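
Putting the two usage files together, a minimal non-streaming call to workflow might look like the following sketch; it assumes an OPENAI_API_KEY in the environment and borrows a model name and api_type from the options listed in usage/main.py:

import asyncio
import os

from llm_bridge import Message, Role, Content, ContentType
from usage.workflow import workflow


async def demo():
    # stream=False, so workflow returns a single awaited ChatResponse.
    response = await workflow(
        api_keys={"OPENAI_API_KEY": os.environ["OPENAI_API_KEY"]},
        messages=[
            Message(role=Role.User, contents=[
                Content(type=ContentType.Text, data="Hello")
            ])
        ],
        model="gpt-4.1",
        api_type="OpenAI",
        temperature=0,
        stream=False,
        thought=False,
        code_execution=False,
        structured_output_schema=None,
    )
    print(response.text)


asyncio.run(demo())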