LLM-Bridge 1.15.6__tar.gz → 1.15.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. llm_bridge-1.15.8/.run/pytest.run.xml +18 -0
  2. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/PKG-INFO +2 -8
  3. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/README.md +1 -7
  4. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +13 -7
  5. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/message_preprocess/file_type_checker.py +3 -3
  6. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/pyproject.toml +4 -4
  7. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/tests/file_type_checker_test.py +8 -1
  8. llm_bridge-1.15.8/usage/config.py +132 -0
  9. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/uv.lock +4 -4
  10. llm_bridge-1.15.6/usage/config.py +0 -129
  11. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/.gitattributes +0 -0
  12. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/.github/workflows/python-publish.yml +0 -0
  13. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/.github/workflows/python-test.yaml +0 -0
  14. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/.gitignore +0 -0
  15. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/LICENSE +0 -0
  16. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/MANIFEST.in +0 -0
  17. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/__init__.py +0 -0
  18. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/__init__.py +0 -0
  19. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/chat_client.py +0 -0
  20. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/__init__.py +0 -0
  21. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  22. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
  23. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  24. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  25. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  26. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  27. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +0 -0
  28. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  29. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  30. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  31. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  32. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  33. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +0 -0
  34. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/openai/openai_responses_response_handler.py +0 -0
  35. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  36. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +0 -0
  37. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  38. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/implementations/printing_status.py +0 -0
  39. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/model_client/__init__.py +0 -0
  40. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/model_client/claude_client.py +0 -0
  41. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/model_client/gemini_client.py +0 -0
  42. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/client/model_client/openai_client.py +0 -0
  43. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/__init__.py +0 -0
  44. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  45. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  46. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  47. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  48. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  49. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  50. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  51. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +0 -0
  52. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +0 -0
  53. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  54. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  55. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  56. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  57. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/file_fetch.py +0 -0
  58. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  59. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  60. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  61. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  62. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/logic/model_prices.py +0 -0
  63. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/resources/__init__.py +0 -0
  64. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/resources/model_prices.json +0 -0
  65. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/type/__init__.py +0 -0
  66. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/type/chat_response.py +0 -0
  67. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/type/message.py +0 -0
  68. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/type/model_message/__init__.py +0 -0
  69. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/type/model_message/claude_message.py +0 -0
  70. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/type/model_message/gemini_message.py +0 -0
  71. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/type/model_message/openai_message.py +0 -0
  72. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  73. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/llm_bridge/type/serializer.py +0 -0
  74. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/tests/__init__.py +0 -0
  75. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/tests/model_prices_test.py +0 -0
  76. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/usage/.env.example +0 -0
  77. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/usage/main.py +0 -0
  78. {llm_bridge-1.15.6 → llm_bridge-1.15.8}/usage/workflow.py +0 -0
@@ -0,0 +1,18 @@
1
+ <component name="ProjectRunConfigurationManager">
2
+ <configuration default="false" name="pytest" type="UvRunConfigurationType" factoryName="UvRunConfigurationType">
3
+ <option name="args">
4
+ <list />
5
+ </option>
6
+ <option name="checkSync" value="true" />
7
+ <option name="env">
8
+ <map />
9
+ </option>
10
+ <option name="runType" value="MODULE" />
11
+ <option name="scriptOrModule" value="pytest" />
12
+ <option name="uvArgs">
13
+ <list />
14
+ </option>
15
+ <option name="uvSdkKey" value="uv (LLM-Bridge)" />
16
+ <method v="2" />
17
+ </configuration>
18
+ </component>
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: LLM-Bridge
3
- Version: 1.15.6
3
+ Version: 1.15.8
4
4
  Summary: A Bridge for LLMs
5
5
  Author-email: windsnow1025 <windsnow1025@gmail.com>
6
6
  License-Expression: MIT
@@ -24,7 +24,7 @@ Description-Content-Type: text/markdown
24
24
 
25
25
  # LLM Bridge
26
26
 
27
- LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI (Native / Azure / GitHub), Gemini (AI Studio / Vertex), Claude, and Grok.
27
+ LLM Bridge is a unified API wrapper for native interactions with various LLM providers.
28
28
 
29
29
  GitHub: [https://github.com/windsnow1025/LLM-Bridge](https://github.com/windsnow1025/LLM-Bridge)
30
30
 
@@ -81,12 +81,6 @@ The features listed represent the maximum capabilities of each API type supporte
81
81
 
82
82
  Copy `./usage/.env.example` and rename it to `./usage/.env`, then fill in the environment variables.
83
83
 
84
- ### Test
85
-
86
- ```bash
87
- uv run pytest
88
- ```
89
-
90
84
  ### Build
91
85
 
92
86
  ```bash
@@ -1,6 +1,6 @@
1
1
  # LLM Bridge
2
2
 
3
- LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI (Native / Azure / GitHub), Gemini (AI Studio / Vertex), Claude, and Grok.
3
+ LLM Bridge is a unified API wrapper for native interactions with various LLM providers.
4
4
 
5
5
  GitHub: [https://github.com/windsnow1025/LLM-Bridge](https://github.com/windsnow1025/LLM-Bridge)
6
6
 
@@ -57,12 +57,6 @@ The features listed represent the maximum capabilities of each API type supporte
57
57
 
58
58
  Copy `./usage/.env.example` and rename it to `./usage/.env`, then fill in the environment variables.
59
59
 
60
- ### Test
61
-
62
- ```bash
63
- uv run pytest
64
- ```
65
-
66
60
  ### Build
67
61
 
68
62
  ```bash
@@ -3,18 +3,19 @@ from openai.types.responses import ResponseInputTextParam, ResponseInputImagePar
3
3
 
4
4
  from llm_bridge.logic.chat_generate import media_processor
5
5
  from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type, get_filename_without_timestamp
6
- from llm_bridge.type.message import Message, ContentType
6
+ from llm_bridge.type.message import Message, ContentType, Role
7
7
  from llm_bridge.type.model_message.openai_responses_message import OpenAIResponsesMessage
8
8
 
9
9
 
10
10
  async def convert_message_to_openai_responses(message: Message) -> OpenAIResponsesMessage:
11
- role = message.role.value
11
+ role = message.role
12
12
  content: list[ResponseInputContentParam | ResponseOutputTextParam] = []
13
+ contains_pdf = False
13
14
 
14
15
  for content_item in message.contents:
15
16
  if content_item.type == ContentType.Text:
16
- if role == "assistant":
17
- text_content = ResponseOutputTextParam(type="output_text", text=content_item.data)
17
+ if role == Role.Assistant:
18
+ text_content = ResponseOutputTextParam(type="output_text", text=content_item.data, annotations=[])
18
19
  else:
19
20
  text_content = ResponseInputTextParam(type="input_text", text=content_item.data)
20
21
  content.append(text_content)
@@ -30,6 +31,7 @@ async def convert_message_to_openai_responses(message: Message) -> OpenAIRespons
30
31
  )
31
32
  content.append(image_content)
32
33
  elif sub_type == "pdf":
34
+ contains_pdf = True
33
35
  file_data, _ = await media_processor.get_base64_content_from_url(file_url)
34
36
  pdf_content = ResponseInputFileParam(
35
37
  type="input_file",
@@ -52,7 +54,11 @@ async def convert_message_to_openai_responses(message: Message) -> OpenAIRespons
52
54
  )
53
55
  content.append(text_content)
54
56
 
55
- if role in ("user", "system"):
56
- return EasyInputMessageParam(role=role, content=content)
57
+ # Force system role to user if the message contains a PDF
58
+ if role == Role.System and contains_pdf:
59
+ role = Role.User
60
+
61
+ if role in (Role.User, Role.System):
62
+ return EasyInputMessageParam(role=role.value, content=content)
57
63
  else:
58
- return ResponseOutputMessageParam(role=role, content=content)
64
+ return ResponseOutputMessageParam(role=role.value, content=content)
@@ -15,11 +15,11 @@ def is_file_type_supported(file_name: str) -> bool:
15
15
 
16
16
 
17
17
  async def get_file_type(file_url: str) -> tuple[str, str]:
18
- file_name = get_filename_without_timestamp(file_url)
18
+ file_name: str = get_filename_without_timestamp(file_url)
19
19
 
20
20
  # Treat filenames without an extension as their own extension
21
- suffix = PurePosixPath(file_name).suffix.lower()
22
- file_extension = suffix if suffix else '.' + file_name.lower()
21
+ suffix: str = PurePosixPath(file_name).suffix.lower()
22
+ file_extension: str = suffix if suffix else '.' + file_name.lower()
23
23
 
24
24
  if file_extension in code_file_extensions:
25
25
  return 'text', 'code'
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "LLM-Bridge"
7
- version = "1.15.6"
7
+ version = "1.15.8"
8
8
  dependencies = [
9
9
  "fastapi",
10
10
  "httpx",
@@ -33,9 +33,9 @@ classifiers = [
33
33
 
34
34
  [dependency-groups]
35
35
  dev = [
36
- "pytest",
37
- "pytest-asyncio",
38
- "python-dotenv", #dotenv
36
+ "pytest==9.0.2",
37
+ "pytest-asyncio==1.3.0",
38
+ "python-dotenv==1.2.1", #dotenv
39
39
  ]
40
40
 
41
41
  [tool.pytest.ini_options]
@@ -5,13 +5,20 @@ from llm_bridge.logic.message_preprocess.file_type_checker import get_file_type
5
5
 
6
6
  @pytest.mark.asyncio
7
7
  async def test_get_file_type_with_extension():
8
+ file_type, sub_type = await get_file_type("https://example.com/1767243600000-markdown.md")
9
+ assert file_type == "text"
10
+ assert sub_type == "code"
11
+
12
+
13
+ @pytest.mark.asyncio
14
+ async def test_get_file_type_pdf():
8
15
  file_type, sub_type = await get_file_type("https://example.com/1767243600000-document.pdf")
9
16
  assert file_type == "text"
10
17
  assert sub_type == "pdf"
11
18
 
12
19
 
13
20
  @pytest.mark.asyncio
14
- async def test_get_file_type_without_extension():
21
+ async def test_get_file_type_dockerfile():
15
22
  file_type, sub_type = await get_file_type("https://example.com/1767243600000-Dockerfile")
16
23
  assert file_type == "text"
17
24
  assert sub_type == "code"
@@ -0,0 +1,132 @@
1
+ from llm_bridge import *
2
+
3
+ structured_output_schema = {
4
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
5
+ "$id": "https://example.com/product.schema.json",
6
+ "title": "Product",
7
+ "description": "A product from Acme's catalog",
8
+ "type": "object",
9
+ "properties": {
10
+ "productId": {
11
+ "description": "The unique identifier for a product",
12
+ "type": "integer"
13
+ },
14
+ "productName": {
15
+ "description": "Name of the product",
16
+ "type": "string"
17
+ },
18
+ "price": {
19
+ "description": "The price of the product",
20
+ "type": "number",
21
+ "exclusiveMinimum": 0
22
+ },
23
+ "tags": {
24
+ "description": "Tags for the product",
25
+ "type": "array",
26
+ "items": {
27
+ "type": "string"
28
+ },
29
+ "minItems": 1,
30
+ "uniqueItems": True
31
+ }
32
+ },
33
+ "required": [
34
+ "productId",
35
+ "productName",
36
+ "price"
37
+ ]
38
+ }
39
+ structured_output_schema = None
40
+
41
+ messages = [
42
+ Message(
43
+ role=Role.System,
44
+ contents=[
45
+ # Content(type=ContentType.Text, data="You are a helpful assistant."),
46
+ Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746209841847-A%20Tutorial%20on%20Spectral%20Clustering.pdf")
47
+ ]
48
+ ),
49
+ # Message(
50
+ # role=Role.User,
51
+ # contents=[
52
+ # Content(type=ContentType.Text, data="Hello")
53
+ # ]
54
+ # ),
55
+ # Message(
56
+ # role=Role.Assistant,
57
+ # contents=[
58
+ # Content(type=ContentType.Text, data="Hello! How can I assist you today?")
59
+ # ]
60
+ # ),
61
+ Message(
62
+ role=Role.User,
63
+ contents=[
64
+ # Simple Question
65
+ Content(type=ContentType.Text, data="What's this?"),
66
+
67
+ # Thinking
68
+ # Content(type=ContentType.Text, data="Explain the concept of Occam's Razor and provide a simple, everyday example."),
69
+
70
+ # Web Search
71
+ # Content(type=ContentType.Text, data="What's the weather in NYC today?"),
72
+
73
+ # Image Understanding
74
+ # Content(type=ContentType.File, data="https://www.gstatic.com/webp/gallery3/1.png"),
75
+ # Content(type=ContentType.Text, data="What is in this image?"),
76
+
77
+ # Image Generation
78
+ # Content(type=ContentType.Text, data="Please generate an image of a cat."),
79
+
80
+ # URL Context
81
+ # Content(type=ContentType.Text, data="What is in https://www.windsnow1025.com/"),
82
+
83
+ # Code Execution
84
+ # Content(type=ContentType.Text, data="What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
85
+
86
+ # File Output
87
+ # Content(type=ContentType.Text, data="Create a matplotlib visualization and save it as output.png"),
88
+
89
+ # Structured Output
90
+ # Content(type=ContentType.Text, data="Please generate a product."),
91
+ ]
92
+ ),
93
+ # Message(
94
+ # role=Role.User,
95
+ # contents=[
96
+ # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1769429581512-Test.txt"),
97
+ # # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746208707489-image.png"),
98
+ # # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746209841847-A%20Tutorial%20on%20Spectral%20Clustering.pdf"),
99
+ # # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212253473-file_example_MP3_700KB.mp3"),
100
+ # # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212980820-file_example_MP4_480_1_5MG.mp4"),
101
+ # Content(type=ContentType.Text, data="What's this?"),
102
+ # ]
103
+ # ),
104
+ ]
105
+ # See /llm_bridge/resources/model_prices.json for available models
106
+ model = "gpt-5.2"
107
+ # model = "gpt-5.1"
108
+ # model = "gpt-5-pro"
109
+ # model = "gpt-5"
110
+ # model = "gpt-4.1"
111
+ # model = "gpt-5-codex"
112
+ # model = "gemini-3-pro-preview"
113
+ # model = "gemini-3-pro-image-preview"
114
+ # model = "gemini-3-flash-preview"
115
+ # model = "grok-4-1-fast-reasoning"
116
+ # model = "claude-sonnet-4-5"
117
+ # model = "claude-opus-4-5"
118
+ # api_type = "Gemini-Vertex"
119
+ # api_type = "Gemini-Free"
120
+ # api_type = "Gemini-Paid"
121
+ api_type = "OpenAI"
122
+ # api_type = "OpenAI-Azure"
123
+ # api_type = "OpenAI-GitHub"
124
+ # api_type = "Claude"
125
+ # api_type = "Grok"
126
+ temperature = 0
127
+ stream = True
128
+ # stream = False
129
+ thought = True
130
+ # thought = False
131
+ # code_execution = True
132
+ code_execution = False
@@ -357,7 +357,7 @@ wheels = [
357
357
 
358
358
  [[package]]
359
359
  name = "llm-bridge"
360
- version = "1.15.6"
360
+ version = "1.15.8"
361
361
  source = { editable = "." }
362
362
  dependencies = [
363
363
  { name = "anthropic" },
@@ -397,9 +397,9 @@ requires-dist = [
397
397
 
398
398
  [package.metadata.requires-dev]
399
399
  dev = [
400
- { name = "pytest" },
401
- { name = "pytest-asyncio" },
402
- { name = "python-dotenv" },
400
+ { name = "pytest", specifier = "==9.0.2" },
401
+ { name = "pytest-asyncio", specifier = "==1.3.0" },
402
+ { name = "python-dotenv", specifier = "==1.2.1" },
403
403
  ]
404
404
 
405
405
  [[package]]
@@ -1,129 +0,0 @@
1
- from llm_bridge import *
2
-
3
- structured_output_schema = {
4
- "$schema": "https://json-schema.org/draft/2020-12/schema",
5
- "$id": "https://example.com/product.schema.json",
6
- "title": "Product",
7
- "description": "A product from Acme's catalog",
8
- "type": "object",
9
- "properties": {
10
- "productId": {
11
- "description": "The unique identifier for a product",
12
- "type": "integer"
13
- },
14
- "productName": {
15
- "description": "Name of the product",
16
- "type": "string"
17
- },
18
- "price": {
19
- "description": "The price of the product",
20
- "type": "number",
21
- "exclusiveMinimum": 0
22
- },
23
- "tags": {
24
- "description": "Tags for the product",
25
- "type": "array",
26
- "items": {
27
- "type": "string"
28
- },
29
- "minItems": 1,
30
- "uniqueItems": True
31
- }
32
- },
33
- "required": [
34
- "productId",
35
- "productName",
36
- "price"
37
- ]
38
- }
39
- structured_output_schema = None
40
-
41
- messages = [
42
- Message(
43
- role=Role.System,
44
- contents=[
45
- Content(type=ContentType.Text, data="You are a helpful assistant.")
46
- ]
47
- ),
48
- Message(
49
- role=Role.User,
50
- contents=[
51
- Content(type=ContentType.Text, data="Hello")
52
- ]
53
- ),
54
- Message(
55
- role=Role.Assistant,
56
- contents=[
57
- Content(type=ContentType.Text, data="Hello! How can I assist you today?")
58
- ]
59
- ),
60
- # Message(
61
- # role=Role.User,
62
- # contents=[
63
- # # Thinking
64
- # # Content(type=ContentType.Text, data="Explain the concept of Occam's Razor and provide a simple, everyday example."),
65
- #
66
- # # Web Search
67
- # # Content(type=ContentType.Text, data="What's the weather in NYC today?"),
68
- #
69
- # # Image Understanding
70
- # # Content(type=ContentType.File, data="https://www.gstatic.com/webp/gallery3/1.png"),
71
- # # Content(type=ContentType.Text, data="What is in this image?"),
72
- #
73
- # # Image Generation
74
- # # Content(type=ContentType.Text, data="Please generate an image of a cat."),
75
- #
76
- # # URL Context
77
- # # Content(type=ContentType.Text, data="What is in https://www.windsnow1025.com/"),
78
- #
79
- # # Code Execution
80
- # # Content(type=ContentType.Text, data="What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."),
81
- #
82
- # # File Output
83
- # # Content(type=ContentType.Text, data="Create a matplotlib visualization and save it as output.png"),
84
- #
85
- # # Structured Output
86
- # # Content(type=ContentType.Text, data="Please generate a product."),
87
- # ]
88
- # ),
89
- Message(
90
- role=Role.User,
91
- contents=[
92
- Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1769429581512-Test.txt"),
93
- # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746208707489-image.png"),
94
- # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746209841847-A%20Tutorial%20on%20Spectral%20Clustering.pdf"),
95
- # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212253473-file_example_MP3_700KB.mp3"),
96
- # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212980820-file_example_MP4_480_1_5MG.mp4"),
97
- # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1753804900037-Calculus.docx"),
98
- Content(type=ContentType.Text, data="What's this?"),
99
- ]
100
- ),
101
- ]
102
- # See /llm_bridge/resources/model_prices.json for available models
103
- # model = "gpt-5.2"
104
- # model = "gpt-5.1"
105
- # model = "gpt-5-pro"
106
- # model = "gpt-5"
107
- # model = "gpt-4.1"
108
- # model = "gpt-5-codex"
109
- # model = "gemini-3-pro-preview"
110
- # model = "gemini-3-pro-image-preview"
111
- model = "gemini-3-flash-preview"
112
- # model = "grok-4-1-fast-reasoning"
113
- # model = "claude-sonnet-4-5"
114
- # model = "claude-opus-4-5"
115
- api_type = "Gemini-Vertex"
116
- # api_type = "Gemini-Free"
117
- # api_type = "Gemini-Paid"
118
- # api_type = "OpenAI"
119
- # api_type = "OpenAI-Azure"
120
- # api_type = "OpenAI-GitHub"
121
- # api_type = "Claude"
122
- # api_type = "Grok"
123
- temperature = 0
124
- stream = True
125
- # stream = False
126
- thought = True
127
- # thought = False
128
- code_execution = True
129
- # code_execution = False
File without changes
File without changes
File without changes
File without changes
File without changes