LLM-Bridge 1.14.0a1__tar.gz → 1.15.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/PKG-INFO +15 -22
  2. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/README.md +14 -21
  3. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/__init__.py +1 -1
  4. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/gemini/gemini_response_handler.py +1 -15
  5. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py +9 -23
  6. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/openai/steam_openai_responses_client.py +9 -17
  7. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py +8 -9
  8. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/resources/model_prices.json +31 -43
  9. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/type/chat_response.py +0 -15
  10. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/pyproject.toml +1 -1
  11. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/usage/main.py +10 -9
  12. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/uv.lock +3 -1
  13. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/.gitattributes +0 -0
  14. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/.github/workflows/python-publish.yml +0 -0
  15. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/.gitignore +0 -0
  16. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/LICENSE +0 -0
  17. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/MANIFEST.in +0 -0
  18. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/__init__.py +0 -0
  19. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/chat_client.py +0 -0
  20. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/__init__.py +0 -0
  21. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/claude/__init__.py +0 -0
  22. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/claude/claude_response_handler.py +0 -0
  23. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/claude/claude_token_counter.py +0 -0
  24. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/claude/non_stream_claude_client.py +0 -0
  25. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/claude/stream_claude_client.py +0 -0
  26. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/gemini/__init__.py +0 -0
  27. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/gemini/gemini_token_counter.py +0 -0
  28. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +0 -0
  29. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/gemini/stream_gemini_client.py +0 -0
  30. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/openai/__init__.py +0 -0
  31. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/openai/non_stream_openai_client.py +0 -0
  32. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/openai/openai_token_couter.py +0 -0
  33. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/openai/stream_openai_client.py +0 -0
  34. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/implementations/printing_status.py +0 -0
  35. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/model_client/__init__.py +0 -0
  36. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/model_client/claude_client.py +0 -0
  37. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/model_client/gemini_client.py +0 -0
  38. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/client/model_client/openai_client.py +0 -0
  39. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/__init__.py +0 -0
  40. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/chat_generate/__init__.py +0 -0
  41. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/chat_generate/chat_client_factory.py +0 -0
  42. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/chat_generate/chat_message_converter.py +0 -0
  43. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/chat_generate/media_processor.py +0 -0
  44. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
  45. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +0 -0
  46. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +0 -0
  47. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/chat_generate/model_client_factory/schema_converter.py +0 -0
  48. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +0 -0
  49. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +0 -0
  50. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +0 -0
  51. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/chat_generate/model_message_converter/openai_message_converter.py +0 -0
  52. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/chat_generate/model_message_converter/openai_responses_message_converter.py +0 -0
  53. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/file_fetch.py +0 -0
  54. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/message_preprocess/__init__.py +0 -0
  55. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/message_preprocess/code_file_extensions.py +0 -0
  56. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/message_preprocess/document_processor.py +0 -0
  57. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/message_preprocess/file_type_checker.py +0 -0
  58. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/message_preprocess/message_preprocessor.py +0 -0
  59. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/logic/model_prices.py +0 -0
  60. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/resources/__init__.py +0 -0
  61. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/type/__init__.py +0 -0
  62. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/type/message.py +0 -0
  63. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/type/model_message/__init__.py +0 -0
  64. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/type/model_message/claude_message.py +0 -0
  65. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/type/model_message/gemini_message.py +0 -0
  66. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/type/model_message/openai_message.py +0 -0
  67. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/type/model_message/openai_responses_message.py +0 -0
  68. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/llm_bridge/type/serializer.py +0 -0
  69. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/tests/__init__.py +0 -0
  70. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/tests/chat_client_factory_test.py +0 -0
  71. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/tests/message_preprocessor_test.py +0 -0
  72. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/usage/.env.example +0 -0
  73. {llm_bridge-1.14.0a1 → llm_bridge-1.15.0}/usage/workflow.py +0 -0
--- llm_bridge-1.14.0a1/PKG-INFO
+++ llm_bridge-1.15.0/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: LLM-Bridge
- Version: 1.14.0a1
+ Version: 1.15.0
  Summary: A Bridge for LLMs
  Author-email: windsnow1025 <windsnow1025@gmail.com>
  License-Expression: MIT
@@ -37,21 +37,21 @@ PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge
  1. **Model Message Converter**: converts general messages to model messages
  1. **Media Processor**: converts general media (Image, Audio, Video, PDF) to model compatible formats.
  3. **Chat Client**: generate stream or non-stream responses
-    - **Model Thoughts**: captures and formats the model's thinking process
-    - **Code Execution**: auto generate and execute Python code
-    - **Web Search + Citations**: extracts and formats citations from search results
+    - **Model Thoughts**: captures the model's thinking process
+    - **Code Execution**: generates and executes Python code
+    - **Web Search**: generates response from search results
     - **Token Counter**: tracks and reports input and output token usage

  ### Supported Features for API Types

  The features listed represent the maximum capabilities of each API type supported by LLM Bridge.

- | API Type | Input Format                   | Capabilities                                                         | Output Format     |
- |----------|--------------------------------|----------------------------------------------------------------------|-------------------|
- | OpenAI   | Text, Image, PDF               | Thinking, Web Search, Code Execution                                 | Text              |
- | Gemini   | Text, Image, Video, Audio, PDF | Thinking, Web Search + Citations, Code Execution, Structured Output  | Text, Image, File |
- | Claude   | Text, Image, PDF               | Thinking, Web Search, Code Execution                                 | Text              |
- | Grok     | Text, Image                    |                                                                      | Text              |
+ | API Type | Input Format                   | Capabilities                                             | Output Format     |
+ |----------|--------------------------------|----------------------------------------------------------|-------------------|
+ | OpenAI   | Text, Image, PDF               | Thinking, Web Search, Code Execution                     | Text, Image       |
+ | Gemini   | Text, Image, Video, Audio, PDF | Thinking, Web Search, Code Execution, Structured Output  | Text, Image, File |
+ | Claude   | Text, Image, PDF               | Thinking, Web Search, Code Execution                     | Text              |
+ | Grok     | Text, Image                    |                                                          | Text              |

  #### Planned Features

@@ -59,12 +59,6 @@ The features listed represent the maximum capabilities of each API type supporte
  - More features for API Types
  - Native support for Grok

- ## Installation
-
- ```bash
- pip install --upgrade llm_bridge
- ```
-
  ## Development

  ### Python uv
@@ -73,16 +67,15 @@ pip install --upgrade llm_bridge
  2. Install Python in uv: `uv python install 3.12`; upgrade Python in uv: `uv python install 3.12`
  3. Configure requirements:
     ```bash
-    uv sync
+    uv sync --refresh
     ```

  ### Pycharm

- Add New Configuration >> uv run
-   - script: `./usage/main.py`
-   - Paths to ".env" files: `./usage/.env`
-
- If uv interpreter is not found, create a new project with uv.
+ 1. Add New Interpreter >> Add Local Interpreter
+    - Environment: Select existing
+    - Type: uv
+ 2. Add New Configuration >> uv run >> script: `./usage/main.py`

  ### Usage

--- llm_bridge-1.14.0a1/README.md
+++ llm_bridge-1.15.0/README.md
@@ -13,21 +13,21 @@ PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge
  1. **Model Message Converter**: converts general messages to model messages
  1. **Media Processor**: converts general media (Image, Audio, Video, PDF) to model compatible formats.
  3. **Chat Client**: generate stream or non-stream responses
-    - **Model Thoughts**: captures and formats the model's thinking process
-    - **Code Execution**: auto generate and execute Python code
-    - **Web Search + Citations**: extracts and formats citations from search results
+    - **Model Thoughts**: captures the model's thinking process
+    - **Code Execution**: generates and executes Python code
+    - **Web Search**: generates response from search results
     - **Token Counter**: tracks and reports input and output token usage

  ### Supported Features for API Types

  The features listed represent the maximum capabilities of each API type supported by LLM Bridge.

- | API Type | Input Format                   | Capabilities                                                         | Output Format     |
- |----------|--------------------------------|----------------------------------------------------------------------|-------------------|
- | OpenAI   | Text, Image, PDF               | Thinking, Web Search, Code Execution                                 | Text              |
- | Gemini   | Text, Image, Video, Audio, PDF | Thinking, Web Search + Citations, Code Execution, Structured Output  | Text, Image, File |
- | Claude   | Text, Image, PDF               | Thinking, Web Search, Code Execution                                 | Text              |
- | Grok     | Text, Image                    |                                                                      | Text              |
+ | API Type | Input Format                   | Capabilities                                             | Output Format     |
+ |----------|--------------------------------|----------------------------------------------------------|-------------------|
+ | OpenAI   | Text, Image, PDF               | Thinking, Web Search, Code Execution                     | Text, Image       |
+ | Gemini   | Text, Image, Video, Audio, PDF | Thinking, Web Search, Code Execution, Structured Output  | Text, Image, File |
+ | Claude   | Text, Image, PDF               | Thinking, Web Search, Code Execution                     | Text              |
+ | Grok     | Text, Image                    |                                                          | Text              |

  #### Planned Features

@@ -35,12 +35,6 @@ The features listed represent the maximum capabilities of each API type supporte
  - More features for API Types
  - Native support for Grok

- ## Installation
-
- ```bash
- pip install --upgrade llm_bridge
- ```
-
  ## Development

  ### Python uv
@@ -49,16 +43,15 @@ pip install --upgrade llm_bridge
  2. Install Python in uv: `uv python install 3.12`; upgrade Python in uv: `uv python install 3.12`
  3. Configure requirements:
     ```bash
-    uv sync
+    uv sync --refresh
     ```

  ### Pycharm

- Add New Configuration >> uv run
-   - script: `./usage/main.py`
-   - Paths to ".env" files: `./usage/.env`
-
- If uv interpreter is not found, create a new project with uv.
+ 1. Add New Interpreter >> Add Local Interpreter
+    - Environment: Select existing
+    - Type: uv
+ 2. Add New Configuration >> uv run >> script: `./usage/main.py`

  ### Usage

--- llm_bridge-1.14.0a1/llm_bridge/__init__.py
+++ llm_bridge-1.15.0/llm_bridge/__init__.py
@@ -2,6 +2,6 @@ from .logic.chat_generate.chat_client_factory import create_chat_client
  from .logic.chat_generate.chat_message_converter import *
  from .logic.message_preprocess.message_preprocessor import preprocess_messages
  from .logic.model_prices import ModelPrice, get_model_prices, find_model_prices, calculate_chat_cost
- from .type.chat_response import Citation, ChatResponse
+ from .type.chat_response import ChatResponse
  from .type.message import Role, Message, Content, ContentType
  from .type.serializer import serialize
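
Editor's note: `Citation` is dropped from the package's public exports in this release, so downstream imports need a one-line migration. A minimal sketch, assuming only the API surface visible in this diff (the `collect_text` helper is hypothetical):

```python
# Editorial sketch, not part of the package.
# Before (1.14.x): `from llm_bridge import Citation, ChatResponse`
# After (1.15.0):  `Citation` no longer exists; import only what remains.
from llm_bridge import ChatResponse


def collect_text(response: ChatResponse) -> str:
    # ChatResponse no longer carries `citations`; callers consume the
    # plain text (plus thought/files) instead.
    return response.text or ""
```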
--- llm_bridge-1.14.0a1/llm_bridge/client/implementations/gemini/gemini_response_handler.py
+++ llm_bridge-1.15.0/llm_bridge/client/implementations/gemini/gemini_response_handler.py
@@ -7,7 +7,7 @@ from google.genai.types import Part

  from llm_bridge.client.implementations.gemini.gemini_token_counter import count_gemini_tokens
  from llm_bridge.client.implementations.printing_status import PrintingStatus
- from llm_bridge.type.chat_response import Citation, ChatResponse, File
+ from llm_bridge.type.chat_response import ChatResponse, File


  class GeminiResponseHandler:
@@ -26,7 +26,6 @@ class GeminiResponseHandler:
          code_output: str = ""
          files: list[File] = []
          display: Optional[str] = None
-         citations: list[Citation] = extract_citations(response)
          input_tokens, stage_output_tokens = await count_gemini_tokens(response)

          parts: list[Part] = []
@@ -89,19 +88,6 @@ class GeminiResponseHandler:
              code_output=code_output,
              files=files,
              display=display,
-             citations=citations,
              input_tokens=input_tokens,
              output_tokens=output_tokens,
          )
-
-
- def extract_citations(response: types.GenerateContentResponse) -> list[Citation]:
-     citations = []
-     if candidates := response.candidates:
-         if grounding_metadata := candidates[0].grounding_metadata:
-             if grounding_supports := grounding_metadata.grounding_supports:
-                 for grounding_support in grounding_supports:
-                     citation_indices = [index + 1 for index in grounding_support.grounding_chunk_indices]
-                     citation_text = grounding_support.segment.text
-                     citations.append(Citation(text=citation_text, indices=citation_indices))
-     return citations
--- llm_bridge-1.14.0a1/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py
+++ llm_bridge-1.15.0/llm_bridge/client/implementations/openai/non_stream_openai_responses_client.py
@@ -1,19 +1,17 @@
  import logging
  import re
- from pprint import pprint
- from typing import Optional

  import httpx
  import openai
  from fastapi import HTTPException
  from openai import APIStatusError
- from openai.types.responses import WebSearchToolParam, Response, ResponseOutputItem, ResponseOutputMessage, \
+ from openai.types.responses import Response, ResponseOutputItem, ResponseOutputMessage, \
      ResponseOutputText, ResponseReasoningItem

  from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens, \
      count_openai_output_tokens
  from llm_bridge.client.model_client.openai_client import OpenAIClient
- from llm_bridge.type.chat_response import ChatResponse, Citation, File
+ from llm_bridge.type.chat_response import ChatResponse, File
  from llm_bridge.type.serializer import serialize


@@ -27,7 +25,6 @@ def process_openai_responses_non_stream_response(
      text: str = ""
      thought: str = ""
      files: list[File] = []
-     citations: list[Citation] = []

      for output in output_list:
          if output.type == "message":
@@ -36,27 +33,17 @@ def process_openai_responses_non_stream_response(
                  if content.type == "output_text":
                      output_text: ResponseOutputText = content
                      text += output_text.text
-                     # Citation is unavailable in OpenAI Responses API
-                     # elif annotations := content.annotations:
-                     #     for annotation in annotations:
-                     #         citations.append(
-                     #             Citation(
-                     #                 text=content.text[annotation.start_index:annotation.end_index],
-                     #                 url=annotation.url
-                     #             )
-                     #         )
          elif output.type == "reasoning":
              reasoning_item: ResponseReasoningItem = output
              for summary_delta in reasoning_item.summary:
                  thought += summary_delta.text
-         # Image Generation to be tested
-         # if output.type == "image_generation_call":
-         #     file = File(
-         #         name="generated_image.png",
-         #         data=output.result,
-         #         type="image/png",
-         #     )
-         #     files.append(file)
+         if output.type == "image_generation_call":
+             file = File(
+                 name="generated_image.png",
+                 data=output.result,
+                 type="image/png",
+             )
+             files.append(file)

      chat_response = ChatResponse(text=text, files=files)
      output_tokens = count_openai_output_tokens(chat_response)
@@ -64,7 +51,6 @@ def process_openai_responses_non_stream_response(
          text=text,
          thought=thought,
          files=files,
-         citations=citations,
          input_tokens=input_tokens,
          output_tokens=output_tokens,
      )
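
Editor's note: with the previously commented-out `image_generation_call` branch now active, a non-stream response can carry a generated image in `ChatResponse.files`. A minimal consumer sketch, assuming `File.data` holds the base64-encoded payload taken from `output.result` (field names follow the `File` dataclass visible later in this diff; `save_generated_images` is hypothetical):

```python
# Editorial sketch, not part of the package.
import base64

from llm_bridge import ChatResponse


def save_generated_images(response: ChatResponse) -> None:
    # Each File carries name / data / type per llm_bridge/type/chat_response.py.
    for file in response.files or []:
        if file.type == "image/png":
            # Assumption: `data` is the base64 string from the
            # image_generation_call output item's `result`.
            with open(file.name, "wb") as f:
                f.write(base64.b64decode(file.data))
```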
--- llm_bridge-1.14.0a1/llm_bridge/client/implementations/openai/steam_openai_responses_client.py
+++ llm_bridge-1.15.0/llm_bridge/client/implementations/openai/steam_openai_responses_client.py
@@ -1,7 +1,6 @@
  import logging
  import re
- from pprint import pprint
- from typing import AsyncGenerator, Optional
+ from typing import AsyncGenerator

  import httpx
  import openai
@@ -12,7 +11,7 @@ from openai.types.responses import ResponseStreamEvent, ResponseReasoningSummary
  from llm_bridge.client.implementations.openai.openai_token_couter import count_openai_responses_input_tokens, \
      count_openai_output_tokens
  from llm_bridge.client.model_client.openai_client import OpenAIClient
- from llm_bridge.type.chat_response import ChatResponse, Citation, File
+ from llm_bridge.type.chat_response import ChatResponse, File
  from llm_bridge.type.serializer import serialize


@@ -20,7 +19,6 @@ def process_delta(event: ResponseStreamEvent) -> ChatResponse:
      text: str = ""
      thought: str = ""
      files: list[File] = []
-     citations: list[Citation] = []

      if event.type == "response.output_text.delta":
          text_delta_event: ResponseTextDeltaEvent = event
@@ -28,23 +26,18 @@ def process_delta(event: ResponseStreamEvent) -> ChatResponse:
      elif event.type == "response.reasoning_summary_text.delta":
          reasoning_summary_text_delta_event: ResponseReasoningSummaryTextDeltaEvent = event
          thought = reasoning_summary_text_delta_event.delta
-     # Citation is unavailable in OpenAI Responses API
-     elif event.type == "response.output_text.annotation.added":
-         pass
-     # Image Generation to be tested
-     # if event.type == "response.image_generation_call.partial_image":
-     #     file = File(
-     #         name="generated_image.png",
-     #         data=event.partial_image_b64,
-     #         type="image/png",
-     #     )
-     #     files.append(file)
+     if event.type == "response.image_generation_call.partial_image":
+         file = File(
+             name="generated_image.png",
+             data=event.partial_image_b64,
+             type="image/png",
+         )
+         files.append(file)

      chat_response = ChatResponse(
          text=text,
          thought=thought,
          files=files,
-         citations=citations,
      )
      return chat_response

@@ -61,7 +54,6 @@ async def generate_chunk(
          text=chat_response.text,
          thought=chat_response.thought,
          files=chat_response.files,
-         citations=chat_response.citations,
          input_tokens=input_tokens,
          output_tokens=output_tokens,
      )
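
Editor's note: in the streaming client, each `response.image_generation_call.partial_image` event yields a `File` containing one partial frame, so a consumer sees the same image repeatedly at increasing fidelity. A sketch of keeping only the final frame, assuming chunks arrive as `ChatResponse` objects (the `last_partial_image` helper is hypothetical):

```python
# Editorial sketch, not part of the package.
from typing import AsyncGenerator, Optional

from llm_bridge import ChatResponse


async def last_partial_image(
    chunks: AsyncGenerator[ChatResponse, None],
) -> Optional[str]:
    # Later partial_image events supersede earlier ones, so only the
    # most recent base64 payload is kept.
    latest: Optional[str] = None
    async for chunk in chunks:
        for file in chunk.files or []:
            latest = file.data
    return latest
```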
--- llm_bridge-1.14.0a1/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py
+++ llm_bridge-1.15.0/llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py
@@ -65,7 +65,7 @@ async def create_openai_client(
      tools = []
      reasoning = None

-     if model not in ["gpt-5-chat-latest", "gpt-5-pro"]:
+     if model not in ["gpt-5-pro", "gpt-5.2-pro"]:
          if code_execution:
              tools.append(
                  CodeInterpreter(
@@ -73,16 +73,15 @@ async def create_openai_client(
                      container=CodeInterpreterContainerCodeInterpreterToolAuto(type="auto")
                  )
              )
-     if model not in ["gpt-5-chat-latest"]:
-         tools.append(
-             WebSearchToolParam(
-                 type="web_search",
-                 search_context_size="high",
-             )
+     tools.append(
+         WebSearchToolParam(
+             type="web_search",
+             search_context_size="high",
          )
-     if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
+     )
+     if re.match(r"gpt-5.*", model):
          temperature = 1
-     if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
+     if re.match(r"gpt-5.*", model):
          if thought:
              reasoning = Reasoning(
                  effort="high",
--- llm_bridge-1.14.0a1/llm_bridge/resources/model_prices.json
+++ llm_bridge-1.15.0/llm_bridge/resources/model_prices.json
@@ -1,75 +1,57 @@
  [
    {
-     "apiType": "Gemini-Vertex",
+     "apiType": "Gemini-Paid",
      "model": "gemini-3-pro-preview",
      "input": 4,
      "output": 18
    },
    {
-     "apiType": "Gemini-Vertex",
+     "apiType": "Gemini-Paid",
+     "model": "gemini-3-flash-preview",
+     "input": 1,
+     "output": 3
+   },
+   {
+     "apiType": "Gemini-Paid",
      "model": "gemini-3-pro-image-preview",
      "input": 2,
      "output": 120
    },
    {
-     "apiType": "Gemini-Vertex",
-     "model": "gemini-2.5-flash",
+     "apiType": "Gemini-Paid",
+     "model": "gemini-flash-latest",
      "input": 1,
      "output": 2.5
    },
    {
      "apiType": "Gemini-Vertex",
-     "model": "gemini-2.5-pro",
-     "input": 2.5,
-     "output": 15
-   },
-   {
-     "apiType": "Gemini-Free",
-     "model": "gemini-flash-latest",
-     "input": 0,
-     "output": 0
-   },
-   {
-     "apiType": "Gemini-Free",
-     "model": "gemini-2.5-flash",
-     "input": 0,
-     "output": 0
-   },
-   {
-     "apiType": "Gemini-Free",
-     "model": "gemini-2.5-pro",
-     "input": 0,
-     "output": 0
-   },
-   {
-     "apiType": "Gemini-Paid",
      "model": "gemini-3-pro-preview",
      "input": 4,
      "output": 18
    },
    {
-     "apiType": "Gemini-Paid",
+     "apiType": "Gemini-Vertex",
+     "model": "gemini-3-flash-preview",
+     "input": 1,
+     "output": 3
+   },
+   {
+     "apiType": "Gemini-Vertex",
      "model": "gemini-3-pro-image-preview",
      "input": 2,
      "output": 120
    },
    {
-     "apiType": "Gemini-Paid",
-     "model": "gemini-flash-latest",
-     "input": 1,
-     "output": 2.5
-   },
-   {
-     "apiType": "Gemini-Paid",
-     "model": "gemini-2.5-flash",
-     "input": 1,
-     "output": 2.5
+     "apiType": "Gemini-Free",
+     "model": "gemini-3-flash-preview",
+     "input": 0,
+     "output": 0
    },
    {
-     "apiType": "Gemini-Paid",
-     "model": "gemini-2.5-pro",
-     "input": 2.5,
-     "output": 15
+     "apiType": "OpenAI",
+     "model": "gpt-5.2",
+     "input": 1.75,
+     "output": 14
    },
    {
      "apiType": "OpenAI",
@@ -89,6 +71,12 @@
      "input": 0.25,
      "output": 2
    },
+   {
+     "apiType": "OpenAI",
+     "model": "gpt-5.2-pro",
+     "input": 21,
+     "output": 168
+   },
    {
      "apiType": "OpenAI",
      "model": "gpt-5-pro",
--- llm_bridge-1.14.0a1/llm_bridge/type/chat_response.py
+++ llm_bridge-1.15.0/llm_bridge/type/chat_response.py
@@ -9,20 +9,6 @@ class File:
      type: str


- @dataclass
- class Citation:
-     text: str
-     indices: list[int]
-
-
- # TODO: adapt to different Citation formats
- # @dataclass
- # class Citation:
- #     text: str
- #     indices: Optional[list[int]] = None
- #     url: Optional[str] = None
-
-
  @dataclass
  class ChatResponse:
      text: Optional[str] = None
@@ -31,7 +17,6 @@ class ChatResponse:
      code_output: Optional[str] = None
      files: Optional[list[File]] = None
      display: Optional[str] = None
-     citations: Optional[list[Citation]] = None
      error: Optional[str] = None
      input_tokens: Optional[int] = 0
      output_tokens: Optional[int] = 0
--- llm_bridge-1.14.0a1/pyproject.toml
+++ llm_bridge-1.15.0/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

  [project]
  name = "LLM-Bridge"
- version = "1.14.0-alpha.1"
+ version = "1.15.0"
  dependencies = [
      "fastapi",
      "httpx",
--- llm_bridge-1.14.0a1/usage/main.py
+++ llm_bridge-1.15.0/usage/main.py
@@ -98,7 +98,7 @@ messages = [
          # Content(type=ContentType.Text, data="Explain the concept of Occam's Razor and provide a simple, everyday example."),

          # Web Search
-         # Content(type=ContentType.Text, data="What's the weather in NYC today?"),
+         Content(type=ContentType.Text, data="What's the weather in NYC today?"),

          # Image Understanding
          # Content(type=ContentType.File, data="https://www.gstatic.com/webp/gallery3/1.png"),
@@ -118,7 +118,7 @@ messages = [
          # Content(type=ContentType.Text, data="Please implement a minimum example of Neural Network in `script.py`"),

          # Structured Output
-         Content(type=ContentType.Text, data="Please generate a product."),
+         # Content(type=ContentType.Text, data="Please generate a product."),
      ]
  ),
  # Message(
@@ -134,27 +134,28 @@ messages = [
  # ),
  ]
  # See /llm_bridge/resources/model_prices.json for available models
+ # model = "gpt-5.2"
  # model = "gpt-5.1"
  # model = "gpt-5-pro"
  # model = "gpt-5"
  # model = "gpt-4.1"
- # model = "gemini-3-pro-preview"
+ model = "gemini-3-pro-preview"
  # model = "gemini-3-pro-image-preview"
- # model = "gemini-flash-latest"
+ # model = "gemini-3-flash-preview"
  # model = "grok-4-1-fast-reasoning"
- model = "claude-sonnet-4-5"
+ # model = "claude-sonnet-4-5"
  # model = "claude-opus-4-5"
- # api_type = "Gemini-Vertex"
+ api_type = "Gemini-Vertex"
  # api_type = "Gemini-Free"
  # api_type = "Gemini-Paid"
  # api_type = "OpenAI"
  # api_type = "OpenAI-Azure"
  # api_type = "OpenAI-GitHub"
- api_type = "Claude"
+ # api_type = "Claude"
  # api_type = "Grok"
  temperature = 0
- stream = True
- # stream = False
+ # stream = True
+ stream = False
  thought = True
  # thought = False
  code_execution = True
--- llm_bridge-1.14.0a1/uv.lock
+++ llm_bridge-1.15.0/uv.lock
@@ -357,7 +357,7 @@ wheels = [

  [[package]]
  name = "llm-bridge"
- version = "1.14.0a1"
+ version = "1.15.0a0"
  source = { editable = "." }
  dependencies = [
      { name = "anthropic" },
@@ -726,6 +726,8 @@ wheels = [
      { url = "https://files.pythonhosted.org/packages/72/74/448b6172927c829c6a3fba80078d7b0a016ebbe2c9ee528821f5ea21677a/pymupdf-1.26.7-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:31aa9c8377ea1eea02934b92f4dcf79fb2abba0bf41f8a46d64c3e31546a3c02", size = 22470101, upload-time = "2025-12-11T21:47:37.105Z" },
      { url = "https://files.pythonhosted.org/packages/65/e7/47af26f3ac76be7ac3dd4d6cc7ee105948a8355d774e5ca39857bf91c11c/pymupdf-1.26.7-cp310-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:e419b609996434a14a80fa060adec72c434a1cca6a511ec54db9841bc5d51b3c", size = 23502486, upload-time = "2025-12-12T09:51:25.824Z" },
      { url = "https://files.pythonhosted.org/packages/2a/6b/3de1714d734ff949be1e90a22375d0598d3540b22ae73eb85c2d7d1f36a9/pymupdf-1.26.7-cp310-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:69dfc78f206a96e5b3ac22741263ebab945fdf51f0dbe7c5757c3511b23d9d72", size = 24115727, upload-time = "2025-12-11T21:47:51.274Z" },
+     { url = "https://files.pythonhosted.org/packages/62/9b/f86224847949577a523be2207315ae0fd3155b5d909cd66c274d095349a3/pymupdf-1.26.7-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1d5106f46e1ca0d64d46bd51892372a4f82076bdc14a9678d33d630702abca36", size = 24324386, upload-time = "2025-12-12T14:58:45.483Z" },
+     { url = "https://files.pythonhosted.org/packages/85/8e/a117d39092ca645fde8b903f4a941d9aa75b370a67b4f1f435f56393dc5a/pymupdf-1.26.7-cp310-abi3-win32.whl", hash = "sha256:7c9645b6f5452629c747690190350213d3e5bbdb6b2eca227d82702b327f6eee", size = 17203888, upload-time = "2025-12-12T13:59:57.613Z" },
      { url = "https://files.pythonhosted.org/packages/dd/c3/d0047678146c294469c33bae167c8ace337deafb736b0bf97b9bc481aa65/pymupdf-1.26.7-cp310-abi3-win_amd64.whl", hash = "sha256:425b1befe40d41b72eb0fe211711c7ae334db5eb60307e9dd09066ed060cceba", size = 18405952, upload-time = "2025-12-11T21:48:02.947Z" },
  ]
