LLM-Bridge 0.1.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_bridge-0.1.4/LICENSE +21 -0
- llm_bridge-0.1.4/LLM_Bridge.egg-info/PKG-INFO +30 -0
- llm_bridge-0.1.4/LLM_Bridge.egg-info/SOURCES.txt +56 -0
- llm_bridge-0.1.4/LLM_Bridge.egg-info/dependency_links.txt +1 -0
- llm_bridge-0.1.4/LLM_Bridge.egg-info/requires.txt +15 -0
- llm_bridge-0.1.4/LLM_Bridge.egg-info/top_level.txt +2 -0
- llm_bridge-0.1.4/PKG-INFO +30 -0
- llm_bridge-0.1.4/README.md +63 -0
- llm_bridge-0.1.4/llm_bridge/__init__.py +7 -0
- llm_bridge-0.1.4/llm_bridge/client/__init__.py +0 -0
- llm_bridge-0.1.4/llm_bridge/client/chat_client.py +8 -0
- llm_bridge-0.1.4/llm_bridge/client/implementations/__init__.py +0 -0
- llm_bridge-0.1.4/llm_bridge/client/implementations/claude/__init__.py +0 -0
- llm_bridge-0.1.4/llm_bridge/client/implementations/claude/non_stream_claude_client.py +38 -0
- llm_bridge-0.1.4/llm_bridge/client/implementations/claude/stream_claude_client.py +47 -0
- llm_bridge-0.1.4/llm_bridge/client/implementations/gemini/__init__.py +0 -0
- llm_bridge-0.1.4/llm_bridge/client/implementations/gemini/gemini_response_handler.py +53 -0
- llm_bridge-0.1.4/llm_bridge/client/implementations/gemini/non_stream_gemini_client.py +38 -0
- llm_bridge-0.1.4/llm_bridge/client/implementations/gemini/stream_gemini_client.py +56 -0
- llm_bridge-0.1.4/llm_bridge/client/implementations/gpt/__init__.py +0 -0
- llm_bridge-0.1.4/llm_bridge/client/implementations/gpt/non_stream_gpt_client.py +47 -0
- llm_bridge-0.1.4/llm_bridge/client/implementations/gpt/stream_gpt_client.py +73 -0
- llm_bridge-0.1.4/llm_bridge/client/model_client/__init__.py +0 -0
- llm_bridge-0.1.4/llm_bridge/client/model_client/claude_client.py +26 -0
- llm_bridge-0.1.4/llm_bridge/client/model_client/gemini_client.py +27 -0
- llm_bridge-0.1.4/llm_bridge/client/model_client/gpt_client.py +26 -0
- llm_bridge-0.1.4/llm_bridge/logic/__init__.py +0 -0
- llm_bridge-0.1.4/llm_bridge/logic/chat_generate/__init__.py +1 -0
- llm_bridge-0.1.4/llm_bridge/logic/chat_generate/chat_client_factory.py +63 -0
- llm_bridge-0.1.4/llm_bridge/logic/chat_generate/media_processor.py +46 -0
- llm_bridge-0.1.4/llm_bridge/logic/chat_generate/model_client_factory/__init__.py +0 -0
- llm_bridge-0.1.4/llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py +44 -0
- llm_bridge-0.1.4/llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py +83 -0
- llm_bridge-0.1.4/llm_bridge/logic/chat_generate/model_client_factory/gpt_client_factory.py +51 -0
- llm_bridge-0.1.4/llm_bridge/logic/chat_generate/model_message_converter/__init__.py +1 -0
- llm_bridge-0.1.4/llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py +34 -0
- llm_bridge-0.1.4/llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py +30 -0
- llm_bridge-0.1.4/llm_bridge/logic/chat_generate/model_message_converter/gpt_message_converter.py +31 -0
- llm_bridge-0.1.4/llm_bridge/logic/chat_generate/model_message_converter/model_message_converter.py +19 -0
- llm_bridge-0.1.4/llm_bridge/logic/message_preprocess/__init__.py +2 -0
- llm_bridge-0.1.4/llm_bridge/logic/message_preprocess/code_file_extensions.py +82 -0
- llm_bridge-0.1.4/llm_bridge/logic/message_preprocess/document_processor.py +123 -0
- llm_bridge-0.1.4/llm_bridge/logic/message_preprocess/file_type_checker.py +27 -0
- llm_bridge-0.1.4/llm_bridge/logic/message_preprocess/message_preprocessor.py +38 -0
- llm_bridge-0.1.4/llm_bridge/type/__init__.py +3 -0
- llm_bridge-0.1.4/llm_bridge/type/chat_response.py +16 -0
- llm_bridge-0.1.4/llm_bridge/type/message.py +16 -0
- llm_bridge-0.1.4/llm_bridge/type/model_message/__init__.py +0 -0
- llm_bridge-0.1.4/llm_bridge/type/model_message/claude_message.py +31 -0
- llm_bridge-0.1.4/llm_bridge/type/model_message/gemini_message.py +107 -0
- llm_bridge-0.1.4/llm_bridge/type/model_message/gpt_message.py +28 -0
- llm_bridge-0.1.4/llm_bridge/type/serializer.py +16 -0
- llm_bridge-0.1.4/pyproject.toml +2 -0
- llm_bridge-0.1.4/setup.cfg +4 -0
- llm_bridge-0.1.4/setup.py +29 -0
- llm_bridge-0.1.4/tests/__init__.py +0 -0
- llm_bridge-0.1.4/tests/chat_client_factory_test.py +49 -0
- llm_bridge-0.1.4/tests/message_preprocessor_test.py +21 -0
llm_bridge-0.1.4/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 windsnow1025
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
Metadata-Version: 2.2
|
|
2
|
+
Name: LLM-Bridge
|
|
3
|
+
Version: 0.1.4
|
|
4
|
+
Summary: A Bridge for LLMs
|
|
5
|
+
Author: windsnow1025
|
|
6
|
+
Author-email: windsnow125@gmail.com
|
|
7
|
+
License: MIT
|
|
8
|
+
Requires-Python: >=3.12
|
|
9
|
+
License-File: LICENSE
|
|
10
|
+
Requires-Dist: fastapi
|
|
11
|
+
Requires-Dist: httpx
|
|
12
|
+
Requires-Dist: openai
|
|
13
|
+
Requires-Dist: google-genai
|
|
14
|
+
Requires-Dist: anthropic
|
|
15
|
+
Requires-Dist: tiktoken
|
|
16
|
+
Requires-Dist: pillow
|
|
17
|
+
Requires-Dist: PyMuPDF
|
|
18
|
+
Requires-Dist: python-docx
|
|
19
|
+
Requires-Dist: openpyxl
|
|
20
|
+
Requires-Dist: python-pptx
|
|
21
|
+
Requires-Dist: pytest
|
|
22
|
+
Requires-Dist: pytest-asyncio
|
|
23
|
+
Requires-Dist: python-dotenv
|
|
24
|
+
Requires-Dist: protobuf
|
|
25
|
+
Dynamic: author
|
|
26
|
+
Dynamic: author-email
|
|
27
|
+
Dynamic: license
|
|
28
|
+
Dynamic: requires-dist
|
|
29
|
+
Dynamic: requires-python
|
|
30
|
+
Dynamic: summary
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
LICENSE
|
|
2
|
+
README.md
|
|
3
|
+
pyproject.toml
|
|
4
|
+
setup.py
|
|
5
|
+
LLM_Bridge.egg-info/PKG-INFO
|
|
6
|
+
LLM_Bridge.egg-info/SOURCES.txt
|
|
7
|
+
LLM_Bridge.egg-info/dependency_links.txt
|
|
8
|
+
LLM_Bridge.egg-info/requires.txt
|
|
9
|
+
LLM_Bridge.egg-info/top_level.txt
|
|
10
|
+
llm_bridge/__init__.py
|
|
11
|
+
llm_bridge/client/__init__.py
|
|
12
|
+
llm_bridge/client/chat_client.py
|
|
13
|
+
llm_bridge/client/implementations/__init__.py
|
|
14
|
+
llm_bridge/client/implementations/claude/__init__.py
|
|
15
|
+
llm_bridge/client/implementations/claude/non_stream_claude_client.py
|
|
16
|
+
llm_bridge/client/implementations/claude/stream_claude_client.py
|
|
17
|
+
llm_bridge/client/implementations/gemini/__init__.py
|
|
18
|
+
llm_bridge/client/implementations/gemini/gemini_response_handler.py
|
|
19
|
+
llm_bridge/client/implementations/gemini/non_stream_gemini_client.py
|
|
20
|
+
llm_bridge/client/implementations/gemini/stream_gemini_client.py
|
|
21
|
+
llm_bridge/client/implementations/gpt/__init__.py
|
|
22
|
+
llm_bridge/client/implementations/gpt/non_stream_gpt_client.py
|
|
23
|
+
llm_bridge/client/implementations/gpt/stream_gpt_client.py
|
|
24
|
+
llm_bridge/client/model_client/__init__.py
|
|
25
|
+
llm_bridge/client/model_client/claude_client.py
|
|
26
|
+
llm_bridge/client/model_client/gemini_client.py
|
|
27
|
+
llm_bridge/client/model_client/gpt_client.py
|
|
28
|
+
llm_bridge/logic/__init__.py
|
|
29
|
+
llm_bridge/logic/chat_generate/__init__.py
|
|
30
|
+
llm_bridge/logic/chat_generate/chat_client_factory.py
|
|
31
|
+
llm_bridge/logic/chat_generate/media_processor.py
|
|
32
|
+
llm_bridge/logic/chat_generate/model_client_factory/__init__.py
|
|
33
|
+
llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py
|
|
34
|
+
llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py
|
|
35
|
+
llm_bridge/logic/chat_generate/model_client_factory/gpt_client_factory.py
|
|
36
|
+
llm_bridge/logic/chat_generate/model_message_converter/__init__.py
|
|
37
|
+
llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py
|
|
38
|
+
llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py
|
|
39
|
+
llm_bridge/logic/chat_generate/model_message_converter/gpt_message_converter.py
|
|
40
|
+
llm_bridge/logic/chat_generate/model_message_converter/model_message_converter.py
|
|
41
|
+
llm_bridge/logic/message_preprocess/__init__.py
|
|
42
|
+
llm_bridge/logic/message_preprocess/code_file_extensions.py
|
|
43
|
+
llm_bridge/logic/message_preprocess/document_processor.py
|
|
44
|
+
llm_bridge/logic/message_preprocess/file_type_checker.py
|
|
45
|
+
llm_bridge/logic/message_preprocess/message_preprocessor.py
|
|
46
|
+
llm_bridge/type/__init__.py
|
|
47
|
+
llm_bridge/type/chat_response.py
|
|
48
|
+
llm_bridge/type/message.py
|
|
49
|
+
llm_bridge/type/serializer.py
|
|
50
|
+
llm_bridge/type/model_message/__init__.py
|
|
51
|
+
llm_bridge/type/model_message/claude_message.py
|
|
52
|
+
llm_bridge/type/model_message/gemini_message.py
|
|
53
|
+
llm_bridge/type/model_message/gpt_message.py
|
|
54
|
+
tests/__init__.py
|
|
55
|
+
tests/chat_client_factory_test.py
|
|
56
|
+
tests/message_preprocessor_test.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
Metadata-Version: 2.2
|
|
2
|
+
Name: LLM-Bridge
|
|
3
|
+
Version: 0.1.4
|
|
4
|
+
Summary: A Bridge for LLMs
|
|
5
|
+
Author: windsnow1025
|
|
6
|
+
Author-email: windsnow125@gmail.com
|
|
7
|
+
License: MIT
|
|
8
|
+
Requires-Python: >=3.12
|
|
9
|
+
License-File: LICENSE
|
|
10
|
+
Requires-Dist: fastapi
|
|
11
|
+
Requires-Dist: httpx
|
|
12
|
+
Requires-Dist: openai
|
|
13
|
+
Requires-Dist: google-genai
|
|
14
|
+
Requires-Dist: anthropic
|
|
15
|
+
Requires-Dist: tiktoken
|
|
16
|
+
Requires-Dist: pillow
|
|
17
|
+
Requires-Dist: PyMuPDF
|
|
18
|
+
Requires-Dist: python-docx
|
|
19
|
+
Requires-Dist: openpyxl
|
|
20
|
+
Requires-Dist: python-pptx
|
|
21
|
+
Requires-Dist: pytest
|
|
22
|
+
Requires-Dist: pytest-asyncio
|
|
23
|
+
Requires-Dist: python-dotenv
|
|
24
|
+
Requires-Dist: protobuf
|
|
25
|
+
Dynamic: author
|
|
26
|
+
Dynamic: author-email
|
|
27
|
+
Dynamic: license
|
|
28
|
+
Dynamic: requires-dist
|
|
29
|
+
Dynamic: requires-python
|
|
30
|
+
Dynamic: summary
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
# LLM Bridge
|
|
2
|
+
|
|
3
|
+
## Process
|
|
4
|
+
|
|
5
|
+
1. **Message Preprocessor**: Preprocess Messages
|
|
6
|
+
1. **Message Preprocessor**: Extract Text Files to Message
|
|
7
|
+
2. **Chat Client Factory**: Create Chat Client
|
|
8
|
+
1. **Model Message Converter**: Convert Message to Model
|
|
9
|
+
1. **Media Processor**: Get Model Image Content from URL
|
|
10
|
+
3. **Chat Client**: Generate Response
|
|
11
|
+
|
|
12
|
+
## Test
|
|
13
|
+
|
|
14
|
+
```bash
|
|
15
|
+
pytest ./tests/
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
## Usage
|
|
19
|
+
|
|
20
|
+
See ./usage/
|
|
21
|
+
|
|
22
|
+
## Build
|
|
23
|
+
|
|
24
|
+
### Dependencies
|
|
25
|
+
|
|
26
|
+
```bash
|
|
27
|
+
pip install --upgrade setuptools wheel twine
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
### Build
|
|
31
|
+
|
|
32
|
+
```bash
|
|
33
|
+
python setup.py sdist bdist_wheel
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
## Publish
|
|
37
|
+
|
|
38
|
+
### TestPyPI
|
|
39
|
+
|
|
40
|
+
```bash
|
|
41
|
+
twine upload --repository testpypi dist/*
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
### PyPI
|
|
45
|
+
|
|
46
|
+
```bash
|
|
47
|
+
twine upload dist/*
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
## Installation
|
|
51
|
+
|
|
52
|
+
### TestPyPI
|
|
53
|
+
|
|
54
|
+
```bash
|
|
55
|
+
pip install --index-url https://test.pypi.org/simple/ --upgrade llm_bridge
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
### PyPI
|
|
59
|
+
|
|
60
|
+
```bash
|
|
61
|
+
pip install --upgrade llm_bridge
|
|
62
|
+
```
|
|
63
|
+
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
from .logic.chat_generate.chat_client_factory import create_chat_client
|
|
2
|
+
from .logic.chat_generate.model_message_converter.model_message_converter import *
|
|
3
|
+
from .logic.message_preprocess.file_type_checker import get_file_type
|
|
4
|
+
from .logic.message_preprocess.message_preprocessor import preprocess_messages
|
|
5
|
+
from .type.chat_response import Citation, ChatResponse
|
|
6
|
+
from .type.message import Role, Message
|
|
7
|
+
from .type.serializer import serialize
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import re
|
|
3
|
+
|
|
4
|
+
import httpx
|
|
5
|
+
from fastapi import HTTPException
|
|
6
|
+
|
|
7
|
+
from llm_bridge.client.model_client.claude_client import ClaudeClient
|
|
8
|
+
from llm_bridge.type.chat_response import ChatResponse
|
|
9
|
+
from llm_bridge.type.serializer import serialize
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class NonStreamClaudeClient(ClaudeClient):
    """Claude chat client that returns one complete (non-streamed) response."""

    async def generate_response(self) -> ChatResponse:
        """Request a full completion from the Claude API.

        Returns:
            ChatResponse holding the text of the first content block.

        Raises:
            HTTPException: mirroring the upstream HTTP status on API errors,
                or a best-effort status (500 fallback) for other failures.
        """
        try:
            logging.info(f"messages: {self.messages}")
            message = await self.client.messages.create(
                model=self.model,
                max_tokens=4096,
                temperature=self.temperature,
                system=self.system,
                messages=serialize(self.messages)
            )

            content = message.content[0].text
            return ChatResponse(text=content)
        except httpx.HTTPStatusError as e:
            raise HTTPException(status_code=e.response.status_code, detail=e.response.text)
        except Exception as e:
            logging.exception(e)
            # Best-effort recovery of an HTTP status from the error text.
            # Only accept valid HTTP error codes; any other 3-digit match
            # (e.g. digits embedded in a model name) falls back to 500.
            match = re.search(r'\d{3}', str(e))
            error_code = int(match.group(0)) if match else 500
            if not 400 <= error_code < 600:
                error_code = 500
            raise HTTPException(status_code=error_code, detail=str(e))
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import re
|
|
3
|
+
from typing import AsyncGenerator
|
|
4
|
+
|
|
5
|
+
import httpx
|
|
6
|
+
from fastapi import HTTPException
|
|
7
|
+
|
|
8
|
+
from llm_bridge.client.model_client.claude_client import ClaudeClient
|
|
9
|
+
from llm_bridge.type.chat_response import ChatResponse
|
|
10
|
+
from llm_bridge.type.serializer import serialize
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class StreamClaudeClient(ClaudeClient):
    """Claude chat client that streams the response chunk by chunk."""

    async def generate_response(self) -> AsyncGenerator[ChatResponse, None]:
        """Start a streaming completion and return its chunk generator.

        Returns:
            An async generator yielding one ChatResponse per text delta.

        Raises:
            HTTPException: mirroring the upstream HTTP status on API errors,
                or a best-effort status (500 fallback) for failures raised
                before the stream is handed to the caller.
        """
        try:
            logging.info(f"messages: {self.messages}")

            async def chunk_generator() -> AsyncGenerator[ChatResponse, None]:
                # Errors raised while the stream is being consumed are
                # yielded as an error chunk rather than raised.
                try:
                    async with self.client.messages.stream(
                            model=self.model,
                            max_tokens=4096,
                            temperature=self.temperature,
                            system=self.system,
                            messages=serialize(self.messages)
                    ) as stream:
                        async for response_delta in stream.text_stream:
                            yield ChatResponse(text=response_delta)
                except Exception as e:
                    logging.exception(e)
                    yield ChatResponse(error=str(e))

            return chunk_generator()

        except httpx.HTTPStatusError as e:
            raise HTTPException(status_code=e.response.status_code, detail=e.response.text)
        except Exception as e:
            logging.exception(e)
            # Best-effort recovery of an HTTP status from the error text;
            # only valid HTTP error codes are accepted, otherwise 500.
            match = re.search(r'\d{3}', str(e))
            error_code = int(match.group(0)) if match else 500
            if not 400 <= error_code < 600:
                error_code = 500
            raise HTTPException(status_code=error_code, detail=str(e))
|
|
File without changes
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
from enum import Enum
|
|
2
|
+
|
|
3
|
+
from google.genai import types
|
|
4
|
+
|
|
5
|
+
from llm_bridge.type.chat_response import Citation, ChatResponse
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class PrintingStatus(Enum):
    """Which section of a Gemini reply is currently being emitted.

    Used by GeminiResponseHandler to insert the "Model Thought" /
    "Model Response" section headers exactly once each.
    """

    Start = "start"        # nothing emitted yet
    Thought = "thought"    # inside the model's thought section
    Response = "response"  # inside the final response section
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class GeminiResponseHandler:
    """Converts Gemini API responses into ChatResponse objects.

    Stateful: remembers whether the thought/response section headers have
    already been emitted, so use one handler instance per response stream.
    """

    def __init__(self):
        # No section header has been emitted yet for this handler.
        self.printing_status = PrintingStatus.Start

    def process_gemini_response(
            self, response: types.GenerateContentResponse
    ) -> ChatResponse:
        """Flatten one Gemini response (or stream chunk) into a ChatResponse.

        Inserts "Model Thought" / "Model Response" headers around thought
        vs. answer parts, appends grounding sources as a Markdown list, and
        attaches extracted citations.
        """
        text = ""
        display = None
        citations = extract_citations(response)

        for part in response.candidates[0].content.parts:
            if part.thought and self.printing_status == PrintingStatus.Start:
                text += "# Model Thought:\n\n"
                self.printing_status = PrintingStatus.Thought
            elif not part.thought and self.printing_status == PrintingStatus.Thought:
                text += "\n\n# Model Response:\n\n"
                self.printing_status = PrintingStatus.Response
            # Non-text parts have part.text == None; skip them instead of
            # raising TypeError on string concatenation.
            if part.text:
                text += part.text
        if grounding_metadata := response.candidates[0].grounding_metadata:
            if search_entry_point := grounding_metadata.search_entry_point:
                display = search_entry_point.rendered_content
            if grounding_metadata.grounding_chunks:
                text += "\n\n# Grounding Sources:\n"
                for i, chunk in enumerate(grounding_metadata.grounding_chunks, start=1):
                    if chunk.web:
                        text += f"{i}. [{chunk.web.title}]({chunk.web.uri})\n"

        return ChatResponse(text=text, display=display, citations=citations)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def extract_citations(response: types.GenerateContentResponse) -> list[Citation] | None:
    """Collect citations from a Gemini response's grounding metadata.

    Returns:
        A list of Citation objects, or None when the response carries no
        grounding support information (annotation fixed: the original
        declared ``list[Citation]`` but returned None when empty).
    """
    citations = []
    if grounding_metadata := response.candidates[0].grounding_metadata:
        if grounding_supports := grounding_metadata.grounding_supports:
            for grounding_support in grounding_supports:
                # Chunk indices are 0-based; shift to 1-based so they match
                # the numbering of the "# Grounding Sources" list.
                citation_indices = [index + 1 for index in grounding_support.grounding_chunk_indices]
                citations.append(Citation(
                    text=grounding_support.segment.text,
                    indices=citation_indices,
                ))
    return citations or None
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import re
|
|
3
|
+
|
|
4
|
+
import httpx
|
|
5
|
+
from fastapi import HTTPException
|
|
6
|
+
|
|
7
|
+
from llm_bridge.client.implementations.gemini.gemini_response_handler import *
|
|
8
|
+
from llm_bridge.client.model_client.gemini_client import GeminiClient
|
|
9
|
+
from llm_bridge.type.chat_response import ChatResponse
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class NonStreamGeminiClient(GeminiClient):
    """Gemini chat client that returns one complete (non-streamed) response."""

    async def generate_response(self) -> ChatResponse:
        """Request a full completion from the Gemini API.

        Returns:
            ChatResponse produced by a fresh GeminiResponseHandler.

        Raises:
            HTTPException: mirroring the upstream HTTP status on API errors,
                or a best-effort status (500 fallback) for other failures.
        """
        try:
            logging.info(f"messages: {self.messages}")

            response = await self.client.aio.models.generate_content(
                model=self.model,
                contents=self.messages,
                config=self.config,
            )

            # A fresh handler per call: the handler is stateful.
            gemini_response_handler = GeminiResponseHandler()
            return gemini_response_handler.process_gemini_response(response)

        except httpx.HTTPStatusError as e:
            raise HTTPException(status_code=e.response.status_code, detail=e.response.text)
        except Exception as e:
            logging.exception(e)
            # Best-effort recovery of an HTTP status from the error text;
            # only valid HTTP error codes are accepted, otherwise 500.
            match = re.search(r'\d{3}', str(e))
            error_code = int(match.group(0)) if match else 500
            if not 400 <= error_code < 600:
                error_code = 500
            raise HTTPException(status_code=error_code, detail=str(e))
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import re
|
|
3
|
+
from typing import AsyncGenerator, AsyncIterator
|
|
4
|
+
|
|
5
|
+
import httpx
|
|
6
|
+
from fastapi import HTTPException
|
|
7
|
+
from google.genai import types
|
|
8
|
+
|
|
9
|
+
from llm_bridge.client.implementations.gemini.gemini_response_handler import GeminiResponseHandler
|
|
10
|
+
from llm_bridge.client.model_client.gemini_client import GeminiClient
|
|
11
|
+
from llm_bridge.type.chat_response import ChatResponse
|
|
12
|
+
|
|
13
|
+
# Legacy module-level handler kept for backward compatibility with any caller
# that imports it directly. Do NOT share it across concurrent streams: the
# handler is stateful (section-header tracking), so sharing leaks state
# between streams.
gemini_response_handler = GeminiResponseHandler()


def process_delta(
        completion_delta: types.GenerateContentResponse,
        handler: GeminiResponseHandler | None = None,
) -> ChatResponse:
    """Convert one stream chunk to a ChatResponse.

    Uses *handler* when given; falls back to the legacy shared handler so
    existing single-argument callers keep working.
    """
    active_handler = handler if handler is not None else gemini_response_handler
    return active_handler.process_gemini_response(completion_delta)


async def generate_chunk(
        response: AsyncIterator[types.GenerateContentResponse]
) -> AsyncGenerator[ChatResponse, None]:
    """Yield one ChatResponse per streamed Gemini chunk.

    A fresh GeminiResponseHandler is created per stream so that the
    thought/response header state of concurrent streams cannot interfere
    (previously every stream shared one module-level handler).
    Stream-time errors are yielded as an error chunk rather than raised.
    """
    handler = GeminiResponseHandler()
    try:
        async for response_delta in response:
            yield process_delta(response_delta, handler)
    except Exception as e:
        logging.exception(e)
        yield ChatResponse(error=str(e))


class StreamGeminiClient(GeminiClient):
    """Gemini chat client that streams the response chunk by chunk."""

    async def generate_response(self) -> AsyncGenerator[ChatResponse, None]:
        """Start a streaming completion and return its chunk generator.

        Raises:
            HTTPException: mirroring the upstream HTTP status on API errors,
                or a best-effort status (500 fallback) for failures raised
                before the stream is handed to the caller.
        """
        try:
            logging.info(f"messages: {self.messages}")

            response = await self.client.aio.models.generate_content_stream(
                model=self.model,
                contents=self.messages,
                config=self.config,
            )

            return generate_chunk(response)

        except httpx.HTTPStatusError as e:
            raise HTTPException(status_code=e.response.status_code, detail=e.response.text)
        except Exception as e:
            logging.exception(e)
            # Best-effort recovery of an HTTP status from the error text;
            # only valid HTTP error codes are accepted, otherwise 500.
            match = re.search(r'\d{3}', str(e))
            error_code = int(match.group(0)) if match else 500
            if not 400 <= error_code < 600:
                error_code = 500
            raise HTTPException(status_code=error_code, detail=str(e))
|
|
File without changes
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import re
|
|
3
|
+
|
|
4
|
+
import httpx
|
|
5
|
+
import openai
|
|
6
|
+
from fastapi import HTTPException
|
|
7
|
+
from openai import APIStatusError
|
|
8
|
+
|
|
9
|
+
from llm_bridge.client.model_client.gpt_client import GPTClient
|
|
10
|
+
from llm_bridge.type.chat_response import ChatResponse
|
|
11
|
+
from llm_bridge.type.serializer import serialize
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class NonStreamGPTClient(GPTClient):
    """GPT chat client that returns one complete (non-streamed) response."""

    async def generate_response(self) -> ChatResponse:
        """Request a full completion from the OpenAI chat API.

        Returns:
            ChatResponse holding the first choice's message content.

        Raises:
            HTTPException: mirroring the upstream HTTP status on API errors,
                or a best-effort status (500 fallback) for other failures.
        """
        try:
            logging.info(f"messages: {self.messages}")
            completion = await self.client.chat.completions.create(
                messages=serialize(self.messages),
                model=self.model,
                temperature=self.temperature,
                stream=False
            )

            content = completion.choices[0].message.content
            return ChatResponse(text=content)
        except httpx.HTTPStatusError as e:
            raise HTTPException(status_code=e.response.status_code, detail=e.response.text)
        except APIStatusError as e:
            # openai.BadRequestError subclasses APIStatusError, so one branch
            # covers both; the original duplicate branch was redundant.
            raise HTTPException(status_code=e.status_code, detail=e.message)
        except Exception as e:
            logging.exception(e)
            # Best-effort recovery of an HTTP status from the error text;
            # only valid HTTP error codes are accepted, otherwise 500.
            match = re.search(r'\d{3}', str(e))
            error_code = int(match.group(0)) if match else 500
            if not 400 <= error_code < 600:
                error_code = 500
            raise HTTPException(status_code=error_code, detail=str(e))
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import re
|
|
3
|
+
from typing import AsyncGenerator
|
|
4
|
+
|
|
5
|
+
import httpx
|
|
6
|
+
import openai
|
|
7
|
+
from fastapi import HTTPException
|
|
8
|
+
from openai import APIStatusError, AsyncStream
|
|
9
|
+
from openai.types.chat import ChatCompletionChunk
|
|
10
|
+
|
|
11
|
+
from llm_bridge.client.model_client.gpt_client import GPTClient
|
|
12
|
+
from llm_bridge.type.chat_response import ChatResponse
|
|
13
|
+
from llm_bridge.type.serializer import serialize
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def process_delta(completion_delta: ChatCompletionChunk) -> str:
    """Extract the text delta from one streamed completion chunk.

    Chunks with an empty ``choices`` list (Azure emits these) and chunks
    whose delta carries no content both yield an empty string.
    """
    # Necessary for Azure
    if not completion_delta.choices:
        return ""

    content_delta = completion_delta.choices[0].delta.content or ""
    logging.debug(f"chunk: {content_delta}")
    return content_delta
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
async def generate_chunk(
        completion: AsyncStream[ChatCompletionChunk]
) -> AsyncGenerator[ChatResponse, None]:
    """Yield one ChatResponse per streamed chunk.

    Errors raised while consuming the stream are logged and emitted as a
    final error chunk instead of propagating.
    """
    try:
        async for chunk in completion:
            yield ChatResponse(text=process_delta(chunk))
    except Exception as e:
        logging.exception(e)
        yield ChatResponse(error=str(e))
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class StreamGPTClient(GPTClient):
    """GPT chat client that streams the response chunk by chunk."""

    async def generate_response(self) -> AsyncGenerator[ChatResponse, None]:
        """Start a streaming completion and return its chunk generator.

        Returns:
            An async generator yielding one ChatResponse per chunk.

        Raises:
            HTTPException: mirroring the upstream HTTP status on API errors,
                or a best-effort status (500 fallback) for failures raised
                before the stream is handed to the caller.
        """
        try:
            logging.info(f"messages: {self.messages}")
            completion = await self.client.chat.completions.create(
                messages=serialize(self.messages),
                model=self.model,
                temperature=self.temperature,
                stream=True
            )

            return generate_chunk(completion)

        except httpx.HTTPStatusError as e:
            raise HTTPException(status_code=e.response.status_code, detail=e.response.text)
        except APIStatusError as e:
            # openai.BadRequestError subclasses APIStatusError, so one branch
            # covers both; the original duplicate branch was redundant.
            raise HTTPException(status_code=e.status_code, detail=e.message)
        except Exception as e:
            logging.exception(e)
            # Best-effort recovery of an HTTP status from the error text;
            # only valid HTTP error codes are accepted, otherwise 500.
            match = re.search(r'\d{3}', str(e))
            error_code = int(match.group(0)) if match else 500
            if not 400 <= error_code < 600:
                error_code = 500
            raise HTTPException(status_code=error_code, detail=str(e))
|
|
File without changes
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
from typing import AsyncGenerator
|
|
2
|
+
|
|
3
|
+
import anthropic
|
|
4
|
+
|
|
5
|
+
from llm_bridge.client.chat_client import ChatClient
|
|
6
|
+
from llm_bridge.type.chat_response import ChatResponse
|
|
7
|
+
from llm_bridge.type.model_message.claude_message import ClaudeMessage
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ClaudeClient(ChatClient):
    """Base class holding the configuration shared by Claude chat clients.

    Concrete subclasses (streaming and non-streaming) override
    generate_response; this class only stores the request parameters.
    """

    def __init__(
            self,
            model: str,
            messages: list[ClaudeMessage],
            temperature: float,
            system: str,
            client: anthropic.AsyncAnthropic,
    ):
        # Plain attribute storage; no validation is performed here.
        self.model = model
        self.messages = messages
        self.temperature = temperature
        self.system = system
        self.client = client

    async def generate_response(self) -> ChatResponse | AsyncGenerator[ChatResponse, None]:
        """Produce a response; must be implemented by a concrete subclass."""
        raise NotImplementedError
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
from typing import AsyncGenerator
|
|
2
|
+
|
|
3
|
+
from google import genai
|
|
4
|
+
from google.genai import types
|
|
5
|
+
|
|
6
|
+
from llm_bridge.client.chat_client import ChatClient
|
|
7
|
+
from llm_bridge.type.chat_response import ChatResponse
|
|
8
|
+
from llm_bridge.type.model_message.gemini_message import GeminiMessage
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class GeminiClient(ChatClient):
    """Base class holding the configuration shared by Gemini chat clients.

    Concrete subclasses (streaming and non-streaming) override
    generate_response; this class only stores the request parameters.
    """

    def __init__(
            self,
            model: str,
            messages: list[GeminiMessage],
            temperature: float,
            client: genai.Client,
            config: types.GenerateContentConfig,
    ):
        # Plain attribute storage; no validation is performed here.
        self.model = model
        self.messages = messages
        self.temperature = temperature
        self.client = client
        self.config = config

    async def generate_response(self) -> ChatResponse | AsyncGenerator[ChatResponse, None]:
        """Produce a response; must be implemented by a concrete subclass."""
        raise NotImplementedError
|