appkit-assistant 0.17.3__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- appkit_assistant/backend/{models.py → database/models.py} +32 -132
- appkit_assistant/backend/{repositories.py → database/repositories.py} +93 -1
- appkit_assistant/backend/model_manager.py +5 -5
- appkit_assistant/backend/models/__init__.py +28 -0
- appkit_assistant/backend/models/anthropic.py +31 -0
- appkit_assistant/backend/models/google.py +27 -0
- appkit_assistant/backend/models/openai.py +50 -0
- appkit_assistant/backend/models/perplexity.py +56 -0
- appkit_assistant/backend/processors/__init__.py +29 -0
- appkit_assistant/backend/processors/claude_responses_processor.py +205 -387
- appkit_assistant/backend/processors/gemini_responses_processor.py +290 -352
- appkit_assistant/backend/processors/lorem_ipsum_processor.py +6 -4
- appkit_assistant/backend/processors/mcp_mixin.py +297 -0
- appkit_assistant/backend/processors/openai_base.py +11 -125
- appkit_assistant/backend/processors/openai_chat_completion_processor.py +5 -3
- appkit_assistant/backend/processors/openai_responses_processor.py +480 -402
- appkit_assistant/backend/processors/perplexity_processor.py +156 -79
- appkit_assistant/backend/{processor.py → processors/processor_base.py} +7 -2
- appkit_assistant/backend/processors/streaming_base.py +188 -0
- appkit_assistant/backend/schemas.py +138 -0
- appkit_assistant/backend/services/auth_error_detector.py +99 -0
- appkit_assistant/backend/services/chunk_factory.py +273 -0
- appkit_assistant/backend/services/citation_handler.py +292 -0
- appkit_assistant/backend/services/file_cleanup_service.py +316 -0
- appkit_assistant/backend/services/file_upload_service.py +903 -0
- appkit_assistant/backend/services/file_validation.py +138 -0
- appkit_assistant/backend/{mcp_auth_service.py → services/mcp_auth_service.py} +4 -2
- appkit_assistant/backend/services/mcp_token_service.py +61 -0
- appkit_assistant/backend/services/message_converter.py +289 -0
- appkit_assistant/backend/services/openai_client_service.py +120 -0
- appkit_assistant/backend/{response_accumulator.py → services/response_accumulator.py} +163 -1
- appkit_assistant/backend/services/system_prompt_builder.py +89 -0
- appkit_assistant/backend/services/thread_service.py +5 -3
- appkit_assistant/backend/system_prompt_cache.py +3 -3
- appkit_assistant/components/__init__.py +8 -4
- appkit_assistant/components/composer.py +59 -24
- appkit_assistant/components/file_manager.py +623 -0
- appkit_assistant/components/mcp_server_dialogs.py +12 -20
- appkit_assistant/components/mcp_server_table.py +12 -2
- appkit_assistant/components/message.py +119 -2
- appkit_assistant/components/thread.py +1 -1
- appkit_assistant/components/threadlist.py +4 -2
- appkit_assistant/components/tools_modal.py +37 -20
- appkit_assistant/configuration.py +12 -0
- appkit_assistant/state/file_manager_state.py +697 -0
- appkit_assistant/state/mcp_oauth_state.py +3 -3
- appkit_assistant/state/mcp_server_state.py +47 -2
- appkit_assistant/state/system_prompt_state.py +1 -1
- appkit_assistant/state/thread_list_state.py +99 -5
- appkit_assistant/state/thread_state.py +88 -9
- {appkit_assistant-0.17.3.dist-info → appkit_assistant-1.0.1.dist-info}/METADATA +8 -6
- appkit_assistant-1.0.1.dist-info/RECORD +58 -0
- appkit_assistant/backend/processors/claude_base.py +0 -178
- appkit_assistant/backend/processors/gemini_base.py +0 -84
- appkit_assistant-0.17.3.dist-info/RECORD +0 -39
- /appkit_assistant/backend/{file_manager.py → services/file_manager.py} +0 -0
- {appkit_assistant-0.17.3.dist-info → appkit_assistant-1.0.1.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
"""File validation service for AI processors.
|
|
2
|
+
|
|
3
|
+
Provides file type, size, and extension validation utilities
|
|
4
|
+
shared across all processors that handle file uploads.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Final
|
|
10
|
+
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class FileValidationService:
    """Service for validating files before upload to AI APIs."""

    # Max file size (5MB)
    MAX_FILE_SIZE: Final[int] = 5 * 1024 * 1024

    # Allowed file extensions
    ALLOWED_EXTENSIONS: Final[set[str]] = {
        "pdf",
        "png",
        "jpg",
        "jpeg",
        "xlsx",
        "csv",
        "docx",
        "pptx",
        "md",
    }

    # Image extensions (for determining content type)
    IMAGE_EXTENSIONS: Final[set[str]] = {"png", "jpg", "jpeg", "gif", "webp"}

    # MIME type mapping
    MEDIA_TYPES: Final[dict[str, str]] = {
        "pdf": "application/pdf",
        "png": "image/png",
        "jpg": "image/jpeg",
        "jpeg": "image/jpeg",
        "gif": "image/gif",
        "webp": "image/webp",
        "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        "csv": "text/csv",
        "docx": (
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
        ),
        "pptx": (
            "application/vnd.openxmlformats-officedocument.presentationml.presentation"
        ),
        "md": "text/markdown",
        "txt": "text/plain",
    }

    def get_file_extension(self, file_path: str) -> str:
        """Extract file extension from path.

        Args:
            file_path: Path to the file

        Returns:
            Lowercase file extension without the dot, or empty string
        """
        # Split on the basename only: a dot in a parent directory
        # ("my.dir/file") must not be mistaken for an extension.
        name = Path(file_path).name
        return name.rsplit(".", 1)[-1].lower() if "." in name else ""

    def is_image_file(self, file_path: str) -> bool:
        """Check if file is an image based on extension.

        Args:
            file_path: Path to the file

        Returns:
            True if file has an image extension
        """
        ext = self.get_file_extension(file_path)
        return ext in self.IMAGE_EXTENSIONS

    def get_media_type(self, file_path: str) -> str:
        """Get MIME type for a file based on extension.

        Args:
            file_path: Path to the file

        Returns:
            MIME type string, defaults to application/octet-stream
        """
        ext = self.get_file_extension(file_path)
        return self.MEDIA_TYPES.get(ext, "application/octet-stream")

    def validate_file(self, file_path: str) -> tuple[bool, str]:
        """Validate file for upload.

        Checks:
        - File exists
        - Extension is allowed
        - File size is within limits

        Args:
            file_path: Path to the file

        Returns:
            Tuple of (is_valid, error_message)
        """
        path = Path(file_path)

        # Check if file exists
        if not path.exists():
            return False, f"File not found: {file_path}"

        # Check extension
        ext = self.get_file_extension(file_path)
        if ext not in self.ALLOWED_EXTENSIONS:
            return False, f"Unsupported file type: {ext}"

        # Check file size; derive the limit from MAX_FILE_SIZE so the
        # message stays correct if the constant is ever changed.
        file_size = path.stat().st_size
        if file_size > self.MAX_FILE_SIZE:
            size_mb = file_size / (1024 * 1024)
            max_mb = self.MAX_FILE_SIZE / (1024 * 1024)
            return False, f"File too large: {size_mb:.1f}MB (max {max_mb:.0f}MB)"

        return True, ""
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
# Lazily-created module-level singleton.
_file_validation_service: FileValidationService | None = None


def get_file_validation_service() -> FileValidationService:
    """Return the shared file validation service, creating it on first use.

    Returns:
        The FileValidationService instance
    """
    global _file_validation_service
    service = _file_validation_service
    if service is None:
        service = FileValidationService()
        _file_validation_service = service
    return service
|
|
@@ -20,11 +20,13 @@ from urllib.parse import urlencode, urlparse
|
|
|
20
20
|
import httpx
|
|
21
21
|
from sqlmodel import Session, select
|
|
22
22
|
|
|
23
|
-
from appkit_assistant.backend.models import (
|
|
23
|
+
from appkit_assistant.backend.database.models import (
|
|
24
24
|
AssistantMCPUserToken,
|
|
25
|
-
MCPAuthType,
|
|
26
25
|
MCPServer,
|
|
27
26
|
)
|
|
27
|
+
from appkit_assistant.backend.schemas import (
|
|
28
|
+
MCPAuthType,
|
|
29
|
+
)
|
|
28
30
|
from appkit_user.authentication.backend.entities import OAuthStateEntity
|
|
29
31
|
|
|
30
32
|
logger = logging.getLogger(__name__)
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
"""MCP Token Service for OAuth token management.
|
|
2
|
+
|
|
3
|
+
Provides unified token retrieval and validation for MCP servers
|
|
4
|
+
across all AI processors.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
|
|
9
|
+
import reflex as rx
|
|
10
|
+
|
|
11
|
+
from appkit_assistant.backend.database.models import AssistantMCPUserToken, MCPServer
|
|
12
|
+
from appkit_assistant.backend.services.mcp_auth_service import MCPAuthService
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class MCPTokenService:
    """Service for managing MCP OAuth tokens."""

    def __init__(self, mcp_auth_service: MCPAuthService) -> None:
        """Initialize the token service.

        Args:
            mcp_auth_service: The MCP auth service for token operations
        """
        self._mcp_auth_service = mcp_auth_service

    async def get_valid_token(
        self,
        server: MCPServer,
        user_id: int,
    ) -> AssistantMCPUserToken | None:
        """Get a valid OAuth token for the given server and user.

        Looks up the stored token and, if it has expired, refreshes it
        when a refresh token is available.

        Args:
            server: The MCP server configuration
            user_id: The user's ID

        Returns:
            A valid token or None if not available
        """
        server_id = server.id
        if server_id is None:
            # Unsaved servers cannot have tokens associated with them.
            logger.debug("Server %s has no ID, cannot retrieve token", server.name)
            return None

        with rx.session() as session:
            stored = self._mcp_auth_service.get_user_token(
                session, user_id, server_id
            )
            if stored is None:
                logger.debug(
                    "No token found for user %d on server %s", user_id, server.name
                )
                return None

            # Validate the stored token, refreshing it if expired.
            return await self._mcp_auth_service.ensure_valid_token(
                session, server, stored
            )
|
|
@@ -0,0 +1,289 @@
|
|
|
1
|
+
"""Message Converter Protocol and vendor-specific adapters.
|
|
2
|
+
|
|
3
|
+
Provides a unified interface for converting internal Message objects
|
|
4
|
+
to vendor-specific API formats.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
from abc import ABC, abstractmethod
|
|
9
|
+
from typing import Any, Protocol, TypeVar
|
|
10
|
+
|
|
11
|
+
from appkit_assistant.backend.schemas import Message, MessageType
|
|
12
|
+
from appkit_assistant.backend.system_prompt_cache import get_system_prompt
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
# Type variable for converted message format
|
|
17
|
+
T = TypeVar("T")
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class MessageConverterProtocol(Protocol[T]):
    """Structural interface implemented by every message format converter."""

    async def convert(
        self,
        messages: list[Message],
        mcp_prompt: str = "",
        file_blocks: list[dict[str, Any]] | None = None,
    ) -> T:
        """Translate internal messages into a vendor-specific payload.

        Args:
            messages: Internal Message objects to translate
            mcp_prompt: Optional MCP tool prompt to inject
            file_blocks: Optional file content blocks (for Claude)

        Returns:
            The converted, vendor-specific message structure
        """
        ...
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class BaseMessageConverter(ABC):
    """Shared plumbing for the concrete message converters."""

    # Maps internal message types onto the common chat role names.
    ROLE_MAP = {
        MessageType.HUMAN: "user",
        MessageType.ASSISTANT: "assistant",
        MessageType.SYSTEM: "system",
    }

    def _build_mcp_section(self, mcp_prompt: str) -> str:
        """Render the MCP tool-selection section of the system prompt.

        Args:
            mcp_prompt: The MCP tool prompts

        Returns:
            Formatted MCP section, or an empty string when no prompt was given
        """
        if not mcp_prompt:
            return ""
        header = "### Tool-Auswahlrichtlinien (Einbettung externer Beschreibungen)\n"
        return f"{header}{mcp_prompt}"

    async def _get_system_prompt_with_mcp(self, mcp_prompt: str = "") -> str:
        """Build the complete system prompt with the MCP section injected.

        Args:
            mcp_prompt: Optional MCP tool prompt

        Returns:
            Complete system prompt
        """
        template = await get_system_prompt()
        return template.format(mcp_prompts=self._build_mcp_section(mcp_prompt))

    @abstractmethod
    async def convert(
        self,
        messages: list[Message],
        mcp_prompt: str = "",
        file_blocks: list[dict[str, Any]] | None = None,
    ) -> Any:
        """Convert messages to vendor-specific format."""
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
class ClaudeMessageConverter(BaseMessageConverter):
    """Converter for Claude Messages API format."""

    async def convert(
        self,
        messages: list[Message],
        mcp_prompt: str = "",
        file_blocks: list[dict[str, Any]] | None = None,
    ) -> tuple[list[dict[str, Any]], str]:
        """Convert messages to Claude API format.

        Args:
            messages: List of internal Message objects
            mcp_prompt: Optional MCP tool prompt
            file_blocks: Optional file content blocks to attach to last user message

        Returns:
            Tuple of (claude_messages, system_prompt)
        """
        claude_messages: list[dict[str, Any]] = []
        last_index = len(messages) - 1

        for i, msg in enumerate(messages):
            if msg.type == MessageType.SYSTEM:
                continue  # System messages handled separately

            role = "user" if msg.type == MessageType.HUMAN else "assistant"

            # Build content blocks for this message.
            content: list[dict[str, Any]] = []

            # Attach uploaded files to the final user message only.
            if role == "user" and i == last_index and file_blocks:
                content.extend(file_blocks)

            # Add text content
            content.append({"type": "text", "text": msg.text})

            claude_messages.append(
                {
                    "role": role,
                    # Claude accepts a plain string when there are no file blocks.
                    "content": content if len(content) > 1 else msg.text,
                }
            )

        # Build system prompt (system messages travel outside the message list).
        system_prompt = await self._get_system_prompt_with_mcp(mcp_prompt)

        return claude_messages, system_prompt
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
class OpenAIResponsesConverter(BaseMessageConverter):
    """Converter for OpenAI Responses API format."""

    def __init__(self, use_system_prompt: bool = True) -> None:
        """Initialize the converter.

        Args:
            use_system_prompt: Whether to prepend system prompt
        """
        self._use_system_prompt = use_system_prompt

    async def convert(
        self,
        messages: list[Message],
        mcp_prompt: str = "",
        file_blocks: list[dict[str, Any]] | None = None,  # noqa: ARG002
    ) -> list[dict[str, Any]]:
        """Convert messages to OpenAI Responses API format.

        Args:
            messages: List of internal Message objects
            mcp_prompt: Optional MCP tool prompt
            file_blocks: Not used for OpenAI Responses

        Returns:
            List of formatted messages with content arrays
        """
        input_messages: list[dict[str, Any]] = []

        # Lead with the system prompt when enabled.
        if self._use_system_prompt:
            system_text = await self._get_system_prompt_with_mcp(mcp_prompt)
            input_messages.append(
                {
                    "role": "system",
                    "content": [{"type": "input_text", "text": system_text}],
                }
            )

        # Then the conversation itself; system entries were handled above.
        for msg in messages:
            if msg.type == MessageType.SYSTEM:
                continue

            # User turns use input_text, assistant turns output_text.
            if msg.type == MessageType.HUMAN:
                role, content_type = "user", "input_text"
            else:
                role, content_type = "assistant", "output_text"
            input_messages.append(
                {"role": role, "content": [{"type": content_type, "text": msg.text}]}
            )

        return input_messages
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
class OpenAIChatConverter(BaseMessageConverter):
    """Converter for OpenAI Chat Completions API format.

    Note: This format merges consecutive same-role messages and uses
    simple string content instead of content arrays.
    """

    async def convert(
        self,
        messages: list[Message],
        mcp_prompt: str = "",  # noqa: ARG002
        file_blocks: list[dict[str, Any]] | None = None,  # noqa: ARG002
    ) -> list[dict[str, str]]:
        """Convert messages to OpenAI Chat Completions format.

        Merges consecutive user/assistant messages with blank line separator.

        Args:
            messages: List of internal Message objects
            mcp_prompt: Not used for chat completions
            file_blocks: Not used for chat completions

        Returns:
            List of role/content dicts
        """
        formatted: list[dict[str, str]] = []

        for msg in messages or []:
            role = self.ROLE_MAP.get(msg.type)
            if role is None:
                # Unknown message types are silently dropped.
                continue

            previous = formatted[-1] if formatted else None
            if previous is not None and role != "system" and previous["role"] == role:
                # Fold repeated roles into one message, blank-line separated.
                previous["content"] = f"{previous['content']}\n\n{msg.text}"
            else:
                formatted.append({"role": role, "content": msg.text})

        return formatted
|
|
236
|
+
|
|
237
|
+
|
|
238
|
+
class GeminiMessageConverter(BaseMessageConverter):
    """Converter for Gemini GenAI API format.

    Note: Requires google.genai.types for Content/Part objects.
    """

    async def convert(
        self,
        messages: list[Message],
        mcp_prompt: str = "",
        file_blocks: list[dict[str, Any]] | None = None,  # noqa: ARG002
    ) -> tuple[list[Any], str | None]:
        """Convert messages to Gemini Content objects.

        Args:
            messages: List of internal Message objects
            mcp_prompt: Optional MCP tool prompt
            file_blocks: Not used for Gemini

        Returns:
            Tuple of (contents list, system_instruction)
        """
        # Deferred import keeps google-genai an optional dependency.
        from google.genai import types  # noqa: PLC0415

        contents: list[types.Content] = []

        # MCP section is prefixed with a blank line when present.
        mcp_section = (
            f"\n\n{self._build_mcp_section(mcp_prompt)}" if mcp_prompt else ""
        )

        # Seed the system instruction from the configured prompt template.
        template = await get_system_prompt()
        system_instruction: str | None = (
            template.format(mcp_prompts=mcp_section) if template else None
        )

        for msg in messages:
            if msg.type == MessageType.SYSTEM:
                # Fold system messages into the system instruction.
                if system_instruction:
                    system_instruction = f"{system_instruction}\n{msg.text}"
                else:
                    system_instruction = msg.text
            elif msg.type in (MessageType.HUMAN, MessageType.ASSISTANT):
                gemini_role = "user" if msg.type == MessageType.HUMAN else "model"
                contents.append(
                    types.Content(role=gemini_role, parts=[types.Part(text=msg.text)])
                )

        return contents, system_instruction
|
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
"""OpenAI client service for creating and managing AsyncOpenAI clients."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
|
|
5
|
+
from openai import AsyncOpenAI
|
|
6
|
+
|
|
7
|
+
from appkit_assistant.configuration import AssistantConfig
|
|
8
|
+
from appkit_commons.registry import service_registry
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class OpenAIClientService:
    """Service for creating AsyncOpenAI clients with proper configuration.

    This service handles the complexity of creating OpenAI clients for both
    standard OpenAI API and Azure OpenAI endpoints. It reads configuration
    from the AssistantConfig and provides a consistent interface for client
    creation throughout the application.

    Usage:
        # Get service from registry
        service = service_registry().get(OpenAIClientService)

        # Create a client
        client = service.create_client()
        if client:
            response = await client.files.list()

        # Check if service is available
        if service.is_available:
            ...
    """

    def __init__(
        self,
        api_key: str | None = None,
        base_url: str | None = None,
        is_azure: bool = False,
    ) -> None:
        """Initialize the OpenAI client service.

        Args:
            api_key: API key for OpenAI or Azure OpenAI.
            base_url: Base URL for the API (optional).
            is_azure: Whether to use Azure OpenAI client configuration.
        """
        self._api_key = api_key
        self._base_url = base_url
        self._is_azure = is_azure

    @classmethod
    def from_config(cls) -> "OpenAIClientService":
        """Create an OpenAIClientService from the AssistantConfig.

        Returns:
            Configured OpenAIClientService instance.
        """
        config = service_registry().get(AssistantConfig)
        api_key = (
            config.openai_api_key.get_secret_value() if config.openai_api_key else None
        )
        return cls(
            api_key=api_key,
            base_url=config.openai_base_url,
            is_azure=config.openai_is_azure,
        )

    @property
    def is_available(self) -> bool:
        """Check if the service is properly configured with an API key."""
        return self._api_key is not None

    def create_client(self) -> AsyncOpenAI | None:
        """Create an AsyncOpenAI client with the configured settings.

        Returns:
            Configured AsyncOpenAI client, or None if API key is not available.
        """
        # The early return below guarantees a key for all later branches,
        # so they no longer need to re-check it.
        if not self._api_key:
            logger.warning("OpenAI API key not configured")
            return None

        if self._base_url and self._is_azure:
            logger.debug("Creating Azure OpenAI client")
            # Azure exposes an OpenAI-compatible surface under /openai/v1.
            return AsyncOpenAI(
                api_key=self._api_key,
                base_url=f"{self._base_url}/openai/v1",
                default_query={"api-version": "preview"},
            )
        if self._base_url:
            logger.debug("Creating OpenAI client with custom base URL")
            return AsyncOpenAI(api_key=self._api_key, base_url=self._base_url)

        logger.debug("Creating standard OpenAI client")
        return AsyncOpenAI(api_key=self._api_key)
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def get_openai_client_service() -> OpenAIClientService:
    """Get or create the OpenAI client service from the registry.

    This function ensures the service is registered and returns it.

    Returns:
        The configured OpenAIClientService.
    """
    registry = service_registry()

    # Fast path: return the already-registered instance.
    try:
        existing = registry.get(OpenAIClientService)
    except KeyError:
        existing = None

    if existing is not None:
        return existing

    # First call: build from config and remember it in the registry.
    service = OpenAIClientService.from_config()
    registry.register_as(OpenAIClientService, service)
    return service