appkit-assistant 0.17.3__tar.gz → 1.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/PKG-INFO +8 -6
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/pyproject.toml +8 -6
- {appkit_assistant-0.17.3/src/appkit_assistant/backend → appkit_assistant-1.0.0/src/appkit_assistant/backend/database}/models.py +32 -132
- {appkit_assistant-0.17.3/src/appkit_assistant/backend → appkit_assistant-1.0.0/src/appkit_assistant/backend/database}/repositories.py +93 -1
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/backend/model_manager.py +5 -5
- appkit_assistant-1.0.0/src/appkit_assistant/backend/models/__init__.py +28 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/models/anthropic.py +31 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/models/google.py +27 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/models/openai.py +50 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/models/perplexity.py +56 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/processors/__init__.py +29 -0
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/backend/processors/claude_responses_processor.py +205 -387
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/backend/processors/gemini_responses_processor.py +231 -299
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/backend/processors/lorem_ipsum_processor.py +6 -4
- appkit_assistant-1.0.0/src/appkit_assistant/backend/processors/mcp_mixin.py +297 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/processors/openai_base.py +75 -0
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/backend/processors/openai_chat_completion_processor.py +5 -3
- appkit_assistant-1.0.0/src/appkit_assistant/backend/processors/openai_responses_processor.py +850 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/processors/perplexity_processor.py +203 -0
- appkit_assistant-0.17.3/src/appkit_assistant/backend/processor.py → appkit_assistant-1.0.0/src/appkit_assistant/backend/processors/processor_base.py +7 -2
- appkit_assistant-1.0.0/src/appkit_assistant/backend/processors/streaming_base.py +188 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/schemas.py +138 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/services/auth_error_detector.py +99 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/services/chunk_factory.py +273 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/services/citation_handler.py +292 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/services/file_cleanup_service.py +316 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/services/file_upload_service.py +903 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/services/file_validation.py +138 -0
- {appkit_assistant-0.17.3/src/appkit_assistant/backend → appkit_assistant-1.0.0/src/appkit_assistant/backend/services}/mcp_auth_service.py +4 -2
- appkit_assistant-1.0.0/src/appkit_assistant/backend/services/mcp_token_service.py +61 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/services/message_converter.py +289 -0
- appkit_assistant-1.0.0/src/appkit_assistant/backend/services/openai_client_service.py +120 -0
- {appkit_assistant-0.17.3/src/appkit_assistant/backend → appkit_assistant-1.0.0/src/appkit_assistant/backend/services}/response_accumulator.py +163 -1
- appkit_assistant-1.0.0/src/appkit_assistant/backend/services/system_prompt_builder.py +89 -0
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/backend/services/thread_service.py +5 -3
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/backend/system_prompt_cache.py +3 -3
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/components/__init__.py +8 -4
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/components/composer.py +59 -24
- appkit_assistant-1.0.0/src/appkit_assistant/components/file_manager.py +623 -0
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/components/mcp_server_dialogs.py +12 -20
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/components/mcp_server_table.py +12 -2
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/components/message.py +119 -2
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/components/thread.py +1 -1
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/components/threadlist.py +4 -2
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/components/tools_modal.py +37 -20
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/configuration.py +12 -0
- appkit_assistant-1.0.0/src/appkit_assistant/state/file_manager_state.py +697 -0
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/state/mcp_oauth_state.py +3 -3
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/state/mcp_server_state.py +47 -2
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/state/system_prompt_state.py +1 -1
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/state/thread_list_state.py +99 -5
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/state/thread_state.py +88 -9
- appkit_assistant-0.17.3/src/appkit_assistant/backend/processors/claude_base.py +0 -178
- appkit_assistant-0.17.3/src/appkit_assistant/backend/processors/gemini_base.py +0 -84
- appkit_assistant-0.17.3/src/appkit_assistant/backend/processors/openai_base.py +0 -189
- appkit_assistant-0.17.3/src/appkit_assistant/backend/processors/openai_responses_processor.py +0 -772
- appkit_assistant-0.17.3/src/appkit_assistant/backend/processors/perplexity_processor.py +0 -126
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/.gitignore +0 -0
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/README.md +0 -0
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/docs/assistant.png +0 -0
- {appkit_assistant-0.17.3/src/appkit_assistant/backend → appkit_assistant-1.0.0/src/appkit_assistant/backend/services}/file_manager.py +0 -0
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/components/composer_key_handler.py +0 -0
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/components/mcp_oauth.py +0 -0
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/components/system_prompt_editor.py +0 -0
- {appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/pages.py +0 -0

{appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/PKG-INFO
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: appkit-assistant
-Version: 0.17.3
+Version: 1.0.0
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/jenreh/appkit
 Project-URL: Documentation, https://github.com/jenreh/appkit/tree/main/docs

@@ -16,14 +16,16 @@ Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Topic :: Software Development :: User Interfaces
 Requires-Python: >=3.13
-Requires-Dist: anthropic>=0.
+Requires-Dist: anthropic>=0.77.0
 Requires-Dist: appkit-commons
 Requires-Dist: appkit-mantine
 Requires-Dist: appkit-ui
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
+Requires-Dist: apscheduler>=3.11.2
+Requires-Dist: google-genai>=1.60.0
+Requires-Dist: mcp>=1.26.0
+Requires-Dist: openai>=2.16.0
+Requires-Dist: python-multipart>=0.0.22
+Requires-Dist: reflex>=0.8.26
 Description-Content-Type: text/markdown
 
 # appkit-assistant
{appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/pyproject.toml
RENAMED

@@ -1,16 +1,18 @@
 [project]
 dependencies = [
-    "anthropic>=0.
+    "anthropic>=0.77.0",
+    "apscheduler>=3.11.2",
     "appkit-commons",
     "appkit-mantine",
     "appkit-ui",
-    "google-genai>=1.
-    "mcp>=1.
-    "openai>=2.
-    "reflex>=0.8.
+    "google-genai>=1.60.0",
+    "mcp>=1.26.0",
+    "openai>=2.16.0",
+    "reflex>=0.8.26",
+    "python-multipart>=0.0.22",
 ]
 name = "appkit-assistant"
-version = "0.17.3"
+version = "1.0.0"
 description = "Add your description here"
 readme = "README.md"
 authors = [{ name = "Jens Rehpöhler" }]
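The dependency floors are a headline change: anthropic, google-genai, mcp, and openai all move to much newer minimums, and apscheduler plus python-multipart arrive alongside the new file-cleanup and upload services. A quick, purely illustrative check that an environment meets the new floors (assuming the package and its dependencies are installed):

# Illustrative sketch: print installed versions of the bumped dependencies.
from importlib.metadata import version

for name in ("anthropic", "apscheduler", "google-genai", "mcp",
             "openai", "python-multipart", "reflex"):
    print(f"{name}: {version(name)}")  # e.g. anthropic should be >= 0.77.0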
{appkit_assistant-0.17.3/src/appkit_assistant/backend → appkit_assistant-1.0.0/src/appkit_assistant/backend/database}/models.py
RENAMED

@@ -1,14 +1,12 @@
 import json
-import uuid
 from datetime import UTC, datetime
-from enum import StrEnum
 from typing import Any
 
 import reflex as rx
-from pydantic import BaseModel
 from sqlalchemy.sql import func
 from sqlmodel import Column, DateTime, Field
 
+from appkit_assistant.backend.schemas import MCPAuthType, ThreadStatus
 from appkit_commons.database.configuration import DatabaseConfig
 from appkit_commons.database.entities import EncryptedString
 from appkit_commons.registry import service_registry
@@ -32,135 +30,6 @@ class EncryptedJSON(EncryptedString):
         return value
 
 
-class ChunkType(StrEnum):
-    """Enum for chunk types."""
-
-    TEXT = "text"  # default
-    ANNOTATION = "annotation"  # for text annotations
-    IMAGE = "image"
-    IMAGE_PARTIAL = "image_partial"  # for streaming image generation
-    THINKING = "thinking"  # when the model is "thinking" / reasoning
-    THINKING_RESULT = "thinking_result"  # when the "thinking" is done
-    ACTION = "action"  # when the user needs to take action
-    TOOL_RESULT = "tool_result"  # result from a tool
-    TOOL_CALL = "tool_call"  # calling a tool
-    COMPLETION = "completion"  # when response generation is complete
-    AUTH_REQUIRED = "auth_required"  # user needs to authenticate (MCP)
-    ERROR = "error"  # when an error occurs
-    LIFECYCLE = "lifecycle"
-
-
-class Chunk(BaseModel):
-    """Model for text chunks."""
-
-    type: ChunkType
-    text: str
-    chunk_metadata: dict[str, str] = {}
-
-
-class ThreadStatus(StrEnum):
-    """Enum for thread status."""
-
-    NEW = "new"
-    ACTIVE = "active"
-    IDLE = "idle"
-    WAITING = "waiting"
-    ERROR = "error"
-    DELETED = "deleted"
-    ARCHIVED = "archived"
-
-
-class MessageType(StrEnum):
-    """Enum for message types."""
-
-    HUMAN = "human"
-    SYSTEM = "system"
-    ASSISTANT = "assistant"
-    TOOL_USE = "tool_use"
-    ERROR = "error"
-    INFO = "info"
-    WARNING = "warning"
-
-
-class Message(BaseModel):
-    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
-    text: str
-    original_text: str | None = None  # To store original text if edited
-    editable: bool = False
-    type: MessageType
-    done: bool = False
-    attachments: list[str] = []  # List of filenames for display
-
-
-class ThinkingType(StrEnum):
-    REASONING = "reasoning"
-    TOOL_CALL = "tool_call"
-
-
-class ThinkingStatus(StrEnum):
-    IN_PROGRESS = "in_progress"
-    COMPLETED = "completed"
-    ERROR = "error"
-
-
-class Thinking(BaseModel):
-    type: ThinkingType
-    id: str  # reasoning_session_id or tool_id
-    text: str
-    status: ThinkingStatus = ThinkingStatus.IN_PROGRESS
-    tool_name: str | None = None
-    parameters: str | None = None
-    result: str | None = None
-    error: str | None = None
-
-
-class AIModel(BaseModel):
-    id: str
-    text: str
-    icon: str = "codesandbox"
-    stream: bool = False
-    tenant_key: str = ""
-    project_id: int = 0
-    model: str = "default"
-    temperature: float = 0.05
-    supports_tools: bool = False
-    supports_attachments: bool = False
-    keywords: list[str] = []
-    disabled: bool = False
-    requires_role: str | None = None
-
-
-class Suggestion(BaseModel):
-    prompt: str
-    icon: str = ""
-
-
-class UploadedFile(BaseModel):
-    """Model for tracking uploaded files in the composer."""
-
-    filename: str
-    file_path: str
-    size: int = 0
-
-
-class ThreadModel(BaseModel):
-    thread_id: str
-    title: str = ""
-    active: bool = False
-    state: ThreadStatus = ThreadStatus.NEW
-    prompt: str | None = ""
-    messages: list[Message] = []
-    ai_model: str = ""
-
-
-class MCPAuthType(StrEnum):
-    """Enum for MCP server authentication types."""
-
-    NONE = "none"
-    API_KEY = "api_key"
-    OAUTH_DISCOVERY = "oauth_discovery"
-
-
 class MCPServer(rx.Model, table=True):
     """Model for MCP (Model Context Protocol) server configuration."""
 
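The classes removed above move out of the database module into the new backend/schemas.py (+138 lines in the file list); the import hunks in this file, repositories.py, and model_manager.py all point there. A minimal sketch of the updated import path, assuming the definitions carried over with the defaults shown above:

# Old (0.17.3): from appkit_assistant.backend.models import AIModel, ThreadStatus
# New (1.0.0), per the import hunks in this diff:
from appkit_assistant.backend.schemas import AIModel, MCPAuthType, ThreadStatus

model = AIModel(id="demo", text="Demo")  # defaults per the removed definition
print(model.temperature, model.stream)   # 0.05, False in 0.17.3; 1.0.0 may differ
print(ThreadStatus.NEW, MCPAuthType.NONE)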
@@ -197,6 +66,7 @@ class MCPServer(rx.Model, table=True):
     oauth_discovered_at: datetime | None = Field(
         default=None, sa_column=Column(DateTime(timezone=True), nullable=True)
     )
+    active: bool = Field(default=True, nullable=False)
 
 
 class SystemPrompt(rx.Model, table=True):
@@ -228,6 +98,7 @@ class AssistantThread(rx.Model, table=True):
     ai_model: str = Field(default="", nullable=False)
     active: bool = Field(default=False, nullable=False)
     messages: list[dict[str, Any]] = Field(default=[], sa_column=Column(EncryptedJSON))
+    vector_store_id: str | None = Field(default=None, nullable=True)
     created_at: datetime = Field(
         default_factory=lambda: datetime.now(UTC),
         sa_column=Column(DateTime(timezone=True)),
@@ -271,3 +142,32 @@ class AssistantMCPUserToken(rx.Model, table=True):
         default_factory=lambda: datetime.now(UTC),
         sa_column=Column(DateTime(timezone=True), onupdate=func.now()),
     )
+
+
+class AssistantFileUpload(rx.Model, table=True):
+    """Model for tracking files uploaded to OpenAI for vector search.
+
+    Each file is associated with a thread and vector store.
+    """
+
+    __tablename__ = "assistant_file_uploads"
+
+    id: int | None = Field(default=None, primary_key=True)
+    filename: str = Field(max_length=255, nullable=False)
+    openai_file_id: str = Field(max_length=255, nullable=False, index=True)
+    vector_store_id: str = Field(max_length=255, nullable=False, index=True)
+    vector_store_name: str = Field(max_length=255, default="", nullable=False)
+    thread_id: int = Field(
+        index=True, nullable=False, foreign_key="assistant_thread.id"
+    )
+    user_id: int = Field(index=True, nullable=False)
+    file_size: int = Field(default=0, nullable=False)
+
+    created_at: datetime = Field(
+        default_factory=lambda: datetime.now(UTC),
+        sa_column=Column(DateTime(timezone=True)),
+    )
+    updated_at: datetime = Field(
+        default_factory=lambda: datetime.now(UTC),
+        sa_column=Column(DateTime(timezone=True), onupdate=func.now()),
+    )
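AssistantFileUpload is the persistence half of the new file-search feature: it records which OpenAI file and vector store back each thread. A construction sketch with hypothetical values (the real writes happen in the new services/file_upload_service.py):

from appkit_assistant.backend.database.models import AssistantFileUpload

upload = AssistantFileUpload(
    filename="report.pdf",              # hypothetical example values throughout
    openai_file_id="file-abc123",
    vector_store_id="vs_xyz789",
    vector_store_name="Thread 42 store",
    thread_id=42,                       # FK to assistant_thread.id
    user_id=7,
    file_size=102_400,
)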
{appkit_assistant-0.17.3/src/appkit_assistant/backend → appkit_assistant-1.0.0/src/appkit_assistant/backend/database}/repositories.py
RENAMED

@@ -7,7 +7,8 @@ from sqlalchemy import select
 from sqlalchemy.ext.asyncio import AsyncSession
 from sqlalchemy.orm import defer
 
-from appkit_assistant.backend.models import (
+from appkit_assistant.backend.database.models import (
+    AssistantFileUpload,
     AssistantThread,
     MCPServer,
     SystemPrompt,
@@ -30,6 +31,18 @@ class MCPServerRepository(BaseRepository[MCPServer, AsyncSession]):
         result = await session.execute(stmt)
         return list(result.scalars().all())
 
+    async def find_all_active_ordered_by_name(
+        self, session: AsyncSession
+    ) -> list[MCPServer]:
+        """Retrieve all active MCP servers ordered by name."""
+        stmt = (
+            select(MCPServer)
+            .where(MCPServer.active == True)  # noqa: E712
+            .order_by(MCPServer.name)
+        )
+        result = await session.execute(stmt)
+        return list(result.scalars().all())
+
 
 class SystemPromptRepository(BaseRepository[SystemPrompt, AsyncSession]):
     """Repository class for system prompt database operations.
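This pairs with the new MCPServer.active flag above: callers can hide deactivated servers without deleting their configuration. A usage sketch, assuming an AsyncSession obtained from the appkit_commons database layer:

from appkit_assistant.backend.database.repositories import mcp_server_repo

async def list_active_server_names(session) -> list[str]:
    # session: AsyncSession; only servers with active=True, sorted by name
    servers = await mcp_server_repo.find_all_active_ordered_by_name(session)
    return [server.name for server in servers]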
@@ -156,7 +169,86 @@ class ThreadRepository(BaseRepository[AssistantThread, AsyncSession]):
         return list(result.scalars().all())
 
 
+class FileUploadRepository(BaseRepository[AssistantFileUpload, AsyncSession]):
+    """Repository class for file upload database operations."""
+
+    @property
+    def model_class(self) -> type[AssistantFileUpload]:
+        return AssistantFileUpload
+
+    async def find_unique_vector_stores(
+        self, session: AsyncSession
+    ) -> list[tuple[str, str]]:
+        """Get unique vector store IDs with names from all file uploads.
+
+        Returns:
+            List of tuples (vector_store_id, vector_store_name).
+        """
+        stmt = (
+            select(
+                AssistantFileUpload.vector_store_id,
+                AssistantFileUpload.vector_store_name,
+            )
+            .distinct()
+            .order_by(AssistantFileUpload.vector_store_id)
+        )
+        result = await session.execute(stmt)
+        return [(row[0], row[1] or "") for row in result.all()]
+
+    async def find_by_vector_store(
+        self, session: AsyncSession, vector_store_id: str
+    ) -> list[AssistantFileUpload]:
+        """Get all files for a specific vector store."""
+        stmt = (
+            select(AssistantFileUpload)
+            .where(AssistantFileUpload.vector_store_id == vector_store_id)
+            .order_by(AssistantFileUpload.created_at.desc())
+        )
+        result = await session.execute(stmt)
+        return list(result.scalars().all())
+
+    async def find_by_thread(
+        self, session: AsyncSession, thread_id: int
+    ) -> list[AssistantFileUpload]:
+        """Get all files for a specific thread."""
+        stmt = (
+            select(AssistantFileUpload)
+            .where(AssistantFileUpload.thread_id == thread_id)
+            .order_by(AssistantFileUpload.created_at.desc())
+        )
+        result = await session.execute(stmt)
+        return list(result.scalars().all())
+
+    async def delete_file(
+        self, session: AsyncSession, file_id: int
+    ) -> AssistantFileUpload | None:
+        """Delete a file upload by ID and return the deleted record."""
+        stmt = select(AssistantFileUpload).where(AssistantFileUpload.id == file_id)
+        result = await session.execute(stmt)
+        file_upload = result.scalars().first()
+        if file_upload:
+            await session.delete(file_upload)
+            await session.flush()
+            return file_upload
+        return None
+
+    async def delete_by_vector_store(
+        self, session: AsyncSession, vector_store_id: str
+    ) -> list[AssistantFileUpload]:
+        """Delete all files for a vector store and return the deleted records."""
+        stmt = select(AssistantFileUpload).where(
+            AssistantFileUpload.vector_store_id == vector_store_id
+        )
+        result = await session.execute(stmt)
+        files = list(result.scalars().all())
+        for file_upload in files:
+            await session.delete(file_upload)
+        await session.flush()
+        return files
+
+
 # Export instances
 mcp_server_repo = MCPServerRepository()
 system_prompt_repo = SystemPromptRepository()
 thread_repo = ThreadRepository()
+file_upload_repo = FileUploadRepository()
{appkit_assistant-0.17.3 → appkit_assistant-1.0.0}/src/appkit_assistant/backend/model_manager.py
RENAMED

@@ -4,8 +4,8 @@ import logging
 import threading
 from typing import Self
 
-from appkit_assistant.backend.
-from appkit_assistant.backend.
+from appkit_assistant.backend.processors.processor_base import ProcessorBase
+from appkit_assistant.backend.schemas import AIModel
 
 logger = logging.getLogger(__name__)
 

@@ -29,13 +29,13 @@ class ModelManager:
     def __init__(self):
         """Initialize the service manager if not already initialized."""
         if not hasattr(self, "_initialized"):
-            self._processors: dict[str,
+            self._processors: dict[str, ProcessorBase] = {}
             self._models: dict[str, AIModel] = {}
             self._model_to_processor: dict[str, str] = {}
             self._initialized = True
             logger.debug("ModelManager initialized")
 
-    def register_processor(self, processor_name: str, processor:
+    def register_processor(self, processor_name: str, processor: ProcessorBase) -> None:
         """
         Register a processor with the service manager.
 

@@ -59,7 +59,7 @@ class ModelManager:
 
         logger.debug("Registered processor: %s", processor_name)
 
-    def get_processor_for_model(self, model_id: str) ->
+    def get_processor_for_model(self, model_id: str) -> ProcessorBase | None:
         """
         Get the processor that supports the specified model.
 
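ModelManager is now typed against the new ProcessorBase. A resolution sketch (registration normally happens in application setup; how model IDs map to processors is internal to register_processor):

from appkit_assistant.backend.model_manager import ModelManager

manager = ModelManager()                                # __init__ guarded by _initialized
processor = manager.get_processor_for_model("gpt-5.1")  # ProcessorBase | None
if processor is None:
    print("No processor registered for gpt-5.1 yet")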
appkit_assistant-1.0.0/src/appkit_assistant/backend/models/__init__.py
ADDED

@@ -0,0 +1,28 @@
+from appkit_assistant.backend.models.anthropic import (
+    CLAUDE_HAIKU_4_5,
+    CLAUDE_SONNET_4_5,
+)
+from appkit_assistant.backend.models.google import GEMINI_3_FLASH, GEMINI_3_PRO
+from appkit_assistant.backend.models.openai import GPT_5_1, GPT_5_MINI, GPT_5_2
+from appkit_assistant.backend.models.perplexity import (
+    SONAR,
+    SONAR_DEEP_RESEARCH,
+    SONAR_PRO,
+    SONAR_REASONING,
+)
+from appkit_assistant.backend.schemas import AIModel
+
+__all__ = [
+    "CLAUDE_HAIKU_4_5",
+    "CLAUDE_SONNET_4_5",
+    "GEMINI_3_FLASH",
+    "GEMINI_3_PRO",
+    "GPT_5_1",
+    "GPT_5_2",
+    "GPT_5_MINI",
+    "SONAR",
+    "SONAR_DEEP_RESEARCH",
+    "SONAR_PRO",
+    "SONAR_REASONING",
+    "AIModel",
+]
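The new models package exposes a flat, importable catalog of predefined AIModel instances; a sketch:

from appkit_assistant.backend.models import CLAUDE_SONNET_4_5, GEMINI_3_PRO, GPT_5_1

for ai_model in (CLAUDE_SONNET_4_5, GEMINI_3_PRO, GPT_5_1):
    print(ai_model.id, ai_model.model, ai_model.supports_tools)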
appkit_assistant-1.0.0/src/appkit_assistant/backend/models/anthropic.py
ADDED

@@ -0,0 +1,31 @@
+"""
+Claude model definitions for Anthropic's Claude API.
+"""
+
+from typing import Final
+
+from appkit_assistant.backend.schemas import (
+    AIModel,
+)
+
+CLAUDE_HAIKU_4_5: Final = AIModel(
+    id="claude-haiku-4.5",
+    text="Claude 4.5 Haiku",
+    icon="anthropic",
+    model="claude-haiku-4-5",
+    stream=True,
+    supports_attachments=False,
+    supports_tools=True,
+    temperature=1.0,
+)
+
+CLAUDE_SONNET_4_5: Final = AIModel(
+    id="claude-sonnet-4.5",
+    text="Claude 4.5 Sonnet",
+    icon="anthropic",
+    model="claude-sonnet-4-5",
+    stream=True,
+    supports_attachments=False,
+    supports_tools=True,
+    temperature=1.0,
+)
appkit_assistant-1.0.0/src/appkit_assistant/backend/models/google.py
ADDED

@@ -0,0 +1,27 @@
+"""
+Gemini model definitions for Google's GenAI API.
+"""
+
+from typing import Final
+
+from appkit_assistant.backend.schemas import AIModel
+
+GEMINI_3_PRO: Final = AIModel(
+    id="gemini-3-pro-preview",
+    text="Gemini 3 Pro",
+    icon="googlegemini",
+    model="gemini-3-pro-preview",
+    stream=True,
+    supports_attachments=False,
+    supports_tools=True,
+)
+
+GEMINI_3_FLASH: Final = AIModel(
+    id="gemini-3-flash-preview",
+    text="Gemini 3 Flash",
+    icon="googlegemini",
+    model="gemini-3-flash-preview",
+    stream=True,
+    supports_attachments=False,
+    supports_tools=True,
+)
appkit_assistant-1.0.0/src/appkit_assistant/backend/models/openai.py
ADDED

@@ -0,0 +1,50 @@
+from typing import Final
+
+from appkit_assistant.backend.schemas import AIModel
+
+O3: Final = AIModel(
+    id="o3",
+    text="o3 Reasoning",
+    icon="openai",
+    model="o3",
+    temperature=1,
+    stream=True,
+    supports_attachments=False,
+    supports_tools=True,
+)
+
+GPT_5_MINI: Final = AIModel(
+    id="gpt-5-mini",
+    text="GPT 5 Mini",
+    icon="openai",
+    model="gpt-5-mini",
+    stream=True,
+    supports_attachments=True,
+    supports_tools=True,
+    supports_search=True,
+    temperature=1,
+)
+
+GPT_5_1: Final = AIModel(
+    id="gpt-5.1",
+    text="GPT 5.1",
+    icon="openai",
+    model="gpt-5.1",
+    stream=True,
+    supports_attachments=True,
+    supports_tools=True,
+    supports_search=True,
+    temperature=1,
+)
+
+GPT_5_2: Final = AIModel(
+    id="gpt-5.2",
+    text="GPT 5.2",
+    icon="openai",
+    model="gpt-5.2",
+    stream=True,
+    supports_attachments=True,
+    supports_tools=True,
+    supports_search=True,
+    temperature=1,
+)
appkit_assistant-1.0.0/src/appkit_assistant/backend/models/perplexity.py
ADDED

@@ -0,0 +1,56 @@
+import enum
+
+from appkit_assistant.backend.schemas import AIModel
+
+
+class ContextSize(enum.StrEnum):
+    """Enum for context size options."""
+
+    LOW = "low"
+    MEDIUM = "medium"
+    HIGH = "high"
+
+
+class PerplexityAIModel(AIModel):
+    """AI model for Perplexity API."""
+
+    search_context_size: ContextSize = ContextSize.MEDIUM
+    search_domain_filter: list[str] = []
+
+
+SONAR = PerplexityAIModel(
+    id="sonar",
+    text="Perplexity Sonar",
+    icon="perplexity",
+    model="sonar",
+    stream=True,
+)
+
+SONAR_PRO = PerplexityAIModel(
+    id="sonar-pro",
+    text="Perplexity Sonar Pro",
+    icon="perplexity",
+    model="sonar-pro",
+    stream=True,
+    keywords=["sonar", "perplexity"],
+)
+
+SONAR_DEEP_RESEARCH = PerplexityAIModel(
+    id="sonar-deep-research",
+    text="Perplexity Deep Research",
+    icon="perplexity",
+    model="sonar-deep-research",
+    search_context_size=ContextSize.HIGH,
+    stream=True,
+    keywords=["reasoning", "deep", "research", "perplexity"],
+)
+
+SONAR_REASONING = PerplexityAIModel(
+    id="sonar-reasoning",
+    text="Perplexity Reasoning",
+    icon="perplexity",
+    model="sonar-reasoning",
+    search_context_size=ContextSize.HIGH,
+    stream=True,
+    keywords=["reasoning", "perplexity"],
+)
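Because PerplexityAIModel subclasses AIModel with two extra search fields, deployments can define their own search-tuned variants next to the shipped ones; a sketch with hypothetical values:

from appkit_assistant.backend.models.perplexity import ContextSize, PerplexityAIModel

INTERNAL_SONAR = PerplexityAIModel(        # hypothetical custom entry
    id="sonar-internal",
    text="Sonar (company sources only)",
    icon="perplexity",
    model="sonar-pro",
    stream=True,
    search_context_size=ContextSize.LOW,
    search_domain_filter=["example.com"],  # limit search to one domain
)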
appkit_assistant-1.0.0/src/appkit_assistant/backend/processors/__init__.py
ADDED

@@ -0,0 +1,29 @@
+from appkit_assistant.backend.processors.claude_responses_processor import (
+    ClaudeResponsesProcessor,
+)
+from appkit_assistant.backend.processors.processor_base import ProcessorBase
+from appkit_assistant.backend.processors.perplexity_processor import PerplexityProcessor
+from appkit_assistant.backend.processors.streaming_base import StreamingProcessorBase
+from appkit_assistant.backend.processors.openai_chat_completion_processor import (
+    OpenAIChatCompletionsProcessor,
+)
+from appkit_assistant.backend.processors.openai_responses_processor import (
+    OpenAIResponsesProcessor,
+)
+from appkit_assistant.backend.processors.lorem_ipsum_processor import (
+    LoremIpsumProcessor,
+)
+from appkit_assistant.backend.processors.gemini_responses_processor import (
+    GeminiResponsesProcessor,
+)
+
+__all__ = [
+    "ClaudeResponsesProcessor",
+    "GeminiResponsesProcessor",
+    "LoremIpsumProcessor",
+    "OpenAIChatCompletionsProcessor",
+    "OpenAIResponsesProcessor",
+    "PerplexityProcessor",
+    "ProcessorBase",
+    "StreamingProcessorBase",
+]
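Putting the new pieces together, wiring a processor and resolving it through the manager might look like the following; the OpenAIResponsesProcessor constructor arguments are an assumption, since this diff does not show its signature:

from appkit_assistant.backend.model_manager import ModelManager
from appkit_assistant.backend.models import GPT_5_1, GPT_5_MINI
from appkit_assistant.backend.processors import OpenAIResponsesProcessor

manager = ModelManager()
processor = OpenAIResponsesProcessor(models=[GPT_5_1, GPT_5_MINI])  # assumed signature
manager.register_processor("openai-responses", processor)
print(manager.get_processor_for_model("gpt-5.1"))  # the processor, once its models are mapped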