appkit_assistant-0.7.0.tar.gz
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between package versions.
- appkit_assistant-0.7.0/.gitignore +114 -0
- appkit_assistant-0.7.0/PKG-INFO +8 -0
- appkit_assistant-0.7.0/README.md +0 -0
- appkit_assistant-0.7.0/docs/ideas.md +67 -0
- appkit_assistant-0.7.0/pyproject.toml +24 -0
- appkit_assistant-0.7.0/src/appkit_assistant/backend/model_manager.py +133 -0
- appkit_assistant-0.7.0/src/appkit_assistant/backend/models.py +103 -0
- appkit_assistant-0.7.0/src/appkit_assistant/backend/processor.py +46 -0
- appkit_assistant-0.7.0/src/appkit_assistant/backend/processors/ai_models.py +109 -0
- appkit_assistant-0.7.0/src/appkit_assistant/backend/processors/knowledgeai_processor.py +275 -0
- appkit_assistant-0.7.0/src/appkit_assistant/backend/processors/lorem_ipsum_processor.py +123 -0
- appkit_assistant-0.7.0/src/appkit_assistant/backend/processors/openai_base.py +73 -0
- appkit_assistant-0.7.0/src/appkit_assistant/backend/processors/openai_chat_completion_processor.py +117 -0
- appkit_assistant-0.7.0/src/appkit_assistant/backend/processors/openai_responses_processor.py +508 -0
- appkit_assistant-0.7.0/src/appkit_assistant/backend/processors/perplexity_processor.py +118 -0
- appkit_assistant-0.7.0/src/appkit_assistant/backend/repositories.py +96 -0
- appkit_assistant-0.7.0/src/appkit_assistant/backend/system_prompt.py +56 -0
- appkit_assistant-0.7.0/src/appkit_assistant/components/__init__.py +38 -0
- appkit_assistant-0.7.0/src/appkit_assistant/components/composer.py +154 -0
- appkit_assistant-0.7.0/src/appkit_assistant/components/composer_key_handler.py +38 -0
- appkit_assistant-0.7.0/src/appkit_assistant/components/mcp_server_dialogs.py +344 -0
- appkit_assistant-0.7.0/src/appkit_assistant/components/mcp_server_table.py +76 -0
- appkit_assistant-0.7.0/src/appkit_assistant/components/message.py +299 -0
- appkit_assistant-0.7.0/src/appkit_assistant/components/thread.py +252 -0
- appkit_assistant-0.7.0/src/appkit_assistant/components/threadlist.py +134 -0
- appkit_assistant-0.7.0/src/appkit_assistant/components/tools_modal.py +97 -0
- appkit_assistant-0.7.0/src/appkit_assistant/configuration.py +10 -0
- appkit_assistant-0.7.0/src/appkit_assistant/state/mcp_server_state.py +222 -0
- appkit_assistant-0.7.0/src/appkit_assistant/state/thread_state.py +874 -0
appkit_assistant-0.7.0/.gitignore
@@ -0,0 +1,114 @@
__pycache__/
__pypackages__/
.cache
.coverage
.coverage.*
.dmypy.json
.DS_Store
.eggs/
.env
.env.backup
.env.docker
.hypothesis/
.idea/
.installed.cfg
.ipynb_checkpoints
.mypy_cache/
.nox/
.pdm.toml
.pybuilder/
.pyre/
.pytest_cache/
.Python
.python_packages
.pytype/
.ropeproject
.scrapy
.spyderproject
.spyproject
.states
.tox/
.venv
.venv.mac
.web
.webassets-cache
*.bak
*.cover
*.db
*.egg
*.egg-info/
*.kv-env.*
*.log
*.manifest
*.mo
*.pot
*.py,cover
*.py[cod]
*.sage.py
*.so
*.spec
*.terraform.lock.hcl
*.tfplan
*.tfstate
*.tfstate.*.backup
*.tfstate.backup
*.tfvars
**/.terraform/*
*$py.class
/site
/vectorstore/
aila-storage/
assets/external/
build/
celerybeat-schedule
celerybeat.pid
configuration/config.abaz009.yaml
configuration/config.bubb001.yaml
configuration/config.stie104.yaml
configuration/config.voro047.yaml
connector examples/sharepoint.json
cover/
coverage.xml
cython_debug/
db.sqlite3
db.sqlite3-journal
develop-eggs/
dist/
dmypy.json
docs/_build/
Documents/
downloads/
eggs/
env.bak/
env/
ENV/
htmlcov/
instance/
ipython_config.py
knowledge/migrate.py
lib/
lib64/
local_settings.py
local.settings.json
MANIFEST
nosetests.xml
out
parts/
pip-delete-this-directory.txt
pip-log.txt
Pipfile
profile_default/
sdist/
share/python-wheels/
sketchpad/
sketchpad/
stores/
target/
tests/mcp_test.py
tmp.txt
uploaded_files/
uploads/
var/
venv.bak/
venv/
wheels/

appkit_assistant-0.7.0/README.md: file without changes.
appkit_assistant-0.7.0/docs/ideas.md
@@ -0,0 +1,67 @@

# Render information with a dedicated UI

"Action" classes, e.g. something like:

```python
class Calendar(AssistantAction):
    name: str
    description: str
    parameters: list[dict]

    def __init__(self):
        self.name = "showCalendarMeeting"
        self.description = "Displays calendar meeting information"
        self.parameters = [
            {
                "name": "date",
                "type": "string",
                "description": "Meeting date (YYYY-MM-DD)",
                "required": True,
            },
            {
                "name": "time",
                "type": "string",
                "description": "Meeting time (HH:mm)",
                "required": True,
            },
            {
                "name": "meetingName",
                "type": "string",
                "description": "Name of the meeting",
                "required": False,
            },
        ]

    def render(self, status, **kwargs):
        if status == Status.LOADING:
            return loading_view()
        return calendar_entry(**kwargs)
```

## Next Actions

```python
class Suggestion(AssistantAction):
    name: str
    description: str
    parameters: list[dict]

    def __init__(self):
        self.name = "showSuggestion"
        self.description = "Displays suggestions"
        self.parameters = [
            {
                "name": "suggestion",
                "type": "string",
                "description": "Suggestion for how to continue or what to ask next",
                "required": False,
            },
        ]

    def render(self, status, **kwargs):
        if status == Status.LOADING:
            return loading_view()
        return render_suggestion(**kwargs)
```
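The examples above lean on an `AssistantAction` base class that the ideas document never defines. A minimal sketch of the assumed contract, purely illustrative (`Status`, `loading_view`, and the render helpers are hypothetical names taken from the examples, not package API):

```python
# Hypothetical sketch of the base class the ideas.md examples assume.
import abc
from enum import StrEnum


class Status(StrEnum):
    LOADING = "loading"
    DONE = "done"


class AssistantAction(abc.ABC):
    """An action the assistant can expose as a tool and render with its own UI."""

    name: str
    description: str
    parameters: list[dict]

    def spec(self) -> dict:
        """Tool-style description that could be handed to a model."""
        return {
            "name": self.name,
            "description": self.description,
            "parameters": self.parameters,
        }

    @abc.abstractmethod
    def render(self, status: Status, **kwargs):
        """Return the UI component appropriate for the given status."""
```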
appkit_assistant-0.7.0/pyproject.toml
@@ -0,0 +1,24 @@
[project]
name = "appkit-assistant"
version = "0.7.0"
description = "Add your description here"
readme = "README.md"
authors = [{ name = "Jens Rehpöhler" }]
requires-python = ">=3.13"
dependencies = [
    "appkit-commons",
    "openai>=2.3.0",
]

[tool.setuptools.packages.find]
where = ["src"]

[tool.uv.sources]
appkit-commons = { workspace = true }

[tool.hatch.build.targets.wheel]
packages = ["src/appkit_assistant"]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
appkit_assistant-0.7.0/src/appkit_assistant/backend/model_manager.py
@@ -0,0 +1,133 @@
import logging
import threading
from typing import Optional

from appkit_assistant.backend.models import AIModel
from appkit_assistant.backend.processor import Processor

logger = logging.getLogger(__name__)


class ModelManager:
    """Singleton service manager for AI processing services."""

    _instance: Optional["ModelManager"] = None
    _lock = threading.Lock()
    _default_model_id = None  # Default model ID will be set to the first registered model

    def __new__(cls) -> "ModelManager":
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super(ModelManager, cls).__new__(cls)  # noqa: UP008
        return cls._instance

    def __init__(self):
        """Initialize the service manager if not already initialized."""
        if not hasattr(self, "_initialized"):
            self._processors: dict[str, Processor] = {}
            self._models: dict[str, AIModel] = {}
            self._model_to_processor: dict[str, str] = {}
            self._initialized = True
            logger.debug("ModelManager initialized")

    def register_processor(self, processor_name: str, processor: Processor) -> None:
        """
        Register a processor with the service manager.

        Args:
            processor_name: Name of the processor.
            processor: Instance of a Processor.
        """
        self._processors[processor_name] = processor

        # Extract and register all models supported by this processor
        supported_models = processor.get_supported_models()
        for model_id, model in supported_models.items():
            if model_id not in self._models:
                self._models[model_id] = model
                self._model_to_processor[model_id] = processor_name

                # Set the first registered model as default if no default is set
                if self._default_model_id is None:
                    self._default_model_id = model_id
                    logger.debug("Set first model %s as default", model_id)

        logger.debug("Registered processor: %s", processor_name)

    def get_processor_for_model(self, model_id: str) -> Processor | None:
        """
        Get the processor that supports the specified model.

        Args:
            model_id: ID of the model.

        Returns:
            The processor that supports the model or None if no processor is found.
        """
        processor_name = self._model_to_processor.get(model_id)
        if processor_name:
            return self._processors.get(processor_name)
        return None

    def get_all_models(self) -> list[AIModel]:
        """
        Get all registered models.

        Returns:
            List of all models, sorted by icon and display text.
        """
        return sorted(
            self._models.values(),
            key=lambda model: (
                model.icon.lower() if model.icon else "",
                model.text.lower(),
            ),
        )

    def get_model(self, model_id: str) -> AIModel | None:
        """
        Get a model by its ID.

        Args:
            model_id: ID of the model.

        Returns:
            The model or None if not found.
        """
        return self._models.get(model_id)

    def get_default_model(self) -> str:
        """
        Get the default model ID.

        Returns:
            The default model ID as a string.
        """
        if self._default_model_id is None:
            if self._models:
                self._default_model_id = next(iter(self._models.keys()))
                logger.debug(
                    "Using first available model %s as default", self._default_model_id
                )
            else:
                logger.warning("No models registered, returning fallback model name")
                return "default"
        return self._default_model_id

    def set_default_model(self, model_id: str) -> None:
        """
        Set the default model ID.

        Args:
            model_id: ID of the model to set as default.
        """
        if model_id in self._models:
            self._default_model_id = model_id
            logger.debug("Default model set to: %s", model_id)
        else:
            logger.warning(
                "Attempted to set unregistered model %s as default. Ignoring.", model_id
            )
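Because `__new__` hands back a shared instance, every call site sees the same registry. A hedged usage sketch (the `EchoProcessor` is the hypothetical `Processor` subclass sketched after the `Processor` interface below):

```python
from appkit_assistant.backend.model_manager import ModelManager

manager = ModelManager()  # first call creates the singleton
manager.register_processor("echo", EchoProcessor())  # hypothetical subclass, see below

assert ModelManager() is manager              # later calls return the same object
default_id = manager.get_default_model()      # first registered model, here "echo"
processor = manager.get_processor_for_model(default_id)
```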
appkit_assistant-0.7.0/src/appkit_assistant/backend/models.py
@@ -0,0 +1,103 @@
from enum import StrEnum

import reflex as rx
from pydantic import BaseModel
from sqlmodel import Field

from appkit_commons.database.entities import EncryptedString


class ChunkType(StrEnum):
    """Enum for chunk types."""

    TEXT = "text"  # default
    ANNOTATION = "annotation"  # for text annotations
    IMAGE = "image"
    IMAGE_PARTIAL = "image_partial"  # for streaming image generation
    THINKING = "thinking"  # when the model is "thinking" / reasoning
    THINKING_RESULT = "thinking_result"  # when the "thinking" is done
    ACTION = "action"  # when the user needs to take action
    TOOL_RESULT = "tool_result"  # result from a tool
    TOOL_CALL = "tool_call"  # calling a tool
    COMPLETION = "completion"  # when response generation is complete
    ERROR = "error"  # when an error occurs
    LIFECYCLE = "lifecycle"


class Chunk(BaseModel):
    """Model for text chunks."""

    type: ChunkType
    text: str
    chunk_metadata: dict[str, str] = {}


class ThreadStatus(StrEnum):
    """Enum for thread status."""

    NEW = "new"
    ACTIVE = "active"
    IDLE = "idle"
    WAITING = "waiting"
    DELETED = "deleted"
    ARCHIVED = "archived"


class MessageType(StrEnum):
    """Enum for message types."""

    HUMAN = "human"
    SYSTEM = "system"
    ASSISTANT = "assistant"
    TOOL_USE = "tool_use"
    ERROR = "error"
    INFO = "info"
    WARNING = "warning"


class Message(BaseModel):
    text: str
    editable: bool = False
    type: MessageType
    done: bool = False


class AIModel(BaseModel):
    id: str
    text: str
    icon: str = "codesandbox"
    stream: bool = False
    tenant_key: str = ""
    project_id: int = 0
    model: str = "default"
    temperature: float = 0.05
    supports_tools: bool = False
    supports_attachments: bool = False


class Suggestion(BaseModel):
    prompt: str
    icon: str = ""


class ThreadModel(BaseModel):
    thread_id: str
    title: str = ""
    active: bool = False
    state: ThreadStatus = ThreadStatus.NEW
    prompt: str | None = ""
    messages: list[Message] = []
    ai_model: str = ""


class MCPServer(rx.Model, table=True):
    """Model for MCP (Model Context Protocol) server configuration."""

    __tablename__ = "mcp_server"

    id: int | None = Field(default=None, primary_key=True)
    name: str = Field(unique=True, max_length=100, nullable=False)
    description: str = Field(default="", max_length=255, nullable=True)
    url: str = Field(nullable=False)
    headers: str = Field(nullable=False, sa_type=EncryptedString)
    prompt: str = Field(default="", max_length=2000, nullable=True)
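A quick construction sketch for these models (all values illustrative):

```python
from appkit_assistant.backend.models import (
    Chunk,
    ChunkType,
    Message,
    MessageType,
    ThreadModel,
)

# A streaming chunk carrying reasoning output
chunk = Chunk(
    type=ChunkType.THINKING,
    text="Analyzing the request...",
    chunk_metadata={"step": "1"},
)

# A thread holding a single human message
thread = ThreadModel(
    thread_id="t-123",
    title="Demo",
    messages=[Message(text="Hi", type=MessageType.HUMAN, done=True)],
)
```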
appkit_assistant-0.7.0/src/appkit_assistant/backend/processor.py
@@ -0,0 +1,46 @@
"""
Base processor interface for AI processing services.
"""

import abc
import logging
from collections.abc import AsyncGenerator

from appkit_assistant.backend.models import AIModel, Chunk, MCPServer, Message

logger = logging.getLogger(__name__)


class Processor(abc.ABC):
    """Base processor interface for AI processing services."""

    @abc.abstractmethod
    async def process(
        self,
        messages: list[Message],
        model_id: str,
        files: list[str] | None = None,
        mcp_servers: list[MCPServer] | None = None,
    ) -> AsyncGenerator[Chunk, None]:
        """
        Process the thread using an AI model.

        Args:
            messages: The list of messages to process.
            model_id: The ID of the model to use.
            files: Optional list of file paths that were uploaded.
            mcp_servers: Optional list of MCP servers to use as tools.

        Returns:
            An async generator that yields Chunk objects containing different
            content types.
        """

    @abc.abstractmethod
    def get_supported_models(self) -> dict[str, AIModel]:
        """
        Get a dictionary of models supported by this processor.

        Returns:
            Dictionary mapping model IDs to AIModel objects.
        """
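To make the async-generator contract concrete, here is a minimal conforming subclass; this `EchoProcessor` is a hypothetical sketch, not part of the package:

```python
from collections.abc import AsyncGenerator

from appkit_assistant.backend.models import (
    AIModel,
    Chunk,
    ChunkType,
    MCPServer,
    Message,
)
from appkit_assistant.backend.processor import Processor


class EchoProcessor(Processor):
    """Hypothetical processor that streams the last message back word by word."""

    async def process(
        self,
        messages: list[Message],
        model_id: str,
        files: list[str] | None = None,
        mcp_servers: list[MCPServer] | None = None,
    ) -> AsyncGenerator[Chunk, None]:
        # Stream the last message back one word at a time
        for word in messages[-1].text.split():
            yield Chunk(type=ChunkType.TEXT, text=word + " ")
        # Signal that the response is complete
        yield Chunk(type=ChunkType.COMPLETION, text="")

    def get_supported_models(self) -> dict[str, AIModel]:
        model = AIModel(id="echo", text="Echo", stream=True)
        return {model.id: model}
```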
appkit_assistant-0.7.0/src/appkit_assistant/backend/processors/ai_models.py
@@ -0,0 +1,109 @@
from typing import Final

from appkit_assistant.backend.models import AIModel

DEFAULT: Final = AIModel(
    id="default",
    text="Default (GPT 4.1 Mini)",
    icon="avvia_intelligence",
    model="default",
    stream=True,
)

GEMINI_2_5_FLASH: Final = AIModel(
    id="gemini-2-5-flash",
    text="Gemini 2.5 Flash",
    icon="googlegemini",
    model="gemini-2-5-flash",
)

LLAMA_3_2_VISION: Final = AIModel(
    id="llama32_vision_90b",
    text="Llama 3.2 Vision 90B (OnPrem)",
    icon="ollama",
    model="llama32_vision_90b",
)

GPT_4o: Final = AIModel(
    id="gpt-4o",
    text="GPT 4o",
    icon="openai",
    model="gpt-4o",
    stream=True,
    supports_attachments=True,
    supports_tools=True,
)

GPT_4_1: Final = AIModel(
    id="gpt-4.1",
    text="GPT-4.1",
    icon="openai",
    model="gpt-4.1",
    stream=True,
    supports_attachments=True,
    supports_tools=True,
)

O3: Final = AIModel(
    id="o3",
    text="o3 Reasoning",
    icon="openai",
    model="o3",
    temperature=1,
    stream=True,
    supports_attachments=True,
    supports_tools=True,
)

O4_MINI: Final = AIModel(
    id="o4-mini",
    text="o4 Mini Reasoning",
    icon="openai",
    model="o4-mini",
    stream=True,
    supports_attachments=True,
    supports_tools=True,
    temperature=1,
)

GPT_5: Final = AIModel(
    id="gpt-5",
    text="GPT 5",
    icon="openai",
    model="gpt-5",
    stream=True,
    supports_attachments=True,
    supports_tools=True,
    temperature=1,
)

GPT_5_CHAT: Final = AIModel(
    id="gpt-5-chat",
    text="GPT 5 Chat",
    icon="openai",
    model="gpt-5-chat",
    stream=True,
    supports_attachments=True,
    supports_tools=False,
)

GPT_5_MINI: Final = AIModel(
    id="gpt-5-mini",
    text="GPT 5 Mini",
    icon="openai",
    model="gpt-5-mini",
    stream=True,
    supports_attachments=True,
    supports_tools=True,
    temperature=1,
)

GPT_5_NANO: Final = AIModel(
    id="gpt-5-nano",
    text="GPT 5 Nano",
    icon="openai",
    model="gpt-5-nano",
    stream=True,
    supports_attachments=True,
    supports_tools=True,
    temperature=1,
)
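These constants are the kind of registry a processor's `get_supported_models` can return, and the capability flags make filtering straightforward (a hedged sketch; the chosen subset is arbitrary):

```python
from appkit_assistant.backend.processors import ai_models

# Mapping a processor might expose from get_supported_models() (illustrative subset)
SUPPORTED = {m.id: m for m in (ai_models.GPT_4_1, ai_models.O3, ai_models.GPT_5_MINI)}

# Keep only models that can drive MCP tools
tool_capable = [m for m in SUPPORTED.values() if m.supports_tools]
```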