appkit-assistant 0.7.2__tar.gz → 0.7.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. appkit_assistant-0.7.4/PKG-INFO +330 -0
  2. appkit_assistant-0.7.4/README.md +320 -0
  3. appkit_assistant-0.7.4/docs/assistant.png +0 -0
  4. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/pyproject.toml +1 -1
  5. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/backend/models.py +2 -0
  6. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/backend/processors/ai_models.py +23 -0
  7. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/backend/processors/perplexity_processor.py +3 -0
  8. appkit_assistant-0.7.2/PKG-INFO +0 -8
  9. appkit_assistant-0.7.2/README.md +0 -0
  10. appkit_assistant-0.7.2/docs/ideas.md +0 -67
  11. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/.gitignore +0 -0
  12. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/backend/model_manager.py +0 -0
  13. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/backend/processor.py +0 -0
  14. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/backend/processors/knowledgeai_processor.py +0 -0
  15. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/backend/processors/lorem_ipsum_processor.py +0 -0
  16. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/backend/processors/openai_base.py +0 -0
  17. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/backend/processors/openai_chat_completion_processor.py +0 -0
  18. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/backend/processors/openai_responses_processor.py +0 -0
  19. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/backend/repositories.py +0 -0
  20. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/backend/system_prompt.py +0 -0
  21. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/components/__init__.py +0 -0
  22. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/components/composer.py +0 -0
  23. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/components/composer_key_handler.py +0 -0
  24. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/components/mcp_server_dialogs.py +0 -0
  25. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/components/mcp_server_table.py +0 -0
  26. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/components/message.py +0 -0
  27. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/components/thread.py +0 -0
  28. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/components/threadlist.py +0 -0
  29. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/components/tools_modal.py +0 -0
  30. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/configuration.py +0 -0
  31. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/state/mcp_server_state.py +0 -0
  32. {appkit_assistant-0.7.2 → appkit_assistant-0.7.4}/src/appkit_assistant/state/thread_state.py +0 -0
@@ -0,0 +1,330 @@
1
+ Metadata-Version: 2.4
2
+ Name: appkit-assistant
3
+ Version: 0.7.4
4
+ Summary: Add your description here
5
+ Author: Jens Rehpöhler
6
+ Requires-Python: >=3.13
7
+ Requires-Dist: appkit-commons
8
+ Requires-Dist: openai>=2.3.0
9
+ Description-Content-Type: text/markdown
10
+
11
+ # appkit-assistant
12
+
13
+ [![Python 3.13+](https://img.shields.io/badge/python-3.13+-blue.svg)](https://www.python.org/downloads/)
14
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
15
+
16
+ **AI assistant component for Reflex applications with MCP server integration.**
17
+
18
+ appkit-assistant provides a complete conversational AI interface built on Reflex, featuring OpenAI and Perplexity integrations, Model Context Protocol (MCP) server management, and secure credential handling. It includes both backend processing services and ready-to-use UI components for building AI-powered applications.
19
+
20
+ ![Assistant](./docs/assistant.png)
21
+
22
+ ---
23
+
24
+ ## ✨ Features
25
+
26
+ - **Multi-Model Support** - OpenAI Chat Completions, OpenAI Responses API, Perplexity, and fallback Lorem Ipsum processor
27
+ - **MCP Server Integration** - Manage and connect to Model Context Protocol servers as tools
28
+ - **Secure Credential Management** - Encrypted storage and handling of API keys and server credentials
29
+ - **Reflex UI Components** - Pre-built assistant interface with composer, thread management, and message display
30
+ - **Streaming Responses** - Real-time streaming of AI responses with chunked content
31
+ - **Thread Management** - Persistent conversation threads with state management
32
+
33
+ ---
34
+
35
+ ## 🚀 Installation
36
+
37
+ ### As Part of AppKit Workspace
38
+
39
+ If you're using the full AppKit workspace:
40
+
41
+ ```bash
42
+ git clone https://github.com/jenreh/appkit.git
43
+ cd appkit
44
+ uv sync
45
+ ```
46
+
47
+ ### Standalone Installation
48
+
49
+ Install from PyPI:
50
+
51
+ ```bash
52
+ pip install appkit-assistant
53
+ ```
54
+
55
+ Or with uv:
56
+
57
+ ```bash
58
+ uv add appkit-assistant
59
+ ```
60
+
61
+ ### Dependencies
62
+
63
+ - `appkit-commons` (shared utilities)
64
+ - `openai>=2.3.0` (OpenAI API client)
65
+
66
+ ---
67
+
68
+ ## 🏁 Quick Start
69
+
70
+ ### Basic Setup
71
+
72
+ 1. Configure your API keys in your application's configuration:
73
+
74
+ ```python
75
+ from appkit_assistant.configuration import AssistantConfig
76
+
77
+ # In your app configuration
78
+ assistant_config = AssistantConfig(
79
+ openai_api_key="your-openai-key",
80
+ perplexity_api_key="your-perplexity-key",
81
+ # Optional: custom OpenAI base URL
82
+ openai_base_url="https://api.openai.com/v1"
83
+ )
84
+ ```
85
+
86
+ 2. Register processors with the ModelManager:
87
+
88
+ ```python
89
+ from appkit_assistant.backend.model_manager import ModelManager
90
+ from appkit_assistant.backend.processors.openai_chat_completion_processor import OpenAIChatCompletionProcessor
91
+ from appkit_assistant.backend.processors.perplexity_processor import PerplexityProcessor
92
+
93
+ manager = ModelManager()
94
+ manager.register_processor("openai", OpenAIChatCompletionProcessor(assistant_config))
95
+ manager.register_processor("perplexity", PerplexityProcessor(assistant_config))
96
+ ```
97
+
98
+ 3. Use the assistant component in your Reflex app:
99
+
100
+ ```python
101
+ import reflex as rx
102
+ import appkit_assistant as assistant
103
+
104
+ def assistant_page():
105
+ return rx.container(
106
+ assistant.Assistant(),
107
+ height="100vh"
108
+ )
109
+ ```
110
+
111
+ ---
112
+
113
+ ## 📖 Usage
114
+
115
+ ### Model Management
116
+
117
+ The `ModelManager` singleton handles all AI processors and models:
118
+
119
+ ```python
120
+ from appkit_assistant.backend.model_manager import ModelManager
121
+
122
+ manager = ModelManager()
123
+
124
+ # Get all available models
125
+ models = manager.get_all_models()
126
+
127
+ # Get a specific model
128
+ model = manager.get_model("gpt-4")
129
+
130
+ # Set default model
131
+ manager.set_default_model("gpt-4")
132
+ ```
133
+
134
+ ### Processing Messages
135
+
136
+ Process conversations using the registered processors:
137
+
138
+ ```python
139
+ from appkit_assistant.backend.models import Message, MessageType
140
+
141
+ messages = [
142
+ Message(role="user", content="Hello, how are you?", type=MessageType.TEXT)
143
+ ]
144
+
145
+ async for chunk in manager.get_processor_for_model("gpt-4").process(messages, "gpt-4"):
146
+ print(f"Received: {chunk.content}")
147
+ ```
148
+
149
+ ### MCP Server Management
150
+
151
+ Manage MCP servers for tool integration:
152
+
153
+ ```python
154
+ from appkit_assistant.backend.models import MCPServer
155
+
156
+ mcp_server = MCPServer(
157
+ name="my-server",
158
+ command="python",
159
+ args=["-m", "my_mcp_server"],
160
+ headers={"Authorization": "Bearer token"}
161
+ )
162
+
163
+ # Use in processing
164
+ async for chunk in processor.process(messages, "gpt-4", mcp_servers=[mcp_server]):
165
+ # Handle response with MCP tools
166
+ pass
167
+ ```
168
+
169
+ ### UI Components
170
+
171
+ #### Assistant Interface
172
+
173
+ The main `Assistant` component provides a complete chat interface:
174
+
175
+ ```python
176
+ import appkit_assistant as assistant
177
+
178
+ def chat_page():
179
+ return assistant.Assistant()
180
+ ```
181
+
182
+ #### Individual Components
183
+
184
+ Use individual components for custom layouts:
185
+
186
+ ```python
187
+ import appkit_assistant as assistant
188
+
189
+ def custom_assistant():
190
+ return rx.vstack(
191
+ assistant.ThreadList(),
192
+ assistant.composer(),
193
+ spacing="4"
194
+ )
195
+ ```
196
+
197
+ #### MCP Server Management UI
198
+
199
+ Display and manage MCP servers:
200
+
201
+ ```python
202
+ def servers_page():
203
+ return assistant.mcp_servers_table()
204
+ ```
205
+
206
+ ---
207
+
208
+ ## 🔧 Configuration
209
+
210
+ ### AssistantConfig
211
+
212
+ Configure API keys and settings:
213
+
214
+ ```python
215
+ from appkit_assistant.configuration import AssistantConfig
216
+
217
+ config = AssistantConfig(
218
+ openai_api_key="sk-...",
219
+ openai_base_url="https://custom.openai.endpoint/v1",
220
+ perplexity_api_key="pplx-...",
221
+ google_api_key="AIza..." # For future Google integrations
222
+ )
223
+ ```
224
+
225
+ ### Processor Registration
226
+
227
+ Register processors based on available credentials:
228
+
229
+ ```python
230
+ from appkit_assistant.backend.processors import (
231
+ OpenAIChatCompletionProcessor,
232
+ PerplexityProcessor,
233
+ LoremIpsumProcessor
234
+ )
235
+
236
+ manager = ModelManager()
237
+
238
+ if config.openai_api_key:
239
+ manager.register_processor("openai", OpenAIChatCompletionProcessor(config))
240
+
241
+ if config.perplexity_api_key:
242
+ manager.register_processor("perplexity", PerplexityProcessor(config))
243
+
244
+ # Always available fallback
245
+ manager.register_processor("lorem", LoremIpsumProcessor())
246
+ ```
247
+
248
+ ---
249
+
250
+ ## 📋 API Reference
251
+
252
+ ### Core Classes
253
+
254
+ - `ModelManager` - Singleton model and processor registry
255
+ - `Processor` - Abstract base for AI processors
256
+ - `AIModel` - Model metadata and configuration
257
+ - `Message` - Conversation message structure
258
+ - `MCPServer` - MCP server configuration
259
+
260
+ ### Component API
261
+
262
+ - `Assistant` - Complete assistant interface
263
+ - `composer` - Message input component
264
+ - `ThreadList` - Conversation thread list
265
+ - `MessageComponent` - Individual message display
266
+ - `mcp_servers_table` - MCP server management table
267
+
268
+ ### State Management
269
+
270
+ - `ThreadState` - Individual thread state
271
+ - `ThreadListState` - Thread list management
272
+
273
+ ---
274
+
275
+ ## 🔒 Security
276
+
277
+ > [!IMPORTANT]
278
+ > API keys and MCP server credentials are handled securely using the appkit-commons configuration system. Never hardcode secrets in your code.
279
+
280
+ - Use `SecretStr` for sensitive configuration values
281
+ - Credentials are encrypted at rest when stored in the database
282
+ - MCP server headers support encrypted storage
283
+
284
+ ---
285
+
286
+ ## 🤝 Integration Examples
287
+
288
+ ### With AppKit User Management
289
+
290
+ Combine with appkit-user for authenticated assistants:
291
+
292
+ ```python
293
+ from appkit_user import authenticated, requires_role
294
+
295
+ @authenticated()
296
+ @requires_role("assistant_user")
297
+ def protected_assistant_page():
298
+ return assistant.Assistant()
299
+ ```
300
+
301
+ ### Custom Processor Implementation
302
+
303
+ Implement your own AI processor:
304
+
305
+ ```python
306
+ from appkit_assistant.backend.processor import Processor
307
+ from appkit_assistant.backend.models import AIModel, Chunk, Message
308
+
309
+ class CustomProcessor(Processor):
310
+ def get_supported_models(self):
311
+ return {
312
+ "custom-model": AIModel(
313
+ id="custom-model",
314
+ text="Custom AI Model",
315
+ icon="🤖"
316
+ )
317
+ }
318
+
319
+ async def process(self, messages, model_id, files=None, mcp_servers=None):
320
+ # Your AI processing logic here
321
+ yield Chunk(content="Response from custom model", type="text")
322
+ ```
323
+
324
+ ---
325
+
326
+ ## 📚 Related Components
327
+
328
+ - **[appkit-mantine](./../appkit-mantine)** - UI components used in the assistant interface
329
+ - **[appkit-user](./../appkit-user)** - User authentication and authorization
330
+ - **[appkit-commons](./../appkit-commons)** - Shared utilities and configuration
@@ -0,0 +1,320 @@
1
+ # appkit-assistant
2
+
3
+ [![Python 3.13+](https://img.shields.io/badge/python-3.13+-blue.svg)](https://www.python.org/downloads/)
4
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
5
+
6
+ **AI assistant component for Reflex applications with MCP server integration.**
7
+
8
+ appkit-assistant provides a complete conversational AI interface built on Reflex, featuring OpenAI and Perplexity integrations, Model Context Protocol (MCP) server management, and secure credential handling. It includes both backend processing services and ready-to-use UI components for building AI-powered applications.
9
+
10
+ ![Assistant](./docs/assistant.png)
11
+
12
+ ---
13
+
14
+ ## ✨ Features
15
+
16
+ - **Multi-Model Support** - OpenAI Chat Completions, OpenAI Responses API, Perplexity, and fallback Lorem Ipsum processor
17
+ - **MCP Server Integration** - Manage and connect to Model Context Protocol servers as tools
18
+ - **Secure Credential Management** - Encrypted storage and handling of API keys and server credentials
19
+ - **Reflex UI Components** - Pre-built assistant interface with composer, thread management, and message display
20
+ - **Streaming Responses** - Real-time streaming of AI responses with chunked content
21
+ - **Thread Management** - Persistent conversation threads with state management
22
+
23
+ ---
24
+
25
+ ## 🚀 Installation
26
+
27
+ ### As Part of AppKit Workspace
28
+
29
+ If you're using the full AppKit workspace:
30
+
31
+ ```bash
32
+ git clone https://github.com/jenreh/appkit.git
33
+ cd appkit
34
+ uv sync
35
+ ```
36
+
37
+ ### Standalone Installation
38
+
39
+ Install from PyPI:
40
+
41
+ ```bash
42
+ pip install appkit-assistant
43
+ ```
44
+
45
+ Or with uv:
46
+
47
+ ```bash
48
+ uv add appkit-assistant
49
+ ```
50
+
51
+ ### Dependencies
52
+
53
+ - `appkit-commons` (shared utilities)
54
+ - `openai>=2.3.0` (OpenAI API client)
55
+
56
+ ---
57
+
58
+ ## 🏁 Quick Start
59
+
60
+ ### Basic Setup
61
+
62
+ 1. Configure your API keys in your application's configuration:
63
+
64
+ ```python
65
+ from appkit_assistant.configuration import AssistantConfig
66
+
67
+ # In your app configuration
68
+ assistant_config = AssistantConfig(
69
+ openai_api_key="your-openai-key",
70
+ perplexity_api_key="your-perplexity-key",
71
+ # Optional: custom OpenAI base URL
72
+ openai_base_url="https://api.openai.com/v1"
73
+ )
74
+ ```
75
+
76
+ 2. Register processors with the ModelManager:
77
+
78
+ ```python
79
+ from appkit_assistant.backend.model_manager import ModelManager
80
+ from appkit_assistant.backend.processors.openai_chat_completion_processor import OpenAIChatCompletionProcessor
81
+ from appkit_assistant.backend.processors.perplexity_processor import PerplexityProcessor
82
+
83
+ manager = ModelManager()
84
+ manager.register_processor("openai", OpenAIChatCompletionProcessor(assistant_config))
85
+ manager.register_processor("perplexity", PerplexityProcessor(assistant_config))
86
+ ```
87
+
88
+ 3. Use the assistant component in your Reflex app:
89
+
90
+ ```python
91
+ import reflex as rx
92
+ import appkit_assistant as assistant
93
+
94
+ def assistant_page():
95
+ return rx.container(
96
+ assistant.Assistant(),
97
+ height="100vh"
98
+ )
99
+ ```
100
+
101
+ ---
102
+
103
+ ## 📖 Usage
104
+
105
+ ### Model Management
106
+
107
+ The `ModelManager` singleton handles all AI processors and models:
108
+
109
+ ```python
110
+ from appkit_assistant.backend.model_manager import ModelManager
111
+
112
+ manager = ModelManager()
113
+
114
+ # Get all available models
115
+ models = manager.get_all_models()
116
+
117
+ # Get a specific model
118
+ model = manager.get_model("gpt-4")
119
+
120
+ # Set default model
121
+ manager.set_default_model("gpt-4")
122
+ ```
123
+
124
+ ### Processing Messages
125
+
126
+ Process conversations using the registered processors:
127
+
128
+ ```python
129
+ from appkit_assistant.backend.models import Message, MessageType
130
+
131
+ messages = [
132
+ Message(role="user", content="Hello, how are you?", type=MessageType.TEXT)
133
+ ]
134
+
135
+ async for chunk in manager.get_processor_for_model("gpt-4").process(messages, "gpt-4"):
136
+ print(f"Received: {chunk.content}")
137
+ ```
138
+
139
+ ### MCP Server Management
140
+
141
+ Manage MCP servers for tool integration:
142
+
143
+ ```python
144
+ from appkit_assistant.backend.models import MCPServer
145
+
146
+ mcp_server = MCPServer(
147
+ name="my-server",
148
+ command="python",
149
+ args=["-m", "my_mcp_server"],
150
+ headers={"Authorization": "Bearer token"}
151
+ )
152
+
153
+ # Use in processing
154
+ async for chunk in processor.process(messages, "gpt-4", mcp_servers=[mcp_server]):
155
+ # Handle response with MCP tools
156
+ pass
157
+ ```
158
+
159
+ ### UI Components
160
+
161
+ #### Assistant Interface
162
+
163
+ The main `Assistant` component provides a complete chat interface:
164
+
165
+ ```python
166
+ import appkit_assistant as assistant
167
+
168
+ def chat_page():
169
+ return assistant.Assistant()
170
+ ```
171
+
172
+ #### Individual Components
173
+
174
+ Use individual components for custom layouts:
175
+
176
+ ```python
177
+ import appkit_assistant as assistant
178
+
179
+ def custom_assistant():
180
+ return rx.vstack(
181
+ assistant.ThreadList(),
182
+ assistant.composer(),
183
+ spacing="4"
184
+ )
185
+ ```
186
+
187
+ #### MCP Server Management UI
188
+
189
+ Display and manage MCP servers:
190
+
191
+ ```python
192
+ def servers_page():
193
+ return assistant.mcp_servers_table()
194
+ ```
195
+
196
+ ---
197
+
198
+ ## 🔧 Configuration
199
+
200
+ ### AssistantConfig
201
+
202
+ Configure API keys and settings:
203
+
204
+ ```python
205
+ from appkit_assistant.configuration import AssistantConfig
206
+
207
+ config = AssistantConfig(
208
+ openai_api_key="sk-...",
209
+ openai_base_url="https://custom.openai.endpoint/v1",
210
+ perplexity_api_key="pplx-...",
211
+ google_api_key="AIza..." # For future Google integrations
212
+ )
213
+ ```
214
+
215
+ ### Processor Registration
216
+
217
+ Register processors based on available credentials:
218
+
219
+ ```python
220
+ from appkit_assistant.backend.processors import (
221
+ OpenAIChatCompletionProcessor,
222
+ PerplexityProcessor,
223
+ LoremIpsumProcessor
224
+ )
225
+
226
+ manager = ModelManager()
227
+
228
+ if config.openai_api_key:
229
+ manager.register_processor("openai", OpenAIChatCompletionProcessor(config))
230
+
231
+ if config.perplexity_api_key:
232
+ manager.register_processor("perplexity", PerplexityProcessor(config))
233
+
234
+ # Always available fallback
235
+ manager.register_processor("lorem", LoremIpsumProcessor())
236
+ ```
237
+
238
+ ---
239
+
240
+ ## 📋 API Reference
241
+
242
+ ### Core Classes
243
+
244
+ - `ModelManager` - Singleton model and processor registry
245
+ - `Processor` - Abstract base for AI processors
246
+ - `AIModel` - Model metadata and configuration
247
+ - `Message` - Conversation message structure
248
+ - `MCPServer` - MCP server configuration
249
+
250
+ ### Component API
251
+
252
+ - `Assistant` - Complete assistant interface
253
+ - `composer` - Message input component
254
+ - `ThreadList` - Conversation thread list
255
+ - `MessageComponent` - Individual message display
256
+ - `mcp_servers_table` - MCP server management table
257
+
258
+ ### State Management
259
+
260
+ - `ThreadState` - Individual thread state
261
+ - `ThreadListState` - Thread list management
262
+
263
+ ---
264
+
265
+ ## 🔒 Security
266
+
267
+ > [!IMPORTANT]
268
+ > API keys and MCP server credentials are handled securely using the appkit-commons configuration system. Never hardcode secrets in your code.
269
+
270
+ - Use `SecretStr` for sensitive configuration values
271
+ - Credentials are encrypted at rest when stored in the database
272
+ - MCP server headers support encrypted storage
273
+
274
+ ---
275
+
276
+ ## 🤝 Integration Examples
277
+
278
+ ### With AppKit User Management
279
+
280
+ Combine with appkit-user for authenticated assistants:
281
+
282
+ ```python
283
+ from appkit_user import authenticated, requires_role
284
+
285
+ @authenticated()
286
+ @requires_role("assistant_user")
287
+ def protected_assistant_page():
288
+ return assistant.Assistant()
289
+ ```
290
+
291
+ ### Custom Processor Implementation
292
+
293
+ Implement your own AI processor:
294
+
295
+ ```python
296
+ from appkit_assistant.backend.processor import Processor
297
+ from appkit_assistant.backend.models import AIModel, Chunk, Message
298
+
299
+ class CustomProcessor(Processor):
300
+ def get_supported_models(self):
301
+ return {
302
+ "custom-model": AIModel(
303
+ id="custom-model",
304
+ text="Custom AI Model",
305
+ icon="🤖"
306
+ )
307
+ }
308
+
309
+ async def process(self, messages, model_id, files=None, mcp_servers=None):
310
+ # Your AI processing logic here
311
+ yield Chunk(content="Response from custom model", type="text")
312
+ ```
313
+
314
+ ---
315
+
316
+ ## 📚 Related Components
317
+
318
+ - **[appkit-mantine](./../appkit-mantine)** - UI components used in the assistant interface
319
+ - **[appkit-user](./../appkit-user)** - User authentication and authorization
320
+ - **[appkit-commons](./../appkit-commons)** - Shared utilities and configuration
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "appkit-assistant"
3
- version = "0.7.2"
3
+ version = "0.7.4"
4
4
  description = "Add your description here"
5
5
  readme = "README.md"
6
6
  authors = [{ name = "Jens Rehpöhler" }]
@@ -73,6 +73,8 @@ class AIModel(BaseModel):
73
73
  temperature: float = 0.05
74
74
  supports_tools: bool = False
75
75
  supports_attachments: bool = False
76
+ keywords: list[str] = []
77
+ disabled: bool = False
76
78
 
77
79
 
78
80
  class Suggestion(BaseModel):
@@ -16,6 +16,7 @@ GEMINI_2_5_FLASH: Final = AIModel(
16
16
  icon="googlegemini",
17
17
  model="gemini-2-5-flash",
18
18
  )
19
+
19
20
  LLAMA_3_2_VISION: Final = AIModel(
20
21
  id="llama32_vision_90b",
21
22
  text="Llama 3.2 Vision 90B (OnPrem)",
@@ -76,6 +77,17 @@ GPT_5: Final = AIModel(
76
77
  temperature=1,
77
78
  )
78
79
 
80
+ GPT_5_1: Final = AIModel(
81
+ id="gpt-5.1",
82
+ text="GPT 5.1",
83
+ icon="openai",
84
+ model="gpt-5.1",
85
+ stream=True,
86
+ supports_attachments=True,
87
+ supports_tools=True,
88
+ temperature=1,
89
+ )
90
+
79
91
  GPT_5_CHAT: Final = AIModel(
80
92
  id="gpt-5-chat",
81
93
  text="GPT 5 Chat",
@@ -97,6 +109,17 @@ GPT_5_MINI: Final = AIModel(
97
109
  temperature=1,
98
110
  )
99
111
 
112
+ GPT_5_1_MINI: Final = AIModel(
113
+ id="gpt-5.1-mini",
114
+ text="GPT 5.1 Mini",
115
+ icon="openai",
116
+ model="gpt-5.1-mini",
117
+ stream=True,
118
+ supports_attachments=True,
119
+ supports_tools=True,
120
+ temperature=1,
121
+ )
122
+
100
123
  GPT_5_NANO: Final = AIModel(
101
124
  id="gpt-5-nano",
102
125
  text="GPT 5 Nano",
@@ -41,6 +41,7 @@ SONAR_PRO = PerplexityAIModel(
41
41
  icon="perplexity",
42
42
  model="sonar-pro",
43
43
  stream=True,
44
+ keywords=["sonar", "perplexity"],
44
45
  )
45
46
 
46
47
  SONAR_DEEP_RESEARCH = PerplexityAIModel(
@@ -50,6 +51,7 @@ SONAR_DEEP_RESEARCH = PerplexityAIModel(
50
51
  model="sonar-deep-research",
51
52
  search_context_size=ContextSize.HIGH,
52
53
  stream=True,
54
+ keywords=["reasoning", "deep", "research", "perplexity"],
53
55
  )
54
56
 
55
57
  SONAR_REASONING = PerplexityAIModel(
@@ -59,6 +61,7 @@ SONAR_REASONING = PerplexityAIModel(
59
61
  model="sonar-reasoning",
60
62
  search_context_size=ContextSize.HIGH,
61
63
  stream=True,
64
+ keywords=["reasoning", "perplexity"],
62
65
  )
63
66
 
64
67
  ALL_MODELS = {
@@ -1,8 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: appkit-assistant
3
- Version: 0.7.2
4
- Summary: Add your description here
5
- Author: Jens Rehpöhler
6
- Requires-Python: >=3.13
7
- Requires-Dist: appkit-commons
8
- Requires-Dist: openai>=2.3.0
File without changes
@@ -1,67 +0,0 @@
1
- # Render information with a dedicated UI
2
-
3
- "Action" classes, e.g. something like
4
-
5
- ```python
6
- class Calender(AssitantAction):
7
- name: str
8
- description: str
9
- parameters: list[dict]
10
-
11
- def __init__(self):
12
- name = "showCalendarMeeting"
13
- description = "Displays calendar meeting information"
14
- parameters = [
15
- {
16
- name: "date",
17
- type: "string",
18
- description: "Meeting date (YYYY-MM-DD)",
19
- required: true
20
- },
21
- {
22
- name: "time",
23
- type: "string",
24
- description: "Meeting time (HH:mm)",
25
- required: true
26
- },
27
- {
28
- name: "meetingName",
29
- type: "string",
30
- description: "Name of the meeting",
31
- required: false
32
- }
33
- ]
34
-
35
- def render(status, **kwargs):
36
- if status == Status.LOADING:
37
- return loading_view()
38
- else:
39
- return calendar_entry(**kwargs)
40
- ```
41
-
42
- ## Next Actions
43
-
44
- ```python
45
- class Suggestion(AssitantAction):
46
- name: str
47
- description: str
48
- parameters: list[dict]
49
-
50
- def __init__(self):
51
- name = "showSuggestion"
52
- description = "Displays suggestions"
53
- parameters = [
54
- {
55
- name: "suggestion",
56
- type: "string",
57
- description: "Suggestion what how to continue or what to ask next",
58
- required: false
59
- }
60
- ]
61
-
62
- def render(status, **kwargs):
63
- if status == Status.LOADING:
64
- return loading_view()
65
- else:
66
- return render_suggestion(**kwargs)
67
- ```