appkit_assistant-0.9.0-py3-none-any.whl → appkit_assistant-0.11.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- appkit_assistant/backend/model_manager.py +4 -3
- appkit_assistant/backend/models.py +50 -3
- appkit_assistant/backend/processors/openai_base.py +117 -1
- appkit_assistant/backend/processors/openai_responses_processor.py +13 -9
- appkit_assistant/backend/repositories.py +141 -1
- appkit_assistant/components/__init__.py +2 -4
- appkit_assistant/components/mcp_server_dialogs.py +7 -2
- appkit_assistant/components/message.py +3 -3
- appkit_assistant/components/thread.py +8 -16
- appkit_assistant/components/threadlist.py +42 -29
- appkit_assistant/components/tools_modal.py +1 -1
- appkit_assistant/configuration.py +1 -0
- appkit_assistant/state/system_prompt_state.py +2 -4
- appkit_assistant/state/thread_list_state.py +271 -0
- appkit_assistant/state/thread_state.py +525 -608
- {appkit_assistant-0.9.0.dist-info → appkit_assistant-0.11.0.dist-info}/METADATA +2 -2
- appkit_assistant-0.11.0.dist-info/RECORD +28 -0
- appkit_assistant/backend/processors/ai_models.py +0 -132
- appkit_assistant/backend/processors/knowledgeai_processor.py +0 -275
- appkit_assistant/backend/system_prompt.py +0 -56
- appkit_assistant-0.9.0.dist-info/RECORD +0 -30
- {appkit_assistant-0.9.0.dist-info → appkit_assistant-0.11.0.dist-info}/WHEEL +0 -0
{appkit_assistant-0.9.0.dist-info → appkit_assistant-0.11.0.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: appkit-assistant
-Version: 0.9.0
+Version: 0.11.0
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/jenreh/appkit
 Project-URL: Documentation, https://github.com/jenreh/appkit/tree/main/docs

@@ -20,7 +20,7 @@ Requires-Dist: appkit-commons
 Requires-Dist: appkit-mantine
 Requires-Dist: appkit-ui
 Requires-Dist: openai>=2.3.0
-Requires-Dist: reflex>=0.8.
+Requires-Dist: reflex>=0.8.22
 Description-Content-Type: text/markdown
 
 # appkit-assistant
appkit_assistant-0.11.0.dist-info/RECORD

@@ -0,0 +1,28 @@
+appkit_assistant/configuration.py,sha256=3nBL-dEGYsvSnRDNpxtikZn4QMMkMlNbb4VqGOPolJI,346
+appkit_assistant/backend/model_manager.py,sha256=7f65UbZ51qOYM6F73eKbZ8hMnCzxdanFZKgdKF8bbCk,4366
+appkit_assistant/backend/models.py,sha256=-sr10xChq_lMlLpt7Jbmq4VGweuW4_UUwYsRtIY7HFY,6015
+appkit_assistant/backend/processor.py,sha256=dhBg3pYXdmpj9JtAJc-d83SeUA1NsICj1C_YI0M2QYE,1289
+appkit_assistant/backend/repositories.py,sha256=ueGreZH2ioefwNh1Et5RivDB8IkSLz76aajrWleAVEU,11232
+appkit_assistant/backend/system_prompt_cache.py,sha256=fC2GdTqObfcJdt7nzTUE99GGzX9nLZPCGBUOENcBE5Q,5083
+appkit_assistant/backend/processors/lorem_ipsum_processor.py,sha256=j-MZhzibrtabzbGB2Pf4Xcdlr1TlTYWNRdE22LsDp9Q,4635
+appkit_assistant/backend/processors/openai_base.py,sha256=IQS4m375BOD_K0PBFOk4i7wL1z5MEiPFxbSmC-HBNgU,4414
+appkit_assistant/backend/processors/openai_chat_completion_processor.py,sha256=nTxouoXDU6VcQr8UhA2KiMNt60KvIwM8cH9Z8lo4dXY,4218
+appkit_assistant/backend/processors/openai_responses_processor.py,sha256=Ns8owrvimtZofyyzhoTgi2t_P0feEgLzooJVfCxC3kw,18800
+appkit_assistant/backend/processors/perplexity_processor.py,sha256=weHukv78MSCF_uSCKGSMpNYHsET9OB8IhpvUiMfPQ8A,3355
+appkit_assistant/components/__init__.py,sha256=5tzK5VjX9FGKK-qTUHLjr8-ohT4ykb4a-zC-I3yeRLY,916
+appkit_assistant/components/composer.py,sha256=F4VPxWp4P6fvTW4rQ7S-YWn0eje5c3jGsWrpC1aewss,3885
+appkit_assistant/components/composer_key_handler.py,sha256=KyZYyhxzFR8DH_7F_DrvTFNT6v5kG6JihlGTmCv2wv0,1028
+appkit_assistant/components/mcp_server_dialogs.py,sha256=wRfQQKJ28z-kz2aO7R2N5D958y7cU9qdx9R_ZuRNS0I,11441
+appkit_assistant/components/mcp_server_table.py,sha256=1dziN7hDDvE8Y3XcdIs0wUPv1H64kP9gRAEjgH9Yvzo,2323
+appkit_assistant/components/message.py,sha256=Tr19CsRqKiMP_fhSByVlUtigeXF13duR6Rp2mQ08IeQ,9636
+appkit_assistant/components/system_prompt_editor.py,sha256=REl33zFmcpYRe9kxvFrBRYg40dV4L4FtVC_3ibLsmrU,2940
+appkit_assistant/components/thread.py,sha256=rYM4LA6PVdEvZ5oz5ZtheVfQfFuvTXTIskTt15kI1kg,7886
+appkit_assistant/components/threadlist.py,sha256=1xVakSTQYi5-wgED3fTJVggeIjL_fkthehce0wKUYtM,4896
+appkit_assistant/components/tools_modal.py,sha256=12iiAVahy3j4JwjGfRlegVEa4ePhGsEu7Bq92JLn1ZI,3353
+appkit_assistant/state/mcp_server_state.py,sha256=L5r3Bd_OzFh_kgWH81cKVBWhE3Ys6m8TsJs2vadCWhU,7945
+appkit_assistant/state/system_prompt_state.py,sha256=cNjjCOHir5sYgsmd5Cv-lAkAIYesKr_jbgQD1Jyuqu0,6620
+appkit_assistant/state/thread_list_state.py,sha256=fQcMy4SZbum1YcbXJ7XFAsZRIeFS-l8BFeNVy01M1Iw,9445
+appkit_assistant/state/thread_state.py,sha256=qImM6x49--0XYonzwgExrpMy7lcgbKtToH_441iP9Gk,29050
+appkit_assistant-0.11.0.dist-info/METADATA,sha256=rh7MGcMLx80pKwkcoWg5jvYpe6XF8FNIsK2MAeb3LtQ,8934
+appkit_assistant-0.11.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+appkit_assistant-0.11.0.dist-info/RECORD,,
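Each RECORD row above is a CSV triple: file path, `sha256=`-prefixed urlsafe-base64 digest, and size in bytes (the RECORD entry itself conventionally leaves hash and size empty). A minimal sketch for reading such rows; the sample line is copied from the hunk above:

```python
import csv
import io

# One row copied from the new RECORD; the format is path,hash,size per the wheel spec.
record = "appkit_assistant/configuration.py,sha256=3nBL-dEGYsvSnRDNpxtikZn4QMMkMlNbb4VqGOPolJI,346\n"

for path, digest, size in csv.reader(io.StringIO(record)):
    algorithm, _, value = digest.partition("=")  # "sha256", "=", "<digest>"
    print(path, algorithm, value, int(size) if size else None)
```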
appkit_assistant/backend/processors/ai_models.py

@@ -1,132 +0,0 @@
-from typing import Final
-
-from appkit_assistant.backend.models import AIModel
-
-DEFAULT: Final = AIModel(
-    id="default",
-    text="Default (GPT 4.1 Mini)",
-    icon="avvia_intelligence",
-    model="default",
-    stream=True,
-)
-
-GEMINI_2_5_FLASH: Final = AIModel(
-    id="gemini-2-5-flash",
-    text="Gemini 2.5 Flash",
-    icon="googlegemini",
-    model="gemini-2-5-flash",
-)
-
-LLAMA_3_2_VISION: Final = AIModel(
-    id="llama32_vision_90b",
-    text="Llama 3.2 Vision 90B (OnPrem)",
-    icon="ollama",
-    model="lllama32_vision_90b",
-)
-
-GPT_4o: Final = AIModel(
-    id="gpt-4o",
-    text="GPT 4o",
-    icon="openai",
-    model="gpt-4o",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-)
-
-GPT_4_1: Final = AIModel(
-    id="gpt-4.1",
-    text="GPT-4.1",
-    icon="openai",
-    model="gpt-4.1",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-)
-
-O3: Final = AIModel(
-    id="o3",
-    text="o3 Reasoning",
-    icon="openai",
-    model="o3",
-    temperature=1,
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-)
-
-O4_MINI: Final = AIModel(
-    id="o4-mini",
-    text="o4 Mini Reasoning",
-    icon="openai",
-    model="o4-mini",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-    temperature=1,
-)
-
-GPT_5: Final = AIModel(
-    id="gpt-5",
-    text="GPT 5",
-    icon="openai",
-    model="gpt-5",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-    temperature=1,
-)
-
-GPT_5_1: Final = AIModel(
-    id="gpt-5.1",
-    text="GPT 5.1",
-    icon="openai",
-    model="gpt-5.1",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-    temperature=1,
-)
-
-GPT_5_CHAT: Final = AIModel(
-    id="gpt-5-chat",
-    text="GPT 5 Chat",
-    icon="openai",
-    model="gpt-5-chat",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=False,
-)
-
-GPT_5_MINI: Final = AIModel(
-    id="gpt-5-mini",
-    text="GPT 5 Mini",
-    icon="openai",
-    model="gpt-5-mini",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-    temperature=1,
-)
-
-GPT_5_1_MINI: Final = AIModel(
-    id="gpt-5.1-mini",
-    text="GPT 5.1 Mini",
-    icon="openai",
-    model="gpt-5.1-mini",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-    temperature=1,
-)
-
-GPT_5_NANO: Final = AIModel(
-    id="gpt-5-nano",
-    text="GPT 5 Nano",
-    icon="openai",
-    model="gpt-5-nano",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-    temperature=1,
-)
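The processors in the next hunk consume constants like these as a `dict[str, AIModel]` keyed by model id (both the `models` constructor argument and `get_supported_models` use that shape). A minimal sketch of assembling such a registry; `build_registry` is illustrative, not part of the package:

```python
from appkit_assistant.backend.models import AIModel

def build_registry(*models: AIModel) -> dict[str, AIModel]:
    """Key each AIModel by its id, matching the dict[str, AIModel] shape processors expect."""
    return {model.id: model for model in models}

# e.g. build_registry(GPT_4_1, O3) -> {"gpt-4.1": GPT_4_1, "o3": O3}
```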
appkit_assistant/backend/processors/knowledgeai_processor.py

@@ -1,275 +0,0 @@
-import asyncio
-import logging
-from collections.abc import AsyncGenerator
-from typing import Any
-
-from openai import AsyncOpenAI, AsyncStream
-from openai.types.chat import ChatCompletionMessageParam
-
-from appkit_assistant.backend.models import (
-    AIModel,
-    Chunk,
-    ChunkType,
-    MCPServer,
-    Message,
-    MessageType,
-)
-from appkit_assistant.backend.processor import Processor
-
-logger = logging.getLogger(__name__)
-
-
-class KnowledgeAIProcessor(Processor):
-    """Processor that generates Knowledge AI text responses."""
-
-    def __init__(
-        self,
-        server: str,
-        api_key: str,
-        models: dict[str, AIModel] | None = None,
-        with_projects: bool = False,
-    ) -> None:
-        """Initialize the Knowledge AI processor."""
-        super().__init__()
-        self.api_key = api_key
-        self.server = server
-        self.models = models
-        self.with_projects = with_projects
-
-        if with_projects:
-            self._initialize_models()
-
-    def _initialize_models(self) -> None:
-        """Initialize the models supported by this processor."""
-        try:
-            from knai_avvia.backend.models import Project  # noqa: PLC0415
-            from knai_avvia.backend.project_repository import (  # noqa: PLC0415
-                load_projects,  # noqa: E402
-            )
-        except ImportError as e:
-            logger.error("knai_avvia package not available: %s", e)
-            self.models = {}
-            return
-
-        try:
-            projects: list[Project] = asyncio.run(
-                load_projects(
-                    url=self.server,
-                    api_key=self.api_key,
-                )
-            )
-
-            if self.models is None:
-                self.models = {}
-
-            for project in projects:
-                project_key = f"{project.id}"
-                self.models[project_key] = AIModel(
-                    id=project_key,
-                    text=project.name,
-                    icon="avvia_intelligence",
-                )
-        except Exception as e:
-            logger.error("Failed to load projects from Knowledge AI: %s", e)
-            self.models = {}
-
-    async def process(
-        self,
-        messages: list[Message],
-        model_id: str,
-        files: list[str] | None = None,  # noqa: ARG002
-        mcp_servers: list[MCPServer] | None = None,  # noqa: ARG002
-    ) -> AsyncGenerator[Chunk, None]:
-        try:
-            from knai_avvia.backend.chat_client import chat_completion  # noqa: PLC0415
-        except ImportError as e:
-            logger.error("knai_avvia package not available: %s", e)
-            raise ImportError(
-                "knai_avvia package is required for KnowledgeAIProcessor"
-            ) from e
-
-        if model_id not in self.models:
-            logger.error("Model %s not supported by OpenAI processor", model_id)
-            raise ValueError(f"Model {model_id} not supported by OpenAI processor")
-
-        chat_messages = self._convert_messages(messages)
-
-        try:
-            result = await chat_completion(
-                api_key=self.api_key,
-                server=self.server,
-                project_id=int(model_id),
-                question=messages[-2].text,  # last human message
-                history=chat_messages,
-                temperature=0.05,
-            )
-
-            if result.answer:
-                yield Chunk(
-                    type=ChunkType.TEXT,
-                    text=result.answer,
-                    chunk_metadata={
-                        "source": "knowledgeai",
-                        "project_id": model_id,
-                        "streaming": str(False),
-                    },
-                )
-        except Exception as e:
-            raise e
-
-    def get_supported_models(self) -> dict[str, AIModel]:
-        return self.models if self.api_key else {}
-
-    def _convert_messages(self, messages: list[Message]) -> list[dict[str, str]]:
-        return [
-            {"role": "Human", "message": msg.text}
-            if msg.type == MessageType.HUMAN
-            else {"role": "AI", "message": msg.text}
-            for msg in (messages or [])
-            if msg.type in (MessageType.HUMAN, MessageType.ASSISTANT)
-        ]
-
-
-class KnowledgeAIOpenAIProcessor(Processor):
-    """Processor that generates Knowledge AI text responses."""
-
-    def __init__(
-        self,
-        server: str,
-        api_key: str,
-        models: dict[str, AIModel] | None = None,
-        with_projects: bool = False,
-    ) -> None:
-        """Initialize the Knowledge AI processor."""
-        self.api_key = api_key
-        self.server = server
-        self.models = models
-        self.with_projects = with_projects
-        self.client = (
-            AsyncOpenAI(api_key=self.api_key, base_url=self.server + "/api/openai/v1")
-            if self.api_key
-            else None
-        )
-
-        if self.with_projects:
-            self._initialize_models()
-
-    def _initialize_models(self) -> None:
-        """Initialize the models supported by this processor."""
-        try:
-            from knai_avvia.backend.models import Project  # noqa: PLC0415
-            from knai_avvia.backend.project_repository import (  # noqa: PLC0415
-                load_projects,  # noqa: E402
-            )
-        except ImportError as e:
-            logger.error("knai_avvia package not available: %s", e)
-            self.models = {}
-            return
-
-        try:
-            projects: list[Project] = asyncio.run(
-                load_projects(
-                    url=self.server,
-                    api_key=self.api_key,
-                )
-            )
-
-            if self.models is None:
-                self.models = {}
-
-            for project in projects:
-                project_key = f"{project.id}"
-                self.models[project_key] = AIModel(
-                    id=project_key,
-                    project_id=project.id,
-                    text=project.name,
-                    icon="avvia_intelligence",
-                    stream=False,
-                )
-        except Exception as e:
-            logger.error("Failed to load projects from Knowledge AI: %s", e)
-            self.models = {}
-
-    async def process(
-        self,
-        messages: list[Message],
-        model_id: str,
-        files: list[str] | None = None,  # noqa: ARG002
-        mcp_servers: list[MCPServer] | None = None,  # noqa: ARG002
-    ) -> AsyncGenerator[Chunk, None]:
-        if not self.client:
-            raise ValueError("KnowledgeAI OpenAI Client not initialized.")
-
-        model = self.models.get(model_id)
-        if not model:
-            raise ValueError(
-                "Model %s not supported by KnowledgeAI processor", model_id
-            )
-
-        chat_messages = self._convert_messages_to_openai_format(messages)
-
-        try:
-            session_params: dict[str, Any] = {
-                "model": model.model if model.project_id else model.id,
-                "messages": chat_messages[:-1],
-                "stream": model.stream,
-            }
-            if model.project_id:
-                session_params["user"] = str(model.project_id)
-
-            session = await self.client.chat.completions.create(**session_params)
-
-            if isinstance(session, AsyncStream):
-                async for event in session:
-                    if event.choices and event.choices[0].delta:
-                        content = event.choices[0].delta.content
-                        if content:
-                            yield Chunk(
-                                type=ChunkType.TEXT,
-                                text=content,
-                                chunk_metadata={
-                                    "source": "knowledgeai_openai",
-                                    "streaming": str(True),
-                                    "model_id": model_id,
-                                },
-                            )
-            elif session.choices and session.choices[0].message:
-                content = session.choices[0].message.content
-                if content:
-                    logger.debug("Content:\n%s", content)
-                    yield Chunk(
-                        type=ChunkType.TEXT,
-                        text=content,
-                        chunk_metadata={
-                            "source": "knowledgeai_openai",
-                            "streaming": str(False),
-                            "model_id": model_id,
-                        },
-                    )
-        except Exception as e:
-            logger.exception("Failed to get response from OpenAI: %s", e)
-            raise e
-
-    def get_supported_models(self) -> dict[str, AIModel]:
-        return self.models if self.api_key else {}
-
-    def _convert_messages_to_openai_format(
-        self, messages: list[Message]
-    ) -> list[ChatCompletionMessageParam]:
-        formatted: list[ChatCompletionMessageParam] = []
-        role_map = {
-            MessageType.HUMAN: "user",
-            MessageType.SYSTEM: "system",
-            MessageType.ASSISTANT: "assistant",
-        }
-
-        for msg in messages or []:
-            if msg.type not in role_map:
-                continue
-            role = role_map[msg.type]
-            if formatted and role != "system" and formatted[-1]["role"] == role:
-                formatted[-1]["content"] = formatted[-1]["content"] + "\n\n" + msg.text
-            else:
-                formatted.append({"role": role, "content": msg.text})
-
-        return formatted
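Note how the deleted `_convert_messages_to_openai_format` collapses consecutive same-role messages (except `system`) into a single entry. A self-contained sketch of that merging rule, using plain dataclass stand-ins rather than the package's actual `Message`/`MessageType` models:

```python
from dataclasses import dataclass
from enum import Enum

class MessageType(Enum):  # stand-in for appkit_assistant.backend.models.MessageType
    HUMAN = "human"
    SYSTEM = "system"
    ASSISTANT = "assistant"

@dataclass
class Message:  # stand-in carrying only the fields the converter reads
    type: MessageType
    text: str

ROLE_MAP = {
    MessageType.HUMAN: "user",
    MessageType.SYSTEM: "system",
    MessageType.ASSISTANT: "assistant",
}

def convert(messages: list[Message]) -> list[dict[str, str]]:
    formatted: list[dict[str, str]] = []
    for msg in messages or []:
        role = ROLE_MAP.get(msg.type)
        if role is None:
            continue  # unknown message types are skipped
        # Consecutive non-system messages with the same role collapse into one entry.
        if formatted and role != "system" and formatted[-1]["role"] == role:
            formatted[-1]["content"] += "\n\n" + msg.text
        else:
            formatted.append({"role": role, "content": msg.text})
    return formatted

# convert([Message(MessageType.HUMAN, "a"), Message(MessageType.HUMAN, "b")])
# -> [{"role": "user", "content": "a\n\nb"}]
```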
appkit_assistant/backend/system_prompt.py

@@ -1,56 +0,0 @@
-from typing import Final
-
-SYSTEM_PROMPT: Final[str] = """
-# System Prompt: Context-Aware, Tool-Oriented Chat Client
-
-## 1) Mission
-Interpret user input semantically, take context into account (history, metadata, projects), and run the most suitable tools to deliver precise, traceable results.
-
-## 2) Priorities
-1. Correctness and concision over length.
-2. Use tools when available and sensible; otherwise reason soundly on your own.
-3. Always show results directly (no waiting messages or placeholders).
-
-## 3) Output formats
-- **Code:** In Markdown code blocks with the correct language tag.
-- **Diagrams:** Always in Mermaid syntax as valid Markdown source.
-- **Analyses/comparisons:** Data-driven; use tables.
-- **Images (important):** Generate with image-generation tools and **always display inline** in the chat. For real people, only with prior consent.
-
-## 4) Tool usage
-- Pick exactly **one** primary tool per task (if several are possible, choose the one with the greatest benefit).
-- Use capability descriptors, run the tool(s) deterministically, show the output immediately.
-- Exploratory approaches are allowed as long as goal and context are clear.
-- If no tool fits: answer directly (structured reasoning implicit, not printed).
-
-{mcp_prompts}
-
-## 5) Context
-- Consistently take conversation history, user role, organization, and ongoing projects into account.
-- Stay coherent across multiple turns; avoid repetition.
-
-## 6) Error and exception handling
-- **Tool errors/unavailability:** Briefly inform the user and immediately provide a solid alternative (e.g. a local estimate/analysis).
-- **Ambiguity:** Make a reasonable assumption and deliver a complete result.
-- **No suitable tool:** Answer with your own inference, clearly structured.
-
-## 7) Quality criteria
-- Precise, testable statements; with numbers/tables where sensible.
-- Clear structure (headings, lists, tables, code blocks, diagrams).
-- Consistent terminology; no redundant passages.
-
-## 8) Examples (format)
-```python
-def hello_world():
-    print("Hello, world!")
-```
-```mermaid
-flowchart TD
-    A["LLM/Chat-Client"] --> B["MCP Client"]
-    B --> C{{"Transport"}}
-    C -->|stdio| D["FastMCP Server (local)"]
-    C -->|http| E["FastMCP Server (remote)"]
-    D --> F["@mcp.tool web_search()"]
-    E --> F
-```
-"""
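The `{mcp_prompts}` placeholder suggests the template was rendered with `str.format` before use; the doubled braces in the Mermaid node `C{{"Transport"}}` are consistent with brace escaping for that call. A hedged sketch of that rendering step; the surrounding code is not part of this diff and the snippet content is invented:

```python
# Assumed rendering step: only {mcp_prompts} is substituted; {{ }} pairs in the
# template survive str.format as literal braces in the Mermaid example.
mcp_sections = ["## Tool: web_search\nUse for current events."]  # hypothetical snippet
rendered = SYSTEM_PROMPT.format(mcp_prompts="\n\n".join(mcp_sections))
```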
appkit_assistant-0.9.0.dist-info/RECORD

@@ -1,30 +0,0 @@
-appkit_assistant/configuration.py,sha256=u05fg5qmPyHhhSSOxhwwFFeiTPqYpe3oINlcv6iY0cI,296
-appkit_assistant/backend/model_manager.py,sha256=LrRqjCfT4Fo1-DCblcn40nPbyT0TjZuFiVcNEstpMJ0,4365
-appkit_assistant/backend/models.py,sha256=z0sUH7KPjanINAk8RgOKylC-MPg93YEJgek51DT0y5U,4206
-appkit_assistant/backend/processor.py,sha256=dhBg3pYXdmpj9JtAJc-d83SeUA1NsICj1C_YI0M2QYE,1289
-appkit_assistant/backend/repositories.py,sha256=0UdTmZmC5Ur1srb5viol8dWnrbyQSbpmh2LP5MPDx-0,6208
-appkit_assistant/backend/system_prompt.py,sha256=PmqGToI0beRaQB-36ZjTolWC4fne40TjptIft0LkCrQ,2395
-appkit_assistant/backend/system_prompt_cache.py,sha256=fC2GdTqObfcJdt7nzTUE99GGzX9nLZPCGBUOENcBE5Q,5083
-appkit_assistant/backend/processors/ai_models.py,sha256=KG6nyeb22BAI_MxPPFY9uBts-B1BvDFF6C2BsF_RFk8,2517
-appkit_assistant/backend/processors/knowledgeai_processor.py,sha256=0i5E71y7zwBc5mW_8rKuTjeNk_GLv2kfXPPwKNBaazA,9513
-appkit_assistant/backend/processors/lorem_ipsum_processor.py,sha256=j-MZhzibrtabzbGB2Pf4Xcdlr1TlTYWNRdE22LsDp9Q,4635
-appkit_assistant/backend/processors/openai_base.py,sha256=f8X7yMWRBiRsS8vDrbEBY-oR1luzJMn8F7LcAlGUKko,2272
-appkit_assistant/backend/processors/openai_chat_completion_processor.py,sha256=nTxouoXDU6VcQr8UhA2KiMNt60KvIwM8cH9Z8lo4dXY,4218
-appkit_assistant/backend/processors/openai_responses_processor.py,sha256=VvkIDcWTCis34ylLoYfWS1HYBO8Rs8f61hr2xA7j0kE,18681
-appkit_assistant/backend/processors/perplexity_processor.py,sha256=weHukv78MSCF_uSCKGSMpNYHsET9OB8IhpvUiMfPQ8A,3355
-appkit_assistant/components/__init__.py,sha256=fgXWhM_i-fOUwWhASgmyuPWfV2Oh-tBkysAtTAxm0C8,877
-appkit_assistant/components/composer.py,sha256=F4VPxWp4P6fvTW4rQ7S-YWn0eje5c3jGsWrpC1aewss,3885
-appkit_assistant/components/composer_key_handler.py,sha256=KyZYyhxzFR8DH_7F_DrvTFNT6v5kG6JihlGTmCv2wv0,1028
-appkit_assistant/components/mcp_server_dialogs.py,sha256=zyJARtO_wjGWMSqn2JSvUujWMOVYAAn_Mqvlea-DRgw,11314
-appkit_assistant/components/mcp_server_table.py,sha256=1dziN7hDDvE8Y3XcdIs0wUPv1H64kP9gRAEjgH9Yvzo,2323
-appkit_assistant/components/message.py,sha256=EyYM33Z2RmxKotDsHdj6qtHlA2ofV74MYiFNJ-pgRFE,9624
-appkit_assistant/components/system_prompt_editor.py,sha256=REl33zFmcpYRe9kxvFrBRYg40dV4L4FtVC_3ibLsmrU,2940
-appkit_assistant/components/thread.py,sha256=TcK1wl4Zr7HnZDqyLjKfESqmMyzOMzBq5Gr-0CT9Kco,7997
-appkit_assistant/components/threadlist.py,sha256=NKB9GshghdSlUY2hVIRdhX9nxd6mxJeII4bTfpfNZUU,4182
-appkit_assistant/components/tools_modal.py,sha256=gThgOzYa_r74IHWEKxmmT85c2MiHLIhOFtiV9IT_r3E,3355
-appkit_assistant/state/mcp_server_state.py,sha256=L5r3Bd_OzFh_kgWH81cKVBWhE3Ys6m8TsJs2vadCWhU,7945
-appkit_assistant/state/system_prompt_state.py,sha256=w6icp-ehDUQl7eluZSOGnCNHqeS-ayEuDbK2FhQFysY,6728
-appkit_assistant/state/thread_state.py,sha256=h8jmE0hZmSWJ5-D4EqxBTO0ydBSNa9IxUTa5tfYdkMk,32618
-appkit_assistant-0.9.0.dist-info/METADATA,sha256=qCMDBHEI80QqkZdXrShsKhinAcpldVG-yuBJVyElmEw,8933
-appkit_assistant-0.9.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-appkit_assistant-0.9.0.dist-info/RECORD,,

{appkit_assistant-0.9.0.dist-info → appkit_assistant-0.11.0.dist-info}/WHEEL: file without changes