appkit-assistant 0.10.0__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/appkit_assistant/backend/model_manager.py
+++ b/appkit_assistant/backend/model_manager.py
@@ -1,6 +1,7 @@
+from __future__ import annotations
+
 import logging
 import threading
-from typing import Optional
 
 from appkit_assistant.backend.models import AIModel
 from appkit_assistant.backend.processor import Processor
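Why the new `from __future__ import annotations` line matters here: under PEP 563 it keeps every annotation as a string instead of evaluating it at class-creation time, which is what lets the un-quoted `ModelManager | None` annotation in the next hunk work even though the class is still being defined, and even on Python versions without native `X | Y` unions. A minimal standalone sketch (the `Node` class is illustrative, not from this package):

    from __future__ import annotations


    class Node:
        # Without the future import this annotation is evaluated while the
        # class body runs: "Node" is not bound yet (NameError), and the
        # runtime "Node | None" union would also need Python >= 3.10. As a
        # string annotation it is never evaluated.
        next: Node | None = None


    print(Node.__annotations__)  # {'next': 'Node | None'}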
@@ -11,13 +12,13 @@ logger = logging.getLogger(__name__)
 class ModelManager:
     """Singleton service manager for AI processing services."""
 
-    _instance: Optional["ModelManager"] = None
+    _instance: ModelManager | None = None
     _lock = threading.Lock()
     _default_model_id = (
         None  # Default model ID will be set to the first registered model
     )
 
-    def __new__(cls) -> "ModelManager":
+    def __new__(cls) -> ModelManager:
         if cls._instance is None:
             with cls._lock:
                 if cls._instance is None:
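The `__new__` above is the classic double-checked locking singleton: the outer check skips the lock on the hot path, and the inner check stops two threads that both saw `None` from each creating an instance. A hypothetical usage sketch (assuming `__new__` assigns `cls._instance` inside the inner branch, which this hunk truncates):

    # Hypothetical demo of the singleton contract; ModelManager as diffed above.
    manager_a = ModelManager()
    manager_b = ModelManager()
    assert manager_a is manager_b  # every call yields the one shared instance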
--- a/appkit_assistant/backend/processors/openai_base.py
+++ b/appkit_assistant/backend/processors/openai_base.py
@@ -5,7 +5,7 @@ OpenAI processor for generating AI responses using OpenAI's API.
 import logging
 from abc import ABC, abstractmethod
 from collections.abc import AsyncGenerator
-from typing import Any
+from typing import Any, Final
 
 from openai import AsyncAzureOpenAI, AsyncOpenAI
 
@@ -19,6 +19,122 @@ from appkit_assistant.backend.processor import Processor
 
 logger = logging.getLogger(__name__)
 
+DEFAULT: Final = AIModel(
+    id="default",
+    text="Default (GPT 4.1 Mini)",
+    icon="avvia_intelligence",
+    model="default",
+    stream=True,
+)
+
+GPT_4o: Final = AIModel(
+    id="gpt-4o",
+    text="GPT 4o",
+    icon="openai",
+    model="gpt-4o",
+    stream=True,
+    supports_attachments=True,
+    supports_tools=True,
+)
+
+GPT_4_1: Final = AIModel(
+    id="gpt-4.1",
+    text="GPT-4.1",
+    icon="openai",
+    model="gpt-4.1",
+    stream=True,
+    supports_attachments=True,
+    supports_tools=True,
+)
+
+O3: Final = AIModel(
+    id="o3",
+    text="o3 Reasoning",
+    icon="openai",
+    model="o3",
+    temperature=1,
+    stream=True,
+    supports_attachments=True,
+    supports_tools=True,
+)
+
+O4_MINI: Final = AIModel(
+    id="o4-mini",
+    text="o4 Mini Reasoning",
+    icon="openai",
+    model="o4-mini",
+    stream=True,
+    supports_attachments=True,
+    supports_tools=True,
+    temperature=1,
+)
+
+GPT_5: Final = AIModel(
+    id="gpt-5",
+    text="GPT 5",
+    icon="openai",
+    model="gpt-5",
+    stream=True,
+    supports_attachments=True,
+    supports_tools=True,
+    temperature=1,
+)
+
+GPT_5_1: Final = AIModel(
+    id="gpt-5.1",
+    text="GPT 5.1",
+    icon="openai",
+    model="gpt-5.1",
+    stream=True,
+    supports_attachments=True,
+    supports_tools=True,
+    temperature=1,
+)
+
+GPT_5_2: Final = AIModel(
+    id="gpt-5.2",
+    text="GPT 5.2",
+    icon="openai",
+    model="gpt-5.2",
+    stream=True,
+    supports_attachments=True,
+    supports_tools=True,
+    temperature=1,
+)
+
+GPT_5_MINI: Final = AIModel(
+    id="gpt-5-mini",
+    text="GPT 5 Mini",
+    icon="openai",
+    model="gpt-5-mini",
+    stream=True,
+    supports_attachments=True,
+    supports_tools=True,
+    temperature=1,
+)
+
+GPT_5_1_MINI: Final = AIModel(
+    id="gpt-5.1-mini",
+    text="GPT 5.1 Mini",
+    icon="openai",
+    model="gpt-5.1-mini",
+    stream=True,
+    supports_attachments=True,
+    supports_tools=True,
+    temperature=1,
+)
+
+GPT_5_NANO: Final = AIModel(
+    id="gpt-5-nano",
+    text="GPT 5 Nano",
+    icon="openai",
+    model="gpt-5-nano",
+    stream=True,
+    supports_attachments=True,
+    supports_tools=True,
+    temperature=1,
+)
+
 
 class BaseOpenAIProcessor(Processor, ABC):
     """Base class for OpenAI processors with common initialization and utilities."""
--- a/appkit_assistant-0.10.0.dist-info/METADATA
+++ b/appkit_assistant-0.11.0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: appkit-assistant
-Version: 0.10.0
+Version: 0.11.0
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/jenreh/appkit
 Project-URL: Documentation, https://github.com/jenreh/appkit/tree/main/docs
--- a/appkit_assistant-0.10.0.dist-info/RECORD
+++ b/appkit_assistant-0.11.0.dist-info/RECORD
@@ -1,14 +1,11 @@
 appkit_assistant/configuration.py,sha256=3nBL-dEGYsvSnRDNpxtikZn4QMMkMlNbb4VqGOPolJI,346
-appkit_assistant/backend/model_manager.py,sha256=LrRqjCfT4Fo1-DCblcn40nPbyT0TjZuFiVcNEstpMJ0,4365
+appkit_assistant/backend/model_manager.py,sha256=7f65UbZ51qOYM6F73eKbZ8hMnCzxdanFZKgdKF8bbCk,4366
 appkit_assistant/backend/models.py,sha256=-sr10xChq_lMlLpt7Jbmq4VGweuW4_UUwYsRtIY7HFY,6015
 appkit_assistant/backend/processor.py,sha256=dhBg3pYXdmpj9JtAJc-d83SeUA1NsICj1C_YI0M2QYE,1289
 appkit_assistant/backend/repositories.py,sha256=ueGreZH2ioefwNh1Et5RivDB8IkSLz76aajrWleAVEU,11232
-appkit_assistant/backend/system_prompt.py,sha256=PmqGToI0beRaQB-36ZjTolWC4fne40TjptIft0LkCrQ,2395
 appkit_assistant/backend/system_prompt_cache.py,sha256=fC2GdTqObfcJdt7nzTUE99GGzX9nLZPCGBUOENcBE5Q,5083
-appkit_assistant/backend/processors/ai_models.py,sha256=KG6nyeb22BAI_MxPPFY9uBts-B1BvDFF6C2BsF_RFk8,2517
-appkit_assistant/backend/processors/knowledgeai_processor.py,sha256=0i5E71y7zwBc5mW_8rKuTjeNk_GLv2kfXPPwKNBaazA,9513
 appkit_assistant/backend/processors/lorem_ipsum_processor.py,sha256=j-MZhzibrtabzbGB2Pf4Xcdlr1TlTYWNRdE22LsDp9Q,4635
-appkit_assistant/backend/processors/openai_base.py,sha256=f8X7yMWRBiRsS8vDrbEBY-oR1luzJMn8F7LcAlGUKko,2272
+appkit_assistant/backend/processors/openai_base.py,sha256=IQS4m375BOD_K0PBFOk4i7wL1z5MEiPFxbSmC-HBNgU,4414
 appkit_assistant/backend/processors/openai_chat_completion_processor.py,sha256=nTxouoXDU6VcQr8UhA2KiMNt60KvIwM8cH9Z8lo4dXY,4218
 appkit_assistant/backend/processors/openai_responses_processor.py,sha256=Ns8owrvimtZofyyzhoTgi2t_P0feEgLzooJVfCxC3kw,18800
 appkit_assistant/backend/processors/perplexity_processor.py,sha256=weHukv78MSCF_uSCKGSMpNYHsET9OB8IhpvUiMfPQ8A,3355
@@ -26,6 +23,6 @@ appkit_assistant/state/mcp_server_state.py,sha256=L5r3Bd_OzFh_kgWH81cKVBWhE3Ys6m
 appkit_assistant/state/system_prompt_state.py,sha256=cNjjCOHir5sYgsmd5Cv-lAkAIYesKr_jbgQD1Jyuqu0,6620
 appkit_assistant/state/thread_list_state.py,sha256=fQcMy4SZbum1YcbXJ7XFAsZRIeFS-l8BFeNVy01M1Iw,9445
 appkit_assistant/state/thread_state.py,sha256=qImM6x49--0XYonzwgExrpMy7lcgbKtToH_441iP9Gk,29050
-appkit_assistant-0.10.0.dist-info/METADATA,sha256=jgWA_ZjZdcquEcYBZRuVG0XeaMclBl-5yAkrL6lLMmw,8934
-appkit_assistant-0.10.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-appkit_assistant-0.10.0.dist-info/RECORD,,
+appkit_assistant-0.11.0.dist-info/METADATA,sha256=rh7MGcMLx80pKwkcoWg5jvYpe6XF8FNIsK2MAeb3LtQ,8934
+appkit_assistant-0.11.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+appkit_assistant-0.11.0.dist-info/RECORD,,
--- a/appkit_assistant/backend/processors/ai_models.py
+++ /dev/null
@@ -1,132 +0,0 @@
-from typing import Final
-
-from appkit_assistant.backend.models import AIModel
-
-DEFAULT: Final = AIModel(
-    id="default",
-    text="Default (GPT 4.1 Mini)",
-    icon="avvia_intelligence",
-    model="default",
-    stream=True,
-)
-
-GEMINI_2_5_FLASH: Final = AIModel(
-    id="gemini-2-5-flash",
-    text="Gemini 2.5 Flash",
-    icon="googlegemini",
-    model="gemini-2-5-flash",
-)
-
-LLAMA_3_2_VISION: Final = AIModel(
-    id="llama32_vision_90b",
-    text="Llama 3.2 Vision 90B (OnPrem)",
-    icon="ollama",
-    model="lllama32_vision_90b",
-)
-
-GPT_4o: Final = AIModel(
-    id="gpt-4o",
-    text="GPT 4o",
-    icon="openai",
-    model="gpt-4o",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-)
-
-GPT_4_1: Final = AIModel(
-    id="gpt-4.1",
-    text="GPT-4.1",
-    icon="openai",
-    model="gpt-4.1",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-)
-
-O3: Final = AIModel(
-    id="o3",
-    text="o3 Reasoning",
-    icon="openai",
-    model="o3",
-    temperature=1,
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-)
-
-O4_MINI: Final = AIModel(
-    id="o4-mini",
-    text="o4 Mini Reasoning",
-    icon="openai",
-    model="o4-mini",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-    temperature=1,
-)
-
-GPT_5: Final = AIModel(
-    id="gpt-5",
-    text="GPT 5",
-    icon="openai",
-    model="gpt-5",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-    temperature=1,
-)
-
-GPT_5_1: Final = AIModel(
-    id="gpt-5.1",
-    text="GPT 5.1",
-    icon="openai",
-    model="gpt-5.1",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-    temperature=1,
-)
-
-GPT_5_CHAT: Final = AIModel(
-    id="gpt-5-chat",
-    text="GPT 5 Chat",
-    icon="openai",
-    model="gpt-5-chat",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=False,
-)
-
-GPT_5_MINI: Final = AIModel(
-    id="gpt-5-mini",
-    text="GPT 5 Mini",
-    icon="openai",
-    model="gpt-5-mini",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-    temperature=1,
-)
-
-GPT_5_1_MINI: Final = AIModel(
-    id="gpt-5.1-mini",
-    text="GPT 5.1 Mini",
-    icon="openai",
-    model="gpt-5.1-mini",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-    temperature=1,
-)
-
-GPT_5_NANO: Final = AIModel(
-    id="gpt-5-nano",
-    text="GPT 5 Nano",
-    icon="openai",
-    model="gpt-5-nano",
-    stream=True,
-    supports_attachments=True,
-    supports_tools=True,
-    temperature=1,
-)
--- a/appkit_assistant/backend/processors/knowledgeai_processor.py
+++ /dev/null
@@ -1,275 +0,0 @@
-import asyncio
-import logging
-from collections.abc import AsyncGenerator
-from typing import Any
-
-from openai import AsyncOpenAI, AsyncStream
-from openai.types.chat import ChatCompletionMessageParam
-
-from appkit_assistant.backend.models import (
-    AIModel,
-    Chunk,
-    ChunkType,
-    MCPServer,
-    Message,
-    MessageType,
-)
-from appkit_assistant.backend.processor import Processor
-
-logger = logging.getLogger(__name__)
-
-
-class KnowledgeAIProcessor(Processor):
-    """Processor that generates Knowledge AI text responses."""
-
-    def __init__(
-        self,
-        server: str,
-        api_key: str,
-        models: dict[str, AIModel] | None = None,
-        with_projects: bool = False,
-    ) -> None:
-        """Initialize the Knowledge AI processor."""
-        super().__init__()
-        self.api_key = api_key
-        self.server = server
-        self.models = models
-        self.with_projects = with_projects
-
-        if with_projects:
-            self._initialize_models()
-
-    def _initialize_models(self) -> None:
-        """Initialize the models supported by this processor."""
-        try:
-            from knai_avvia.backend.models import Project  # noqa: PLC0415
-            from knai_avvia.backend.project_repository import (  # noqa: PLC0415
-                load_projects,  # noqa: E402
-            )
-        except ImportError as e:
-            logger.error("knai_avvia package not available: %s", e)
-            self.models = {}
-            return
-
-        try:
-            projects: list[Project] = asyncio.run(
-                load_projects(
-                    url=self.server,
-                    api_key=self.api_key,
-                )
-            )
-
-            if self.models is None:
-                self.models = {}
-
-            for project in projects:
-                project_key = f"{project.id}"
-                self.models[project_key] = AIModel(
-                    id=project_key,
-                    text=project.name,
-                    icon="avvia_intelligence",
-                )
-        except Exception as e:
-            logger.error("Failed to load projects from Knowledge AI: %s", e)
-            self.models = {}
-
-    async def process(
-        self,
-        messages: list[Message],
-        model_id: str,
-        files: list[str] | None = None,  # noqa: ARG002
-        mcp_servers: list[MCPServer] | None = None,  # noqa: ARG002
-    ) -> AsyncGenerator[Chunk, None]:
-        try:
-            from knai_avvia.backend.chat_client import chat_completion  # noqa: PLC0415
-        except ImportError as e:
-            logger.error("knai_avvia package not available: %s", e)
-            raise ImportError(
-                "knai_avvia package is required for KnowledgeAIProcessor"
-            ) from e
-
-        if model_id not in self.models:
-            logger.error("Model %s not supported by OpenAI processor", model_id)
-            raise ValueError(f"Model {model_id} not supported by OpenAI processor")
-
-        chat_messages = self._convert_messages(messages)
-
-        try:
-            result = await chat_completion(
-                api_key=self.api_key,
-                server=self.server,
-                project_id=int(model_id),
-                question=messages[-2].text,  # last human message
-                history=chat_messages,
-                temperature=0.05,
-            )
-
-            if result.answer:
-                yield Chunk(
-                    type=ChunkType.TEXT,
-                    text=result.answer,
-                    chunk_metadata={
-                        "source": "knowledgeai",
-                        "project_id": model_id,
-                        "streaming": str(False),
-                    },
-                )
-        except Exception as e:
-            raise e
-
-    def get_supported_models(self) -> dict[str, AIModel]:
-        return self.models if self.api_key else {}
-
-    def _convert_messages(self, messages: list[Message]) -> list[dict[str, str]]:
-        return [
-            {"role": "Human", "message": msg.text}
-            if msg.type == MessageType.HUMAN
-            else {"role": "AI", "message": msg.text}
-            for msg in (messages or [])
-            if msg.type in (MessageType.HUMAN, MessageType.ASSISTANT)
-        ]
-
-
-class KnowledgeAIOpenAIProcessor(Processor):
-    """Processor that generates Knowledge AI text responses."""
-
-    def __init__(
-        self,
-        server: str,
-        api_key: str,
-        models: dict[str, AIModel] | None = None,
-        with_projects: bool = False,
-    ) -> None:
-        """Initialize the Knowledge AI processor."""
-        self.api_key = api_key
-        self.server = server
-        self.models = models
-        self.with_projects = with_projects
-        self.client = (
-            AsyncOpenAI(api_key=self.api_key, base_url=self.server + "/api/openai/v1")
-            if self.api_key
-            else None
-        )
-
-        if self.with_projects:
-            self._initialize_models()
-
-    def _initialize_models(self) -> None:
-        """Initialize the models supported by this processor."""
-        try:
-            from knai_avvia.backend.models import Project  # noqa: PLC0415
-            from knai_avvia.backend.project_repository import (  # noqa: PLC0415
-                load_projects,  # noqa: E402
-            )
-        except ImportError as e:
-            logger.error("knai_avvia package not available: %s", e)
-            self.models = {}
-            return
-
-        try:
-            projects: list[Project] = asyncio.run(
-                load_projects(
-                    url=self.server,
-                    api_key=self.api_key,
-                )
-            )
-
-            if self.models is None:
-                self.models = {}
-
-            for project in projects:
-                project_key = f"{project.id}"
-                self.models[project_key] = AIModel(
-                    id=project_key,
-                    project_id=project.id,
-                    text=project.name,
-                    icon="avvia_intelligence",
-                    stream=False,
-                )
-        except Exception as e:
-            logger.error("Failed to load projects from Knowledge AI: %s", e)
-            self.models = {}
-
-    async def process(
-        self,
-        messages: list[Message],
-        model_id: str,
-        files: list[str] | None = None,  # noqa: ARG002
-        mcp_servers: list[MCPServer] | None = None,  # noqa: ARG002
-    ) -> AsyncGenerator[Chunk, None]:
-        if not self.client:
-            raise ValueError("KnowledgeAI OpenAI Client not initialized.")
-
-        model = self.models.get(model_id)
-        if not model:
-            raise ValueError(
-                "Model %s not supported by KnowledgeAI processor", model_id
-            )
-
-        chat_messages = self._convert_messages_to_openai_format(messages)
-
-        try:
-            session_params: dict[str, Any] = {
-                "model": model.model if model.project_id else model.id,
-                "messages": chat_messages[:-1],
-                "stream": model.stream,
-            }
-            if model.project_id:
-                session_params["user"] = str(model.project_id)
-
-            session = await self.client.chat.completions.create(**session_params)
-
-            if isinstance(session, AsyncStream):
-                async for event in session:
-                    if event.choices and event.choices[0].delta:
-                        content = event.choices[0].delta.content
-                        if content:
-                            yield Chunk(
-                                type=ChunkType.TEXT,
-                                text=content,
-                                chunk_metadata={
-                                    "source": "knowledgeai_openai",
-                                    "streaming": str(True),
-                                    "model_id": model_id,
-                                },
-                            )
-            elif session.choices and session.choices[0].message:
-                content = session.choices[0].message.content
-                if content:
-                    logger.debug("Content:\n%s", content)
-                    yield Chunk(
-                        type=ChunkType.TEXT,
-                        text=content,
-                        chunk_metadata={
-                            "source": "knowledgeai_openai",
-                            "streaming": str(False),
-                            "model_id": model_id,
-                        },
-                    )
-        except Exception as e:
-            logger.exception("Failed to get response from OpenAI: %s", e)
-            raise e
-
-    def get_supported_models(self) -> dict[str, AIModel]:
-        return self.models if self.api_key else {}
-
-    def _convert_messages_to_openai_format(
-        self, messages: list[Message]
-    ) -> list[ChatCompletionMessageParam]:
-        formatted: list[ChatCompletionMessageParam] = []
-        role_map = {
-            MessageType.HUMAN: "user",
-            MessageType.SYSTEM: "system",
-            MessageType.ASSISTANT: "assistant",
-        }
-
-        for msg in messages or []:
-            if msg.type not in role_map:
-                continue
-            role = role_map[msg.type]
-            if formatted and role != "system" and formatted[-1]["role"] == role:
-                formatted[-1]["content"] = formatted[-1]["content"] + "\n\n" + msg.text
-            else:
-                formatted.append({"role": role, "content": msg.text})
-
-        return formatted
--- a/appkit_assistant/backend/system_prompt.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from typing import Final
-
-SYSTEM_PROMPT: Final[str] = """
-# System Prompt: Context-Aware, Tool-Oriented Chat Client
-
-## 1) Mission
-Interpret user input semantically, take context into account (history, metadata, projects), and run the most suitable tools to deliver precise, traceable results.
-
-## 2) Priorities
-1. Correctness and concision over length.
-2. Use tools when available and sensible; otherwise give a well-founded answer of your own.
-3. Always display results directly (no waiting states or placeholders).
-
-## 3) Output Formats
-- **Code:** in Markdown code blocks with the correct language tag.
-- **Diagrams:** always in Mermaid syntax as valid Markdown source.
-- **Analyses/comparisons:** data-driven; use tables.
-- **Images (important):** generate with image-generation tools and **always display inline** in the chat. For real persons, only with prior consent.
-
-## 4) Tool Usage
-- Choose exactly **one** primary tool per task (if several are possible, pick the one with the greatest benefit).
-- Use capability descriptors, run the tool(s) deterministically, show output immediately.
-- An exploratory approach is allowed as long as goal and context are clear.
-- If no tool fits: answer directly (structured reasoning implicit, not printed).
-
-{mcp_prompts}
-
-## 5) Context
-- Consistently account for conversation history, user role, organization, and ongoing projects.
-- Maintain coherence across multiple turns; avoid repetition.
-
-## 6) Error and Exception Handling
-- **Tool errors/unavailability:** inform briefly and immediately provide a robust alternative (e.g. a local estimate/analysis).
-- **Ambiguity:** make a defensible assumption and deliver a complete result.
-- **No suitable tool:** answer from your own inference, clearly structured.
-
-## 7) Quality Criteria
-- Precise, testable statements; with numbers/tables where sensible.
-- Clear structure (headings, lists, tables, code blocks, diagrams).
-- Consistent terminology; no redundant passages.
-
-## 8) Examples (Format)
-```python
-def hello_world():
-    print("Hello, world!")
-```
-```mermaid
-flowchart TD
-    A["LLM/Chat-Client"] --> B["MCP Client"]
-    B --> C{{"Transport"}}
-    C -->|stdio| D["FastMCP Server (local)"]
-    C -->|http| E["FastMCP Server (remote)"]
-    D --> F["@mcp.tool web_search()"]
-    E --> F
-```
-"""