appkit-assistant 0.16.3__py3-none-any.whl → 0.17.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
@@ -38,6 +38,8 @@ LOREM_MODELS = {
         icon="codesandbox",
         model="lorem-short",
         stream=True,
+        supports_attachments=True,
+        supports_tools=True,
     )
 }

@@ -33,7 +33,7 @@ GPT_4o: Final = AIModel(
     icon="openai",
     model="gpt-4o",
     stream=True,
-    supports_attachments=True,
+    supports_attachments=False,
     supports_tools=True,
 )

@@ -43,7 +43,7 @@ GPT_4_1: Final = AIModel(
     icon="openai",
     model="gpt-4.1",
     stream=True,
-    supports_attachments=True,
+    supports_attachments=False,
     supports_tools=True,
 )

@@ -54,7 +54,7 @@ O3: Final = AIModel(
     model="o3",
     temperature=1,
     stream=True,
-    supports_attachments=True,
+    supports_attachments=False,
     supports_tools=True,
 )

@@ -64,7 +64,7 @@ O4_MINI: Final = AIModel(
     icon="openai",
     model="o4-mini",
     stream=True,
-    supports_attachments=True,
+    supports_attachments=False,
     supports_tools=True,
     temperature=1,
 )
@@ -75,7 +75,7 @@ GPT_5: Final = AIModel(
     icon="openai",
     model="gpt-5",
     stream=True,
-    supports_attachments=True,
+    supports_attachments=False,
     supports_tools=True,
     temperature=1,
 )
@@ -86,7 +86,7 @@ GPT_5_1: Final = AIModel(
     icon="openai",
     model="gpt-5.1",
     stream=True,
-    supports_attachments=True,
+    supports_attachments=False,
     supports_tools=True,
     temperature=1,
 )
@@ -97,7 +97,7 @@ GPT_5_2: Final = AIModel(
     icon="openai",
     model="gpt-5.2",
     stream=True,
-    supports_attachments=True,
+    supports_attachments=False,
     supports_tools=True,
     temperature=1,
 )
@@ -108,7 +108,7 @@ GPT_5_MINI: Final = AIModel(
     icon="openai",
     model="gpt-5-mini",
     stream=True,
-    supports_attachments=True,
+    supports_attachments=False,
     supports_tools=True,
     temperature=1,
 )
@@ -119,7 +119,7 @@ GPT_5_1_MINI: Final = AIModel(
     icon="openai",
     model="gpt-5.1-mini",
     stream=True,
-    supports_attachments=True,
+    supports_attachments=False,
     supports_tools=True,
     temperature=1,
 )
@@ -130,7 +130,7 @@ GPT_5_NANO: Final = AIModel(
     icon="openai",
     model="gpt-5-nano",
     stream=True,
-    supports_attachments=True,
+    supports_attachments=False,
     supports_tools=True,
     temperature=1,
 )
@@ -66,23 +66,40 @@ class OpenAIChatCompletionsProcessor(BaseOpenAIProcessor):
                     if event.choices and event.choices[0].delta:
                         content = event.choices[0].delta.content
                         if content:
-                            yield self._create_chunk(content, model.model, stream=True)
+                            yield self._create_chunk(
+                                content,
+                                model.model,
+                                stream=True,
+                                message_id=event.id,
+                            )
             else:
                 content = session.choices[0].message.content
                 if content:
-                    yield self._create_chunk(content, model.model)
+                    yield self._create_chunk(
+                        content, model.model, message_id=session.id
+                    )
         except Exception as e:
             raise e

-    def _create_chunk(self, content: str, model: str, stream: bool = False) -> Chunk:
+    def _create_chunk(
+        self,
+        content: str,
+        model: str,
+        stream: bool = False,
+        message_id: str | None = None,
+    ) -> Chunk:
+        metadata = {
+            "source": "chat_completions",
+            "streaming": str(stream),
+            "model": model,
+        }
+        if message_id:
+            metadata["message_id"] = message_id
+
         return Chunk(
             type=ChunkType.TEXT,
             text=content,
-            chunk_metadata={
-                "source": "chat_completions",
-                "streaming": str(stream),
-                "model": model,
-            },
+            chunk_metadata=metadata,
         )

     def _convert_messages_to_openai_format(
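
Net effect of the hunk above: chat-completions text chunks can now carry the upstream response id in their metadata. A minimal sketch of what a consumer sees, assuming Chunk and ChunkType come from appkit_assistant.backend.models as elsewhere in this diff (the id value is invented):

    from appkit_assistant.backend.models import Chunk, ChunkType

    # Roughly what the patched processor emits while streaming.
    chunk = Chunk(
        type=ChunkType.TEXT,
        text="Hello",
        chunk_metadata={
            "source": "chat_completions",
            "streaming": "True",
            "model": "gpt-4o",
            "message_id": "chatcmpl-abc123",  # only set when the API returned an id
        },
    )
    assert chunk.chunk_metadata.get("message_id") == "chatcmpl-abc123"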
@@ -206,6 +206,12 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
             tool_name = getattr(item, "name", "unknown_tool")
             tool_id = getattr(item, "id", "unknown_id")
             server_label = getattr(item, "server_label", "unknown_server")
+            logger.debug(
+                "MCP call started: %s.%s (id=%s)",
+                server_label,
+                tool_name,
+                tool_id,
+            )
             return self._create_chunk(
                 ChunkType.TOOL_CALL,
                 f"Benutze Werkzeug: {server_label}.{tool_name}",
@@ -369,27 +375,28 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):

         if event_type == "response.mcp_call.in_progress":
             tool_id = getattr(event, "item_id", "unknown_id")
-            return self._create_chunk(
-                ChunkType.TOOL_CALL,
-                "Tool call in progress...",
-                {"tool_id": tool_id, "status": "in_progress"},
-            )
+            # This event doesn't have tool details, just acknowledge it
+            logger.debug("MCP call in progress: %s", tool_id)
+            return None
+
+        if event_type == "response.mcp_call.completed":
+            # MCP call completed successfully - handled via response.output_item.done
+            # but we can log for debugging
+            tool_id = getattr(event, "item_id", "unknown_id")
+            logger.debug("MCP call completed: %s", tool_id)
+            return None

         if event_type == "response.mcp_list_tools.in_progress":
+            # This is a setup event, not a tool call - just log and return None
             tool_id = getattr(event, "item_id", "unknown_id")
-            return self._create_chunk(
-                ChunkType.TOOL_CALL,
-                "Lade verfügbare Werkzeuge...",
-                {"tool_id": tool_id, "status": "listing_tools"},
-            )
+            logger.debug("MCP list_tools in progress: %s", tool_id)
+            return None

         if event_type == "response.mcp_list_tools.completed":
+            # This is a setup event, not a tool call - just log and return None
             tool_id = getattr(event, "item_id", "unknown_id")
-            return self._create_chunk(
-                ChunkType.TOOL_RESULT,
-                "Verfügbare Werkzeuge geladen.",
-                {"tool_id": tool_id, "status": "tools_listed"},
-            )
+            logger.debug("MCP list_tools completed: %s", tool_id)
+            return None

         if event_type == "response.mcp_list_tools.failed":
             tool_id = getattr(event, "item_id", "unknown_id")
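
The hunk above changes the contract for MCP bookkeeping events: instead of emitting placeholder TOOL_CALL/TOOL_RESULT chunks for them, the processor now logs and returns None, so only events carrying real tool details reach the UI. A hedged sketch of that rule (the set is a summary device, not the actual implementation, which uses separate if-branches):

    # Event names taken from the diff; grouping them into a set is illustrative only.
    SILENT_MCP_EVENTS = {
        "response.mcp_call.in_progress",
        "response.mcp_call.completed",
        "response.mcp_list_tools.in_progress",
        "response.mcp_list_tools.completed",
    }

    def emits_chunk(event_type: str) -> bool:
        """Bookkeeping events are logged and swallowed; others may produce chunks."""
        return event_type not in SILENT_MCP_EVENTS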
@@ -52,6 +52,14 @@ class ResponseAccumulator:

     def process_chunk(self, chunk: Chunk) -> None:
         """Process a single chunk and update internal state."""
+        # Update message ID if provided in metadata
+        if (
+            self.messages
+            and self.messages[-1].type == MessageType.ASSISTANT
+            and "message_id" in chunk.chunk_metadata
+        ):
+            self.messages[-1].id = chunk.chunk_metadata["message_id"]
+
         if chunk.type == ChunkType.TEXT:
             if self.messages and self.messages[-1].type == MessageType.ASSISTANT:
                 self.messages[-1].text += chunk.text
@@ -128,8 +136,17 @@ class ResponseAccumulator:
         )

         if chunk.type == ChunkType.THINKING:
-            if item.text and item.text != text:
+            # Check if this is a streaming delta (has "delta" in metadata)
+            is_delta = chunk.chunk_metadata.get("delta") is not None
+            if is_delta:
+                # Streaming delta - append directly without separator
+                item.text = (item.text or "") + chunk.text
+            elif item.text and item.text != text:
+                # Non-delta chunk with different text - append with newline
                 item.text += f"\n{chunk.text}"
+            else:
+                # Initial text
+                item.text = text
         elif chunk.type == ChunkType.THINKING_RESULT:
             item.status = ThinkingStatus.COMPLETED
             if chunk.text:
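
The THINKING branch now distinguishes streaming deltas from full snapshots. The same three-way merge, distilled onto plain strings (a hypothetical helper; the real code mutates a thinking item in place and derives `text` from the chunk):

    def merge_thinking(existing: str | None, incoming: str, is_delta: bool) -> str:
        if is_delta:
            # Streaming delta: concatenate with no separator.
            return (existing or "") + incoming
        if existing and existing != incoming:
            # A second full snapshot: keep both, newline-separated.
            return f"{existing}\n{incoming}"
        # First (or identical repeated) snapshot.
        return incoming

    assert merge_thinking("foo", "bar", is_delta=True) == "foobar"
    assert merge_thinking("foo", "bar", is_delta=False) == "foo\nbar"
    assert merge_thinking(None, "bar", is_delta=False) == "bar"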
@@ -161,8 +178,30 @@ class ResponseAccumulator:
         tool_id = self._get_or_create_tool_session(chunk)

         tool_name = chunk.chunk_metadata.get("tool_name", "Unknown")
-        if chunk.type == ChunkType.TOOL_CALL:
-            self.current_activity = f"Nutze Werkzeug: {tool_name}..."
+        server_label = chunk.chunk_metadata.get("server_label", "")
+        # Use server_label.tool_name format if both available
+        if server_label and tool_name and tool_name != "Unknown":
+            display_name = f"{server_label}.{tool_name}"
+        else:
+            display_name = tool_name
+
+        logger.debug(
+            "Tool chunk received: type=%s, tool_id=%s, tool_name=%s, "
+            "server_label=%s, display_name=%s",
+            chunk.type,
+            tool_id,
+            tool_name,
+            server_label,
+            display_name,
+        )
+
+        # Only update activity display if we have a real tool name
+        if (
+            chunk.type == ChunkType.TOOL_CALL
+            and display_name
+            and display_name != "Unknown"
+        ):
+            self.current_activity = f"Nutze Werkzeug: {display_name}..."

         status = ThinkingStatus.IN_PROGRESS
         text = ""
@@ -174,11 +213,11 @@ class ResponseAccumulator:
             parameters = chunk.chunk_metadata.get("parameters", chunk.text)
             text = chunk.chunk_metadata.get("description", "")
         elif chunk.type == ChunkType.TOOL_RESULT:
-            is_error = (
-                "error" in chunk.text.lower()
-                or "failed" in chunk.text.lower()
-                or chunk.chunk_metadata.get("error")
-            )
+            # Check error flag from metadata - don't rely on text content
+            # as valid results may contain words like "error" in data
+            # Note: metadata values may be strings, so check for "True" string
+            error_value = chunk.chunk_metadata.get("error")
+            is_error = error_value is True or error_value == "True"
             status = ThinkingStatus.ERROR if is_error else ThinkingStatus.COMPLETED
             result = chunk.text
             if is_error:
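
The error check is now driven purely by the metadata flag, which may arrive as a bool or as the string "True". Restated as a standalone predicate (hypothetical name; the diff inlines it in the TOOL_RESULT branch):

    def is_error_result(metadata: dict) -> bool:
        error_value = metadata.get("error")
        return error_value is True or error_value == "True"

    assert is_error_result({"error": True})
    assert is_error_result({"error": "True"})
    assert not is_error_result({})  # the word "error" in result text no longer matters
    assert not is_error_result({"error": "false"})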
@@ -186,12 +225,15 @@ class ResponseAccumulator:
             else:
                 text = chunk.text

+        # Only pass tool_name if we have a real value
+        effective_tool_name = display_name if display_name != "Unknown" else None
+
         item = self._get_or_create_thinking_item(
             tool_id,
             ThinkingType.TOOL_CALL,
             text=text,
             status=status,
-            tool_name=tool_name,
+            tool_name=effective_tool_name,
             parameters=parameters,
             result=result,
             error=error,
@@ -200,8 +242,13 @@ class ResponseAccumulator:
         if chunk.type == ChunkType.TOOL_CALL:
             item.parameters = parameters
             item.text = text
-            if not item.tool_name or item.tool_name == "Unknown":
-                item.tool_name = tool_name
+            # Only update tool_name if we have a better value and item needs it
+            if (
+                display_name
+                and display_name != "Unknown"
+                and (not item.tool_name or item.tool_name == "Unknown")
+            ):
+                item.tool_name = display_name
             item.status = ThinkingStatus.IN_PROGRESS
         elif chunk.type == ChunkType.TOOL_RESULT:
             item.status = status
@@ -11,6 +11,7 @@ from appkit_assistant.backend.models import (
     MessageType,
     ThreadModel,
     ThreadStatus,
+    UploadedFile,
 )
 from appkit_assistant.state.thread_list_state import ThreadListState
 from appkit_assistant.state.thread_state import ThreadState
@@ -31,6 +32,7 @@ __all__ = [
     "ThreadModel",
     "ThreadState",
     "ThreadStatus",
+    "UploadedFile",
     "composer",
     "mcp_servers_table",
 ]
@@ -3,6 +3,7 @@ from collections.abc import Callable
 import reflex as rx

 import appkit_mantine as mn
+from appkit_assistant.backend.models import UploadedFile
 from appkit_assistant.components.tools_modal import tools_popover
 from appkit_assistant.state.thread_state import ThreadState

@@ -63,23 +64,107 @@ def submit() -> rx.Component:
     )


-def add_attachment(show: bool = False) -> rx.Component | None:
-    if not show:
-        return None
+def _uploaded_file_thumbnail(file: UploadedFile) -> rx.Component:
+    """Render a thumbnail for an uploaded file with a remove button."""
+    return rx.box(
+        rx.hstack(
+            rx.icon("file", size=16, color=rx.color("gray", 9)),
+            rx.text(
+                file.filename,
+                size="1",
+                max_width="100px",
+                overflow="hidden",
+                text_overflow="ellipsis",
+                white_space="nowrap",
+            ),
+            spacing="1",
+            align="center",
+            padding="4px 8px",
+            background=rx.color("gray", 3),
+            border_radius="6px",
+        ),
+        rx.icon_button(
+            rx.icon("x", size=10),
+            width="16px",
+            height="16px",
+            variant="solid",
+            color_scheme="gray",
+            position="absolute",
+            top="-6px",
+            right="-6px",
+            border_radius="12px",
+            padding="0px",
+            cursor="pointer",
+            on_click=lambda: ThreadState.remove_file_from_prompt(file.file_path),
+        ),
+        position="relative",
+    )

-    return rx.tooltip(
-        rx.button(
-            rx.icon("paperclip", size=18),
-            rx.text("2 files", size="1", color="gray.2"),
-            id="composer-attachment",
-            variant="ghost",
-            padding="8px",
-            access_key="s",
+
+def selected_files_row() -> rx.Component:
+    """Render the row of selected file thumbnails (only visible when files exist)."""
+    return rx.cond(
+        ThreadState.uploaded_files.length() > 0,
+        rx.hstack(
+            rx.foreach(
+                ThreadState.uploaded_files,
+                _uploaded_file_thumbnail,
+            ),
+            spacing="2",
+            flex_wrap="wrap",
+            margin_top="6px",
+            margin_left="12px",
         ),
-        content="Manage Attachments…",
+        rx.fragment(),
     )


+def file_upload(show: bool = False) -> rx.Component:
+    """File upload button with drag-and-drop support."""
+    return rx.cond(
+        show & ThreadState.selected_model_supports_attachments,
+        rx.tooltip(
+            rx.upload.root(
+                rx.box(
+                    rx.icon("paperclip", size=18, color=rx.color("gray", 9)),
+                    cursor="pointer",
+                    padding="8px",
+                    border_radius="8px",
+                    _hover={"background": rx.color("gray", 3)},
+                ),
+                id="composer_file_upload",
+                accept={
+                    # "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": [
+                    #     ".xlsx"
+                    # ],
+                    # "text/csv": [".csv"],
+                    # "application/vnd.openxmlformats-officedocument."
+                    # "wordprocessingml.document": [".docx"],
+                    # "application/vnd.openxmlformats-officedocument."
+                    # "presentationml.presentation": [".pptx"],
+                    # "text/markdown": [".md"],
+                    "application/pdf": [".pdf"],
+                    "image/png": [".png"],
+                    "image/jpeg": [".jpg", ".jpeg"],
+                },
+                multiple=True,
+                max_files=5,
+                max_size=5 * 1024 * 1024,
+                on_drop=ThreadState.handle_upload(
+                    rx.upload_files(upload_id="composer_file_upload")
+                ),
+            ),
+            content="Dateien hochladen (max. 5, 5MB pro Datei)",
+        ),
+        rx.fragment(),
+    )
+
+
+def add_attachment(show: bool = False) -> rx.Component:
+    """Legacy attachment function - now wraps file_upload."""
+    return file_upload(show=show)
+
+

 def choose_model(show: bool = False) -> rx.Component | None:
     if not show:
@@ -146,7 +231,9 @@ class ComposerComponent(rx.ComponentNamespace):
     add_attachment = staticmethod(add_attachment)
     choose_model = staticmethod(choose_model)
     clear = staticmethod(clear)
+    file_upload = staticmethod(file_upload)
     input = staticmethod(composer_input)
+    selected_files_row = staticmethod(selected_files_row)
     submit = staticmethod(submit)
     tools = staticmethod(tools)