appkit-assistant 0.17.0__tar.gz → 0.17.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/PKG-INFO +1 -1
  2. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/pyproject.toml +1 -1
  3. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/models.py +3 -0
  4. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/processor.py +3 -0
  5. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/processors/claude_responses_processor.py +5 -0
  6. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/processors/gemini_responses_processor.py +20 -3
  7. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/processors/lorem_ipsum_processor.py +9 -0
  8. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/processors/openai_chat_completion_processor.py +33 -8
  9. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/processors/openai_responses_processor.py +5 -0
  10. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/processors/perplexity_processor.py +5 -0
  11. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/response_accumulator.py +8 -0
  12. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/components/composer.py +14 -2
  13. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/components/message.py +185 -50
  14. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/state/thread_state.py +154 -0
  15. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/.gitignore +0 -0
  16. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/README.md +0 -0
  17. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/docs/assistant.png +0 -0
  18. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/file_manager.py +0 -0
  19. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/mcp_auth_service.py +0 -0
  20. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/model_manager.py +0 -0
  21. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/processors/claude_base.py +0 -0
  22. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/processors/gemini_base.py +0 -0
  23. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/processors/openai_base.py +0 -0
  24. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/repositories.py +0 -0
  25. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/services/thread_service.py +0 -0
  26. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/backend/system_prompt_cache.py +0 -0
  27. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/components/__init__.py +0 -0
  28. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/components/composer_key_handler.py +0 -0
  29. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/components/mcp_oauth.py +0 -0
  30. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/components/mcp_server_dialogs.py +0 -0
  31. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/components/mcp_server_table.py +0 -0
  32. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/components/system_prompt_editor.py +0 -0
  33. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/components/thread.py +0 -0
  34. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/components/threadlist.py +0 -0
  35. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/components/tools_modal.py +0 -0
  36. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/configuration.py +0 -0
  37. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/pages.py +0 -0
  38. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/state/mcp_oauth_state.py +0 -0
  39. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/state/mcp_server_state.py +0 -0
  40. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/state/system_prompt_state.py +0 -0
  41. {appkit_assistant-0.17.0 → appkit_assistant-0.17.2}/src/appkit_assistant/state/thread_list_state.py +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: appkit-assistant
-Version: 0.17.0
+Version: 0.17.2
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/jenreh/appkit
 Project-URL: Documentation, https://github.com/jenreh/appkit/tree/main/docs

pyproject.toml
@@ -10,7 +10,7 @@ dependencies = [
     "reflex>=0.8.22",
 ]
 name = "appkit-assistant"
-version = "0.17.0"
+version = "0.17.2"
 description = "Add your description here"
 readme = "README.md"
 authors = [{ name = "Jens Rehpöhler" }]

src/appkit_assistant/backend/models.py
@@ -1,4 +1,5 @@
 import json
+import uuid
 from datetime import UTC, datetime
 from enum import StrEnum
 from typing import Any
@@ -82,7 +83,9 @@ class MessageType(StrEnum):


 class Message(BaseModel):
+    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
     text: str
+    original_text: str | None = None  # To store original text if edited
     editable: bool = False
     type: MessageType
     done: bool = False
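
Every Message now carries a stable identity and can remember its pre-edit text. A minimal sketch of just the two new fields (assuming pydantic v2, which the `Field` usage implies; the model's remaining fields are omitted):

    import uuid
    from pydantic import BaseModel, Field

    class Message(BaseModel):
        id: str = Field(default_factory=lambda: str(uuid.uuid4()))
        text: str
        original_text: str | None = None  # pre-edit text, set on first edit

    m = Message(text="hello")
    # Simulate an edit: keep the old wording, swap in the new one.
    edited = m.model_copy(update={"original_text": m.text, "text": "hello, world"})
    assert edited.id == m.id  # the id survives an edit

Because `id` uses a `default_factory`, each instance gets its own UUID, and since it survives edits, the per-message UI actions added later in this diff keep resolving to the same message.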

src/appkit_assistant/backend/processor.py
@@ -3,6 +3,7 @@ Base processor interface for AI processing services.
 """

 import abc
+import asyncio
 import logging
 from collections.abc import AsyncGenerator

@@ -40,6 +41,7 @@ class Processor(abc.ABC):
         model_id: str,
         files: list[str] | None = None,
         mcp_servers: list[MCPServer] | None = None,
+        cancellation_token: asyncio.Event | None = None,
     ) -> AsyncGenerator[Chunk, None]:
         """
         Process the thread using an AI model.
@@ -49,6 +51,7 @@ class Processor(abc.ABC):
             model_id: The ID of the model to use.
             files: Optional list of file paths that were uploaded.
             mcp_servers: Optional list of MCP servers to use as tools.
+            cancellation_token: Optional event to signal cancellation.

         Returns:
             An async generator that yields Chunk objects containing different content
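
The new contract is cooperative: the caller owns an asyncio.Event and every processor polls it between chunks, so streams stop at a chunk boundary rather than being torn down with task cancellation. A self-contained sketch of the pattern (the fake generator below merely stands in for a Processor.process implementation; it is not part of the package):

    import asyncio
    from collections.abc import AsyncGenerator

    async def fake_process(
        cancellation_token: asyncio.Event | None = None,
    ) -> AsyncGenerator[str, None]:
        # Stands in for Processor.process: poll the token between chunks.
        for word in ["lorem", "ipsum", "dolor", "sit", "amet"]:
            if cancellation_token and cancellation_token.is_set():
                break  # cooperative exit, no CancelledError to handle
            await asyncio.sleep(0.01)
            yield word

    async def main() -> None:
        cancel = asyncio.Event()
        received: list[str] = []
        async for word in fake_process(cancel):
            received.append(word)
            if len(received) == 2:
                cancel.set()  # what the UI's stop button does
        print(received)  # ['lorem', 'ipsum'] - the stream ends on the next poll

    asyncio.run(main())

Each concrete processor below implements exactly this poll-and-break check inside its streaming loop.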

src/appkit_assistant/backend/processors/claude_responses_processor.py
@@ -5,6 +5,7 @@ Supports MCP tools, file uploads (images and documents), extended thinking,
 and automatic citation extraction.
 """

+import asyncio
 import base64
 import json
 import logging
@@ -69,6 +70,7 @@ class ClaudeResponsesProcessor(BaseClaudeProcessor):
         mcp_servers: list[MCPServer] | None = None,
         payload: dict[str, Any] | None = None,
         user_id: int | None = None,
+        cancellation_token: asyncio.Event | None = None,
     ) -> AsyncGenerator[Chunk, None]:
         """Process messages using Claude Messages API with streaming."""
         if not self.client:
@@ -103,6 +105,9 @@ class ClaudeResponsesProcessor(BaseClaudeProcessor):
         # Process streaming events
         async with stream as response:
             async for event in response:
+                if cancellation_token and cancellation_token.is_set():
+                    logger.info("Processing cancelled by user")
+                    break
                 chunk = self._handle_event(event)
                 if chunk:
                     yield chunk

src/appkit_assistant/backend/processors/gemini_responses_processor.py
@@ -2,6 +2,7 @@
 Gemini responses processor for generating AI responses using Google's GenAI API.
 """

+import asyncio
 import copy
 import json
 import logging
@@ -72,6 +73,7 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):
         mcp_servers: list[MCPServer] | None = None,
         payload: dict[str, Any] | None = None,
         user_id: int | None = None,
+        cancellation_token: asyncio.Event | None = None,
     ) -> AsyncGenerator[Chunk, None]:
         """Process messages using Google GenAI API with native MCP support."""
         if not self.client:
@@ -120,7 +122,7 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):
         try:
             # Generate content with MCP tools
             async for chunk in self._stream_with_mcp(
-                model.model, contents, config, mcp_sessions
+                model.model, contents, config, mcp_sessions, cancellation_token
             ):
                 yield chunk

@@ -193,11 +195,14 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):
         contents: list[types.Content],
         config: types.GenerateContentConfig,
         mcp_sessions: list[Any],
+        cancellation_token: asyncio.Event | None = None,
     ) -> AsyncGenerator[Chunk, None]:
         """Stream responses with MCP tool support."""
         if not mcp_sessions:
             # No MCP sessions, direct streaming
-            async for chunk in self._stream_generation(model_name, contents, config):
+            async for chunk in self._stream_generation(
+                model_name, contents, config, cancellation_token
+            ):
                 yield chunk
             return

@@ -225,7 +230,7 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):

         # Stream with automatic function calling loop
         async for chunk in self._stream_with_tool_loop(
-            model_name, contents, config, tool_contexts
+            model_name, contents, config, tool_contexts, cancellation_token
         ):
             yield chunk

@@ -235,12 +240,17 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):
         contents: list[types.Content],
         config: types.GenerateContentConfig,
         tool_contexts: list[MCPToolContext],
+        cancellation_token: asyncio.Event | None = None,
     ) -> AsyncGenerator[Chunk, None]:
         """Stream generation with tool call handling loop."""
         max_tool_rounds = 10
         current_contents = list(contents)

         for _round_num in range(max_tool_rounds):
+            if cancellation_token and cancellation_token.is_set():
+                logger.info("Processing cancelled by user")
+                break
+
             response = await self.client.aio.models.generate_content(
                 model=model_name, contents=current_contents, config=config
             )
@@ -560,6 +570,7 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):
         model_name: str,
         contents: list[types.Content],
         config: types.GenerateContentConfig,
+        cancellation_token: asyncio.Event | None = None,
     ) -> AsyncGenerator[Chunk, None]:
         """Stream generation from Gemini model."""
         # generate_content_stream returns an awaitable that yields an async generator
@@ -567,6 +578,9 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):
             model=model_name, contents=contents, config=config
         )
         async for chunk in stream:
+            if cancellation_token and cancellation_token.is_set():
+                logger.info("Processing cancelled by user")
+                break
            processed = self._handle_chunk(chunk)
            if processed:
                yield processed
@@ -643,6 +657,9 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):
         content = candidate.content

         # List comprehension for text parts
+        if not content.parts:
+            return None
+
         text_parts = [part.text for part in content.parts if part.text]

         if text_parts:
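
Note: the new `if not content.parts` guard is worth calling out separately from the cancellation work. The GenAI SDK can deliver a candidate whose `content.parts` is `None` (for example, an empty or safety-blocked response), and iterating over it directly would raise a `TypeError` mid-stream; returning `None` lets the caller simply skip the chunk.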

src/appkit_assistant/backend/processors/lorem_ipsum_processor.py
@@ -6,6 +6,7 @@ import asyncio
 import logging
 import random
 from collections.abc import AsyncGenerator
+from typing import Any

 from appkit_assistant.backend.models import (
     AIModel,
@@ -58,6 +59,8 @@ class LoremIpsumProcessor(Processor):
         model_id: str,
         files: list[str] | None = None,  # noqa: ARG002
         mcp_servers: list[MCPServer] | None = None,  # noqa: ARG002
+        cancellation_token: asyncio.Event | None = None,
+        **kwargs: Any,  # noqa: ARG002
     ) -> AsyncGenerator[Chunk, None]:
         """
         Generate a Lorem Ipsum response of varying lengths based on the model_id.
@@ -67,6 +70,8 @@ class LoremIpsumProcessor(Processor):
             model_id: The model ID (determines response length).
             files: Optional list of files (ignored for this processor).
             mcp_servers: Optional list of MCP servers (ignored for this processor).
+            cancellation_token: Optional event to signal cancellation.
+            **kwargs: Additional arguments.

         Returns:
             An async generator that yields Chunk objects with text content.
@@ -84,9 +89,13 @@ class LoremIpsumProcessor(Processor):

         num_paragraphs = random.randint(4, 8)  # noqa: S311
         for i in range(num_paragraphs):
+            if cancellation_token and cancellation_token.is_set():
+                break
             paragraph = random.choice(LOREM_PARAGRAPHS)  # noqa: S311
             words = paragraph.split()
             for word in words:
+                if cancellation_token and cancellation_token.is_set():
+                    break
                 content = word + " "
                 await asyncio.sleep(0.01)
                 yield Chunk(
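
Note: the inner `break` only leaves the word loop; the paragraph loop then re-checks the token on its next pass and exits too, so cancellation takes effect within roughly one word's 10 ms delay. Since this processor streams slowly and needs no API key, it is presumably also the easiest place to exercise the new stop button locally.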

src/appkit_assistant/backend/processors/openai_chat_completion_processor.py
@@ -1,3 +1,4 @@
+import asyncio
 import logging
 from collections.abc import AsyncGenerator
 from typing import Any
@@ -27,6 +28,8 @@ class OpenAIChatCompletionsProcessor(BaseOpenAIProcessor):
         files: list[str] | None = None,  # noqa: ARG002
         mcp_servers: list[MCPServer] | None = None,
         payload: dict[str, Any] | None = None,
+        cancellation_token: asyncio.Event | None = None,
+        **kwargs: Any,  # noqa: ARG002
     ) -> AsyncGenerator[Chunk, None]:
         """Process messages using the Chat Completions API.

@@ -36,6 +39,8 @@ class OpenAIChatCompletionsProcessor(BaseOpenAIProcessor):
             files: File attachments (not used in chat completions)
             mcp_servers: MCP servers (will log warning if provided)
             payload: Additional payload parameters
+            cancellation_token: Optional event to signal cancellation
+            **kwargs: Additional arguments
         """
         if not self.client:
             raise ValueError("OpenAI Client not initialized.")
@@ -63,26 +68,46 @@

             if isinstance(session, AsyncStream):
                 async for event in session:
+                    if cancellation_token and cancellation_token.is_set():
+                        logger.info("Processing cancelled by user")
+                        break
                     if event.choices and event.choices[0].delta:
                         content = event.choices[0].delta.content
                         if content:
-                            yield self._create_chunk(content, model.model, stream=True)
+                            yield self._create_chunk(
+                                content,
+                                model.model,
+                                stream=True,
+                                message_id=event.id,
+                            )
             else:
                 content = session.choices[0].message.content
                 if content:
-                    yield self._create_chunk(content, model.model)
+                    yield self._create_chunk(
+                        content, model.model, message_id=session.id
+                    )
         except Exception as e:
             raise e

-    def _create_chunk(self, content: str, model: str, stream: bool = False) -> Chunk:
+    def _create_chunk(
+        self,
+        content: str,
+        model: str,
+        stream: bool = False,
+        message_id: str | None = None,
+    ) -> Chunk:
+        metadata = {
+            "source": "chat_completions",
+            "streaming": str(stream),
+            "model": model,
+        }
+        if message_id:
+            metadata["message_id"] = message_id
+
         return Chunk(
             type=ChunkType.TEXT,
             text=content,
-            chunk_metadata={
-                "source": "chat_completions",
-                "streaming": str(stream),
-                "model": model,
-            },
+            chunk_metadata=metadata,
         )

     def _convert_messages_to_openai_format(
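
Note: besides the cancellation poll, this processor now threads the provider's message id through the stream. `event.id` (streaming) or `session.id` (non-streaming) rides along in `chunk_metadata["message_id"]`, and the response accumulator adopts it; see the sketch after the response_accumulator.py hunk below.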

src/appkit_assistant/backend/processors/openai_responses_processor.py
@@ -1,3 +1,4 @@
+import asyncio
 import json
 import logging
 from collections.abc import AsyncGenerator
@@ -52,6 +53,7 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
         mcp_servers: list[MCPServer] | None = None,
         payload: dict[str, Any] | None = None,
         user_id: int | None = None,
+        cancellation_token: asyncio.Event | None = None,
     ) -> AsyncGenerator[Chunk, None]:
         """Process messages using simplified content accumulator pattern."""
         if not self.client:
@@ -73,6 +75,9 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
         try:
             if hasattr(session, "__aiter__"):  # Streaming
                 async for event in session:
+                    if cancellation_token and cancellation_token.is_set():
+                        logger.info("Processing cancelled by user")
+                        break
                     chunk = self._handle_event(event)
                     if chunk:
                         yield chunk

src/appkit_assistant/backend/processors/perplexity_processor.py
@@ -1,3 +1,4 @@
+import asyncio
 import enum
 import logging
 import os
@@ -90,6 +91,8 @@ class PerplexityProcessor(OpenAIChatCompletionsProcessor):
         files: list[str] | None = None,
         mcp_servers: list[MCPServer] | None = None,  # noqa: ARG002
         payload: dict[str, Any] | None = None,
+        cancellation_token: asyncio.Event | None = None,
+        **kwargs: Any,
     ) -> AsyncGenerator[Chunk, None]:
         if model_id not in self.models:
             logger.error("Model %s not supported by Perplexity processor", model_id)
@@ -117,5 +120,7 @@ class PerplexityProcessor(OpenAIChatCompletionsProcessor):
             files=files,
             mcp_servers=None,
             payload=perplexity_payload,
+            cancellation_token=cancellation_token,
+            **kwargs,
         ):
             yield response

src/appkit_assistant/backend/response_accumulator.py
@@ -52,6 +52,14 @@ class ResponseAccumulator:

     def process_chunk(self, chunk: Chunk) -> None:
         """Process a single chunk and update internal state."""
+        # Update message ID if provided in metadata
+        if (
+            self.messages
+            and self.messages[-1].type == MessageType.ASSISTANT
+            and "message_id" in chunk.chunk_metadata
+        ):
+            self.messages[-1].id = chunk.chunk_metadata["message_id"]
+
         if chunk.type == ChunkType.TEXT:
             if self.messages and self.messages[-1].type == MessageType.ASSISTANT:
                 self.messages[-1].text += chunk.text
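
The effect: an assistant Message starts life with a locally generated UUID and is re-keyed to the provider's id as soon as a chunk carries one, so the per-message actions keep addressing the right message. A self-contained sketch with simplified stand-ins for Chunk, Message, and the accumulator (the field names follow the diff; everything else is illustrative):

    from dataclasses import dataclass, field

    @dataclass
    class Msg:
        id: str
        type: str = "assistant"
        text: str = ""

    @dataclass
    class Chk:
        text: str
        chunk_metadata: dict[str, str] = field(default_factory=dict)

    def process_chunk(messages: list[Msg], chunk: Chk) -> None:
        # Adopt the provider-assigned id when a chunk carries one.
        if (
            messages
            and messages[-1].type == "assistant"
            and "message_id" in chunk.chunk_metadata
        ):
            messages[-1].id = chunk.chunk_metadata["message_id"]
        messages[-1].text += chunk.text

    msgs = [Msg(id="local-uuid")]
    process_chunk(msgs, Chk("Hel", {"message_id": "chatcmpl-abc123"}))
    process_chunk(msgs, Chk("lo"))
    print(msgs[0].id, msgs[0].text)  # chatcmpl-abc123 Hello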

src/appkit_assistant/components/composer.py
@@ -53,13 +53,25 @@ def composer_input(placeholder: str = "Frage etwas...") -> rx.Component:


 def submit() -> rx.Component:
-    return rx.fragment(
+    return rx.cond(
+        ThreadState.processing,
+        rx.tooltip(
+            rx.button(
+                rx.icon("square", size=14, fill="currentColor"),
+                on_click=ThreadState.request_cancellation,
+                color_scheme="red",
+                variant="solid",
+                type="button",
+                cursor="pointer",
+                loading=ThreadState.cancellation_requested,
+            ),
+            content="Stoppen",
+        ),
         rx.button(
             rx.icon("arrow-right", size=18),
             id="composer-submit",
             name="composer_submit",
             type="submit",
-            loading=ThreadState.processing,
         ),
     )

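Note: while a response is streaming, the submit arrow is swapped for a red stop button ("Stoppen", German for "Stop") wired to `request_cancellation`; binding `loading` to `cancellation_requested` keeps the button in a spinner state between the click and the moment the processor actually breaks out of its stream loop.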

src/appkit_assistant/components/message.py
@@ -12,6 +12,7 @@ from appkit_assistant.state.thread_state import (
     ThreadState,
 )
 from appkit_ui.components.collabsible import collabsible
+from appkit_ui.components.dialogs import delete_dialog

 message_styles = {
     "spacing": "4",
@@ -85,6 +86,70 @@ class AuthCardComponent:
         )


+class MessageActionsBar:
+    """Component for message action buttons (copy, download, retry)."""
+
+    @staticmethod
+    def render(message: Message) -> rx.Component:
+        return rx.hstack(
+            rx.tooltip(
+                rx.icon_button(
+                    rx.icon("copy", size=14),
+                    on_click=ThreadState.copy_message(message.text),
+                    variant="ghost",
+                    size="1",
+                    color_scheme="gray",
+                ),
+                content="Kopieren",
+            ),
+            rx.tooltip(
+                rx.icon_button(
+                    rx.icon("download", size=14),
+                    on_click=ThreadState.download_message(message.text, message.id),
+                    variant="ghost",
+                    size="1",
+                    color_scheme="gray",
+                ),
+                content="Herunterladen",
+            ),
+            rx.tooltip(
+                delete_dialog(
+                    title="Nachricht löschen",
+                    content="diese Nachricht",
+                    on_click=ThreadState.delete_message(message.id),
+                    icon_button=True,
+                    variant="ghost",
+                    size="1",
+                    color_scheme="gray",
+                ),
+                content="Löschen",
+            ),
+            rx.cond(
+                (message.type == MessageType.ASSISTANT)
+                | (message.type == MessageType.ERROR),
+                rx.tooltip(
+                    rx.icon_button(
+                        rx.cond(
+                            ThreadState.processing,
+                            rx.spinner(size="1"),
+                            rx.icon("refresh-cw", size=14),
+                        ),
+                        on_click=ThreadState.retry_message(message.id),
+                        variant="ghost",
+                        size="1",
+                        color_scheme="gray",
+                        disabled=ThreadState.processing,
+                    ),
+                    content="Erneut generieren (folgende Nachrichten werden entfernt)",
+                ),
+                rx.fragment(),
+            ),
+            spacing="3",
+            margin_top="-9px",
+            margin_left="9px",
+        )
+
+
 class MessageComponent:
     @staticmethod
     def _file_badge(filename: str) -> rx.Component:
@@ -116,31 +181,101 @@ class MessageComponent:

     @staticmethod
     def human_message(message: Message) -> rx.Component:
-        return rx.hstack(
-            rx.spacer(),
+        return rx.cond(
+            ThreadState.editing_message_id == message.id,
+            # Edit Mode
             rx.vstack(
-                rx.box(
-                    rx.text(
-                        message.text,
-                        padding="0.5em",
-                        border_radius="10px",
-                        white_space="pre-line",
+                rx.text_area(
+                    value=ThreadState.edited_message_content,
+                    on_change=ThreadState.set_edited_message_content,
+                    height="112px",
+                    width="824px",
+                    auto_focus=True,
+                    bg=rx.color("gray", 3),
+                    variant="soft",
+                ),
+                rx.hstack(
+                    rx.button(
+                        "Abbrechen",
+                        on_click=ThreadState.cancel_edit,
+                        variant="soft",
+                        color_scheme="gray",
                     ),
-                    padding="4px",
-                    max_width="100%",
-                    background_color=rx.color_mode_cond(
-                        light=rx.color("accent", 3),
-                        dark=rx.color("accent", 3),
+                    rx.button("Senden", on_click=ThreadState.submit_edited_message),
+                    justify="end",
+                    width="100%",
+                    spacing="2",
+                ),
+                style=message_styles,
+                align="end",
+            ),
+            rx.vstack(
+                rx.hstack(
+                    rx.spacer(),
+                    rx.vstack(
+                        rx.box(
+                            rx.text(
+                                message.text,
+                                padding="0.5em",
+                                border_radius="10px",
+                                white_space="pre-line",
+                            ),
+                            padding="4px",
+                            max_width="800px",
+                            background_color=rx.color_mode_cond(
+                                light=rx.color("accent", 3),
+                                dark=rx.color("accent", 3),
+                            ),
+                            border_radius="9px",
+                        ),
+                        MessageComponent._attachments_row(message.attachments),
+                        align="end",
+                        spacing="1",
                     ),
-                    border_radius="9px",
                 ),
-                MessageComponent._attachments_row(message.attachments),
+                rx.hstack(
+                    rx.spacer(),
+                    rx.tooltip(
+                        rx.icon_button(
+                            rx.icon("pencil", size=14),
+                            on_click=ThreadState.set_editing_mode(
+                                message.id, message.text
+                            ),
+                            variant="ghost",
+                            size="1",
+                            color_scheme="gray",
+                        ),
+                        content="Bearbeiten",
+                    ),
+                    rx.tooltip(
+                        delete_dialog(
+                            title="Nachricht löschen",
+                            content="diese Nachricht",
+                            on_click=ThreadState.delete_message(message.id),
+                            icon_button=True,
+                            variant="ghost",
+                            size="1",
+                            color_scheme="gray",
+                        ),
+                        content="Löschen",
+                    ),
+                    rx.tooltip(
+                        rx.icon_button(
+                            rx.icon("copy", size=14),
+                            on_click=ThreadState.copy_message(message.text),
+                            variant="ghost",
+                            size="1",
+                            color_scheme="gray",
+                        ),
+                        content="Kopieren",
+                    ),
+                    spacing="3",
+                    justify="end",
+                    margin_right="9px",
+                ),
                 align="end",
-                spacing="1",
+                style=message_styles,
             ),
-            max_width="80%",
-            margin_top="24px",
-            style=message_styles,
         )

     @staticmethod
@@ -190,12 +325,6 @@ class MessageComponent:
                 color=rx.color("gray", 8),
                 margin_right="9px",
             ),
-            rx.hstack(
-                rx.el.span(""),
-                rx.el.span(""),
-                rx.el.span(""),
-                rx.el.span(""),
-            ),
             class_name="loading",
             height="40px",
             color=rx.color("gray", 8),
@@ -206,23 +335,24 @@ class MessageComponent:
                     padding_right="18px",
                 ),
                 # Actual message content
-                mn.markdown_preview(
-                    source=message.text,
-                    enable_mermaid=message.done,
-                    enable_katex=message.done,
-                    security_level="standard",
+                rx.box(
+                    mn.markdown_preview(
+                        source=message.text,
+                        enable_mermaid=message.done,
+                        enable_katex=message.done,
+                        security_level="standard",
+                        class_name="markdown",
+                    ),
                     padding="0.5em",
-                    border_radius="9px",
+                    margin_top="18px",
                     max_width="90%",
-                    class_name="markdown",
                 ),
-                # rx.markdown(
-                #     message.text,
-                #     padding="0.5em",
-                #     border_radius="9px",
-                #     max_width="90%",
-                #     class_name="markdown",
-                # ),
+            ),
+            # Actions bar
+            rx.cond(
+                message.done,
+                MessageActionsBar.render(message),
+                rx.fragment(),
             ),
             spacing="3",
             width="100%",
@@ -263,7 +393,7 @@ class MessageComponent:
         )

     @staticmethod
-    def error_message(message: str) -> rx.Component:
+    def error_message(message: Message) -> rx.Component:
         return rx.hstack(
             rx.avatar(
                 fallback="!",
@@ -273,15 +403,20 @@ class MessageComponent:
                 margin_top="16px",
                 color_scheme="red",
             ),
-            rx.callout(
-                message,
-                icon="triangle-alert",
-                color_scheme="red",
-                max_width="90%",
-                size="1",
-                padding="0.5em",
-                border_radius="9px",
-                margin_top="18px",
+            rx.vstack(
+                rx.callout(
+                    message.text,
+                    icon="triangle-alert",
+                    color_scheme="red",
+                    max_width="100%",
+                    size="1",
+                    padding="0.5em",
+                    border_radius="9px",
+                    margin_top="18px",
+                ),
+                MessageActionsBar.render(message),
+                width="90%",
+                spacing="2",
             ),
             style=message_styles,
         )
@@ -328,7 +463,7 @@ class MessageComponent:
             ),
             (
                 MessageType.ERROR,
-                MessageComponent.error_message(message.text),
+                MessageComponent.error_message(message),
             ),
             (
                 MessageType.SYSTEM,
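
Note: changing `error_message` to take the whole `Message` instead of a plain string is what lets error bubbles carry the actions bar, since the retry button needs `message.id` to know where to truncate the conversation before regenerating.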

src/appkit_assistant/state/thread_state.py
@@ -10,10 +10,12 @@ This module contains ThreadState which manages the current active thread:
 See thread_list_state.py for ThreadListState which manages the thread list sidebar.
 """

+import asyncio
 import json
 import logging
 import uuid
 from collections.abc import AsyncGenerator
+from datetime import UTC, datetime
 from typing import Any

 import reflex as rx
@@ -60,6 +62,7 @@ class ThreadState(rx.State):
     ai_models: list[AIModel] = []
     selected_model: str = ""
     processing: bool = False
+    cancellation_requested: bool = False
     messages: list[Message] = []
     prompt: str = ""
     suggestions: list[Suggestion] = []
@@ -74,6 +77,10 @@ class ThreadState(rx.State):
     # File upload state
     uploaded_files: list[UploadedFile] = []

+    # Editing state
+    editing_message_id: str | None = None
+    edited_message_content: str = ""
+
     # Internal logic helper (not reactive)
     @property
     def _thread_service(self) -> ThreadService:
@@ -103,6 +110,8 @@ class ThreadState(rx.State):
     _current_user_id: str = ""
     _skip_user_message: bool = False  # Skip adding user message (for OAuth resend)
     _pending_file_cleanup: list[str] = []  # Files to delete after processing
+    # Internal cancellation event
+    _cancel_event: asyncio.Event | None = None

     # -------------------------------------------------------------------------
     # Computed properties
@@ -510,6 +519,67 @@ class ThreadState(rx.State):
     # Message processing
     # -------------------------------------------------------------------------

+    @rx.event
+    def set_editing_mode(self, message_id: str, content: str) -> None:
+        """Enable editing mode for a message."""
+        self.editing_message_id = message_id
+        self.edited_message_content = content
+
+    @rx.event
+    def set_edited_message_content(self, content: str) -> None:
+        """Set the content of the message currently being edited."""
+        self.edited_message_content = content
+
+    @rx.event
+    def cancel_edit(self) -> None:
+        """Cancel editing mode."""
+        self.editing_message_id = None
+        self.edited_message_content = ""
+
+    @rx.event(background=True)
+    async def submit_edited_message(self) -> AsyncGenerator[Any, Any]:
+        """Submit edited message."""
+        async with self:
+            content = self.edited_message_content.strip()
+            if len(content) < 1:
+                yield rx.toast.error(
+                    "Nachricht darf nicht leer sein", position="top-right"
+                )
+                return
+
+            # Find message index
+            msg_index = -1
+            for i, m in enumerate(self.messages):
+                if m.id == self.editing_message_id:
+                    msg_index = i
+                    break
+
+            if msg_index == -1:
+                self.cancel_edit()
+                return
+
+            target_message = self.messages[msg_index]
+
+            # Update message
+            target_message.original_text = (
+                target_message.original_text or target_message.text
+            )
+            target_message.text = content
+
+            # Remove all messages AFTER this one
+            self.messages = self.messages[: msg_index + 1]
+
+            # Set prompt to bypass empty check in _begin_message_processing
+            self.prompt = content
+            self._skip_user_message = True
+
+            # Clear edit state
+            self.editing_message_id = None
+            self.edited_message_content = ""
+
+            # Trigger processing
+            await self._process_message()
+
     @rx.event(background=True)
     async def submit_message(self) -> AsyncGenerator[Any, Any]:
         """Submit a message and process the response."""
@@ -524,6 +594,84 @@ class ThreadState(rx.State):
         }
         """)

+    @rx.event(background=True)
+    async def delete_message(self, message_id: str) -> None:
+        """Delete a message from the conversation."""
+        async with self:
+            self.messages = [m for m in self.messages if m.id != message_id]
+            self._thread.messages = self.messages
+
+            if self._thread.state != ThreadStatus.NEW:
+                await self._thread_service.save_thread(
+                    self._thread, self.current_user_id
+                )
+
+    @rx.event
+    def copy_message(self, text: str) -> list[Any]:
+        """Copy message text to clipboard."""
+        return [
+            rx.set_clipboard(text),
+            rx.toast.success("Nachricht kopiert"),
+        ]
+
+    @rx.event
+    def download_message(self, text: str, message_id: str) -> Any:
+        """Download message as markdown file."""
+        timestamp = datetime.now(UTC).strftime("%Y%m%d_%H%M%S")
+        filename = (
+            f"message_{message_id}_{timestamp}.md"
+            if message_id
+            else f"message_{timestamp}.md"
+        )
+
+        # Use JavaScript to trigger download
+        return rx.call_script(f"""
+            const blob = new Blob([{json.dumps(text)}], {{type: 'text/markdown'}});
+            const url = URL.createObjectURL(blob);
+            const a = document.createElement('a');
+            a.href = url;
+            a.download = '{filename}';
+            document.body.appendChild(a);
+            a.click();
+            document.body.removeChild(a);
+            URL.revokeObjectURL(url);
+        """)
+
+    @rx.event(background=True)
+    async def retry_message(self, message_id: str) -> None:
+        """Retry generating a message."""
+        async with self:
+            # Find message index
+            index = -1
+            for i, msg in enumerate(self.messages):
+                if msg.id == message_id:
+                    index = i
+                    break
+
+            if index == -1:
+                return
+
+            # Keep context up to this message,
+            # effectively removing this message and everything after
+            self.messages = self.messages[:index]
+
+            # Set prompt to bypass check (content checks)
+            self.prompt = "Regenerate"
+
+            # Flag to skip adding a new user message
+            self._skip_user_message = True
+
+            # Trigger processing directly
+            await self._process_message()
+
+    @rx.event
+    def request_cancellation(self) -> None:
+        """Signal that the current processing should be cancelled."""
+        self.cancellation_requested = True
+        if self._cancel_event:
+            self._cancel_event.set()
+        logger.info("Cancellation requested by user")
+
     async def _process_message(self) -> None:
         """Process the current message and stream the response."""
         logger.debug("Processing message: %s", self.prompt)
@@ -552,6 +700,9 @@ class ThreadState(rx.State):
         self.uploaded_files = []
         self._pending_file_cleanup = file_paths

+        # Initialize cancellation event
+        self._cancel_event = asyncio.Event()
+
         first_response_received = False
         try:
             async for chunk in processor.process(
@@ -560,6 +711,7 @@ class ThreadState(rx.State):
                 files=file_paths or None,
                 mcp_servers=mcp_servers,
                 user_id=user_id,
+                cancellation_token=self._cancel_event,
             ):
                 first_response_received = await self._handle_stream_chunk(
                     chunk=chunk,
@@ -741,7 +893,9 @@ class ThreadState(rx.State):
         if self.messages:
             self.messages[-1].done = True
         self.processing = False
+        self.cancellation_requested = False
         self.current_activity = ""
+        self._cancel_event = None

         # Clean up uploaded files from disk
         if self._pending_file_cleanup:
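
Note: taken together, the cancellation handshake reads: the stop button fires `request_cancellation`, which flips `cancellation_requested` (driving the button's spinner) and sets `_cancel_event`; each processor polls that event between chunks and breaks out of its stream; the finalizer then marks the last message done, clears both flags, and drops the event so the next submission starts with a fresh one.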