appkit-assistant 0.17.1__tar.gz → 0.17.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/PKG-INFO +1 -1
  2. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/pyproject.toml +1 -1
  3. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/processor.py +3 -0
  4. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/processors/claude_responses_processor.py +5 -0
  5. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/processors/gemini_responses_processor.py +17 -3
  6. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/processors/lorem_ipsum_processor.py +9 -0
  7. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/processors/openai_chat_completion_processor.py +8 -0
  8. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/processors/openai_responses_processor.py +5 -0
  9. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/processors/perplexity_processor.py +5 -0
  10. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/components/composer.py +14 -2
  11. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/state/thread_state.py +18 -0
  12. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/.gitignore +0 -0
  13. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/README.md +0 -0
  14. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/docs/assistant.png +0 -0
  15. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/file_manager.py +0 -0
  16. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/mcp_auth_service.py +0 -0
  17. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/model_manager.py +0 -0
  18. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/models.py +0 -0
  19. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/processors/claude_base.py +0 -0
  20. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/processors/gemini_base.py +0 -0
  21. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/processors/openai_base.py +0 -0
  22. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/repositories.py +0 -0
  23. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/response_accumulator.py +0 -0
  24. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/services/thread_service.py +0 -0
  25. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/backend/system_prompt_cache.py +0 -0
  26. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/components/__init__.py +0 -0
  27. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/components/composer_key_handler.py +0 -0
  28. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/components/mcp_oauth.py +0 -0
  29. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/components/mcp_server_dialogs.py +0 -0
  30. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/components/mcp_server_table.py +0 -0
  31. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/components/message.py +0 -0
  32. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/components/system_prompt_editor.py +0 -0
  33. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/components/thread.py +0 -0
  34. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/components/threadlist.py +0 -0
  35. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/components/tools_modal.py +0 -0
  36. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/configuration.py +0 -0
  37. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/pages.py +0 -0
  38. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/state/mcp_oauth_state.py +0 -0
  39. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/state/mcp_server_state.py +0 -0
  40. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/state/system_prompt_state.py +0 -0
  41. {appkit_assistant-0.17.1 → appkit_assistant-0.17.3}/src/appkit_assistant/state/thread_list_state.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: appkit-assistant
-Version: 0.17.1
+Version: 0.17.3
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/jenreh/appkit
 Project-URL: Documentation, https://github.com/jenreh/appkit/tree/main/docs

pyproject.toml
@@ -10,7 +10,7 @@ dependencies = [
     "reflex>=0.8.22",
 ]
 name = "appkit-assistant"
-version = "0.17.1"
+version = "0.17.3"
 description = "Add your description here"
 readme = "README.md"
 authors = [{ name = "Jens Rehpöhler" }]

src/appkit_assistant/backend/processor.py
@@ -3,6 +3,7 @@ Base processor interface for AI processing services.
 """
 
 import abc
+import asyncio
 import logging
 from collections.abc import AsyncGenerator
 
@@ -40,6 +41,7 @@ class Processor(abc.ABC):
         model_id: str,
         files: list[str] | None = None,
         mcp_servers: list[MCPServer] | None = None,
+        cancellation_token: asyncio.Event | None = None,
     ) -> AsyncGenerator[Chunk, None]:
         """
         Process the thread using an AI model.
@@ -49,6 +51,7 @@ class Processor(abc.ABC):
             model_id: The ID of the model to use.
             files: Optional list of file paths that were uploaded.
             mcp_servers: Optional list of MCP servers to use as tools.
+            cancellation_token: Optional event to signal cancellation.
 
         Returns:
             An async generator that yields Chunk objects containing different content
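
The hunks above extend the abstract process() contract with an optional cancellation_token. A minimal sketch of a conforming implementation — EchoProcessor, the `messages` parameter (the leading parameters are elided in the hunk), and the use of str as a stand-in for Chunk are all illustrative assumptions, not code from the package:

import asyncio
from collections.abc import AsyncGenerator


class EchoProcessor:
    """Hypothetical processor honoring the new optional token."""

    async def process(
        self,
        messages: list[str],  # stand-in; the real leading params are elided above
        model_id: str,
        files: list[str] | None = None,
        mcp_servers: list | None = None,
        cancellation_token: asyncio.Event | None = None,
    ) -> AsyncGenerator[str, None]:  # str stands in for Chunk
        for message in messages:
            # Cooperative cancellation: check the event before each unit of work,
            # matching the pattern the concrete processors below adopt.
            if cancellation_token and cancellation_token.is_set():
                break
            await asyncio.sleep(0)
            yield message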

src/appkit_assistant/backend/processors/claude_responses_processor.py
@@ -5,6 +5,7 @@ Supports MCP tools, file uploads (images and documents), extended thinking,
 and automatic citation extraction.
 """
 
+import asyncio
 import base64
 import json
 import logging
@@ -69,6 +70,7 @@ class ClaudeResponsesProcessor(BaseClaudeProcessor):
         mcp_servers: list[MCPServer] | None = None,
         payload: dict[str, Any] | None = None,
         user_id: int | None = None,
+        cancellation_token: asyncio.Event | None = None,
     ) -> AsyncGenerator[Chunk, None]:
         """Process messages using Claude Messages API with streaming."""
         if not self.client:
@@ -103,6 +105,9 @@ class ClaudeResponsesProcessor(BaseClaudeProcessor):
         # Process streaming events
         async with stream as response:
             async for event in response:
+                if cancellation_token and cancellation_token.is_set():
+                    logger.info("Processing cancelled by user")
+                    break
                 chunk = self._handle_event(event)
                 if chunk:
                     yield chunk
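
Because the Claude events are consumed inside an `async with` block, a `break` triggered by the token still exits through the context manager, so the stream's cleanup runs. A self-contained sketch of that shape — FakeStream is an assumed stand-in for the SDK stream, not the Anthropic client:

import asyncio


class FakeStream:
    """Hypothetical stand-in for a context-managed event stream."""

    def __init__(self, events: int) -> None:
        self._events = events

    async def __aenter__(self) -> "FakeStream":
        return self

    async def __aexit__(self, *exc: object) -> None:
        print("stream closed")  # cleanup still runs after a break

    def __aiter__(self) -> "FakeStream":
        return self

    async def __anext__(self) -> str:
        if self._events == 0:
            raise StopAsyncIteration
        self._events -= 1
        await asyncio.sleep(0.01)
        return "event"


async def consume(token: asyncio.Event) -> None:
    async with FakeStream(100) as response:
        async for event in response:
            if token.is_set():
                break  # exits through __aexit__, releasing the stream
            print(event)


async def main() -> None:
    token = asyncio.Event()
    task = asyncio.create_task(consume(token))
    await asyncio.sleep(0.05)
    token.set()
    await task


asyncio.run(main())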

src/appkit_assistant/backend/processors/gemini_responses_processor.py
@@ -2,6 +2,7 @@
 Gemini responses processor for generating AI responses using Google's GenAI API.
 """
 
+import asyncio
 import copy
 import json
 import logging
@@ -72,6 +73,7 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):
         mcp_servers: list[MCPServer] | None = None,
         payload: dict[str, Any] | None = None,
         user_id: int | None = None,
+        cancellation_token: asyncio.Event | None = None,
     ) -> AsyncGenerator[Chunk, None]:
         """Process messages using Google GenAI API with native MCP support."""
         if not self.client:
@@ -120,7 +122,7 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):
         try:
             # Generate content with MCP tools
             async for chunk in self._stream_with_mcp(
-                model.model, contents, config, mcp_sessions
+                model.model, contents, config, mcp_sessions, cancellation_token
             ):
                 yield chunk
 
@@ -193,11 +195,14 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):
         contents: list[types.Content],
         config: types.GenerateContentConfig,
         mcp_sessions: list[Any],
+        cancellation_token: asyncio.Event | None = None,
     ) -> AsyncGenerator[Chunk, None]:
         """Stream responses with MCP tool support."""
         if not mcp_sessions:
             # No MCP sessions, direct streaming
-            async for chunk in self._stream_generation(model_name, contents, config):
+            async for chunk in self._stream_generation(
+                model_name, contents, config, cancellation_token
+            ):
                 yield chunk
             return
 
@@ -225,7 +230,7 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):
 
         # Stream with automatic function calling loop
         async for chunk in self._stream_with_tool_loop(
-            model_name, contents, config, tool_contexts
+            model_name, contents, config, tool_contexts, cancellation_token
        ):
             yield chunk
 
@@ -235,12 +240,17 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):
         contents: list[types.Content],
         config: types.GenerateContentConfig,
         tool_contexts: list[MCPToolContext],
+        cancellation_token: asyncio.Event | None = None,
     ) -> AsyncGenerator[Chunk, None]:
         """Stream generation with tool call handling loop."""
         max_tool_rounds = 10
         current_contents = list(contents)
 
         for _round_num in range(max_tool_rounds):
+            if cancellation_token and cancellation_token.is_set():
+                logger.info("Processing cancelled by user")
+                break
+
             response = await self.client.aio.models.generate_content(
                 model=model_name, contents=current_contents, config=config
             )
@@ -560,6 +570,7 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):
         model_name: str,
         contents: list[types.Content],
         config: types.GenerateContentConfig,
+        cancellation_token: asyncio.Event | None = None,
     ) -> AsyncGenerator[Chunk, None]:
         """Stream generation from Gemini model."""
         # generate_content_stream returns an awaitable that yields an async generator
@@ -567,6 +578,9 @@ class GeminiResponsesProcessor(BaseGeminiProcessor):
             model=model_name, contents=contents, config=config
         )
         async for chunk in stream:
+            if cancellation_token and cancellation_token.is_set():
+                logger.info("Processing cancelled by user")
+                break
             processed = self._handle_chunk(chunk)
             if processed:
                 yield processed
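
The Gemini change is mostly plumbing: the optional token is forwarded through _stream_with_mcp, _stream_with_tool_loop, and _stream_generation, and only the innermost loops check it. Note that in the tool loop the check runs once per round, between generate_content calls, so a round already in flight completes before cancellation takes effect. A condensed sketch of the forwarding pattern, with the two method names reused as free functions purely for illustration:

import asyncio
from collections.abc import AsyncGenerator


async def _stream_generation(
    n: int, token: asyncio.Event | None = None
) -> AsyncGenerator[int, None]:
    # Innermost layer: the only place the token is actually checked.
    for i in range(n):
        if token and token.is_set():
            break
        await asyncio.sleep(0)
        yield i


async def _stream_with_mcp(
    n: int, token: asyncio.Event | None = None
) -> AsyncGenerator[int, None]:
    # Outer layer: no checking of its own, it only forwards the token down.
    async for item in _stream_generation(n, token):
        yield item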

src/appkit_assistant/backend/processors/lorem_ipsum_processor.py
@@ -6,6 +6,7 @@ import asyncio
 import logging
 import random
 from collections.abc import AsyncGenerator
+from typing import Any
 
 from appkit_assistant.backend.models import (
     AIModel,
@@ -58,6 +59,8 @@ class LoremIpsumProcessor(Processor):
         model_id: str,
         files: list[str] | None = None,  # noqa: ARG002
         mcp_servers: list[MCPServer] | None = None,  # noqa: ARG002
+        cancellation_token: asyncio.Event | None = None,
+        **kwargs: Any,  # noqa: ARG002
     ) -> AsyncGenerator[Chunk, None]:
         """
         Generate a Lorem Ipsum response of varying lengths based on the model_id.
@@ -67,6 +70,8 @@ class LoremIpsumProcessor(Processor):
             model_id: The model ID (determines response length).
             files: Optional list of files (ignored for this processor).
             mcp_servers: Optional list of MCP servers (ignored for this processor).
+            cancellation_token: Optional event to signal cancellation.
+            **kwargs: Additional arguments.
 
         Returns:
             An async generator that yields Chunk objects with text content.
@@ -84,9 +89,13 @@
 
         num_paragraphs = random.randint(4, 8)  # noqa: S311
         for i in range(num_paragraphs):
+            if cancellation_token and cancellation_token.is_set():
+                break
             paragraph = random.choice(LOREM_PARAGRAPHS)  # noqa: S311
             words = paragraph.split()
             for word in words:
+                if cancellation_token and cancellation_token.is_set():
+                    break
                 content = word + " "
                 await asyncio.sleep(0.01)
                 yield Chunk(
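
The lorem processor checks the event in both loops because a `break` in the inner word loop only exits that level; the paragraph-level check then ends generation on the next iteration. A runnable sketch of that double-check shape from the consumer's point of view — `words` is a simplified stand-in, not the processor itself:

import asyncio
from collections.abc import AsyncGenerator


async def words(token: asyncio.Event) -> AsyncGenerator[str, None]:
    # Same shape as the processor: outer and inner loops both consult the token.
    for _paragraph in range(4):
        if token.is_set():
            break
        for word in ["lorem", "ipsum", "dolor"]:
            if token.is_set():
                break
            await asyncio.sleep(0.01)
            yield word


async def main() -> None:
    token = asyncio.Event()
    count = 0
    async for _word in words(token):
        count += 1
        if count == 5:
            token.set()  # simulate the user pressing stop mid-stream
    print(f"stopped after {count} words")


asyncio.run(main())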

src/appkit_assistant/backend/processors/openai_chat_completion_processor.py
@@ -1,3 +1,4 @@
+import asyncio
 import logging
 from collections.abc import AsyncGenerator
 from typing import Any
@@ -27,6 +28,8 @@ class OpenAIChatCompletionsProcessor(BaseOpenAIProcessor):
         files: list[str] | None = None,  # noqa: ARG002
         mcp_servers: list[MCPServer] | None = None,
         payload: dict[str, Any] | None = None,
+        cancellation_token: asyncio.Event | None = None,
+        **kwargs: Any,  # noqa: ARG002
     ) -> AsyncGenerator[Chunk, None]:
         """Process messages using the Chat Completions API.
 
@@ -36,6 +39,8 @@ class OpenAIChatCompletionsProcessor(BaseOpenAIProcessor):
             files: File attachments (not used in chat completions)
             mcp_servers: MCP servers (will log warning if provided)
             payload: Additional payload parameters
+            cancellation_token: Optional event to signal cancellation
+            **kwargs: Additional arguments
         """
         if not self.client:
             raise ValueError("OpenAI Client not initialized.")
@@ -63,6 +68,9 @@ class OpenAIChatCompletionsProcessor(BaseOpenAIProcessor):
 
         if isinstance(session, AsyncStream):
             async for event in session:
+                if cancellation_token and cancellation_token.is_set():
+                    logger.info("Processing cancelled by user")
+                    break
                 if event.choices and event.choices[0].delta:
                     content = event.choices[0].delta.content
                     if content:

src/appkit_assistant/backend/processors/openai_responses_processor.py
@@ -1,3 +1,4 @@
+import asyncio
 import json
 import logging
 from collections.abc import AsyncGenerator
@@ -52,6 +53,7 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
         mcp_servers: list[MCPServer] | None = None,
         payload: dict[str, Any] | None = None,
         user_id: int | None = None,
+        cancellation_token: asyncio.Event | None = None,
     ) -> AsyncGenerator[Chunk, None]:
         """Process messages using simplified content accumulator pattern."""
         if not self.client:
@@ -73,6 +75,9 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
         try:
             if hasattr(session, "__aiter__"):  # Streaming
                 async for event in session:
+                    if cancellation_token and cancellation_token.is_set():
+                        logger.info("Processing cancelled by user")
+                        break
                     chunk = self._handle_event(event)
                     if chunk:
                         yield chunk

src/appkit_assistant/backend/processors/perplexity_processor.py
@@ -1,3 +1,4 @@
+import asyncio
 import enum
 import logging
 import os
@@ -90,6 +91,8 @@ class PerplexityProcessor(OpenAIChatCompletionsProcessor):
         files: list[str] | None = None,
         mcp_servers: list[MCPServer] | None = None,  # noqa: ARG002
         payload: dict[str, Any] | None = None,
+        cancellation_token: asyncio.Event | None = None,
+        **kwargs: Any,
     ) -> AsyncGenerator[Chunk, None]:
         if model_id not in self.models:
             logger.error("Model %s not supported by Perplexity processor", model_id)
@@ -117,5 +120,7 @@ class PerplexityProcessor(OpenAIChatCompletionsProcessor):
             files=files,
             mcp_servers=None,
             payload=perplexity_payload,
+            cancellation_token=cancellation_token,
+            **kwargs,
         ):
             yield response
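
PerplexityProcessor does no checking of its own: it rewrites the payload and forwards both the token and any extra keyword arguments to the parent OpenAIChatCompletionsProcessor.process, so the subclass stays transparent to cancellation. A generic sketch of that delegation pattern — Parent, Child, and the "illustrative" payload key are assumptions, not package code:

import asyncio
from collections.abc import AsyncGenerator
from typing import Any


class Parent:
    async def process(
        self,
        text: str,
        payload: dict[str, Any] | None = None,
        cancellation_token: asyncio.Event | None = None,
        **kwargs: Any,
    ) -> AsyncGenerator[str, None]:
        # Parent honors the token; the subclass never needs to.
        if not (cancellation_token and cancellation_token.is_set()):
            yield text


class Child(Parent):
    async def process(self, text, payload=None, cancellation_token=None, **kwargs):
        merged = {**(payload or {}), "illustrative": True}  # hypothetical rewrite
        # Forward the token and extra kwargs unchanged, as the diff does.
        async for chunk in super().process(
            text,
            payload=merged,
            cancellation_token=cancellation_token,
            **kwargs,
        ):
            yield chunk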

src/appkit_assistant/components/composer.py
@@ -53,13 +53,25 @@ def composer_input(placeholder: str = "Frage etwas...") -> rx.Component:
 
 
 def submit() -> rx.Component:
-    return rx.fragment(
+    return rx.cond(
+        ThreadState.processing,
+        rx.tooltip(
+            rx.button(
+                rx.icon("square", size=14, fill="currentColor"),
+                on_click=ThreadState.request_cancellation,
+                color_scheme="red",
+                variant="solid",
+                type="button",
+                cursor="pointer",
+                loading=ThreadState.cancellation_requested,
+            ),
+            content="Stoppen",
+        ),
         rx.button(
             rx.icon("arrow-right", size=18),
             id="composer-submit",
             name="composer_submit",
             type="submit",
-            loading=ThreadState.processing,
         ),
     )
 

src/appkit_assistant/state/thread_state.py
@@ -10,6 +10,7 @@ This module contains ThreadState which manages the current active thread:
 See thread_list_state.py for ThreadListState which manages the thread list sidebar.
 """
 
+import asyncio
 import json
 import logging
 import uuid
@@ -61,6 +62,7 @@ class ThreadState(rx.State):
     ai_models: list[AIModel] = []
     selected_model: str = ""
     processing: bool = False
+    cancellation_requested: bool = False
     messages: list[Message] = []
     prompt: str = ""
     suggestions: list[Suggestion] = []
@@ -108,6 +110,8 @@ class ThreadState(rx.State):
     _current_user_id: str = ""
     _skip_user_message: bool = False  # Skip adding user message (for OAuth resend)
     _pending_file_cleanup: list[str] = []  # Files to delete after processing
+    # Internal cancellation event
+    _cancel_event: asyncio.Event | None = None
 
     # -------------------------------------------------------------------------
     # Computed properties
@@ -660,6 +664,14 @@ class ThreadState(rx.State):
         # Trigger processing directly
         await self._process_message()
 
+    @rx.event
+    def request_cancellation(self) -> None:
+        """Signal that the current processing should be cancelled."""
+        self.cancellation_requested = True
+        if self._cancel_event:
+            self._cancel_event.set()
+        logger.info("Cancellation requested by user")
+
     async def _process_message(self) -> None:
         """Process the current message and stream the response."""
         logger.debug("Processing message: %s", self.prompt)
@@ -688,6 +700,9 @@ class ThreadState(rx.State):
         self.uploaded_files = []
         self._pending_file_cleanup = file_paths
 
+        # Initialize cancellation event
+        self._cancel_event = asyncio.Event()
+
         first_response_received = False
         try:
             async for chunk in processor.process(
@@ -696,6 +711,7 @@ class ThreadState(rx.State):
                 files=file_paths or None,
                 mcp_servers=mcp_servers,
                 user_id=user_id,
+                cancellation_token=self._cancel_event,
             ):
                 first_response_received = await self._handle_stream_chunk(
                     chunk=chunk,
@@ -877,7 +893,9 @@ class ThreadState(rx.State):
         if self.messages:
             self.messages[-1].done = True
         self.processing = False
+        self.cancellation_requested = False
         self.current_activity = ""
+        self._cancel_event = None
 
         # Clean up uploaded files from disk
         if self._pending_file_cleanup:
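
Taken together, ThreadState owns the event's lifecycle: a fresh asyncio.Event is created before streaming starts, passed to the processor as cancellation_token, set by request_cancellation, and cleared together with both flags when processing finishes. A stripped-down sketch of that lifecycle outside Reflex — the names here are illustrative, only the shape mirrors the diff:

import asyncio
from collections.abc import AsyncGenerator


async def stream(token: asyncio.Event) -> AsyncGenerator[str, None]:
    for i in range(1000):
        if token.is_set():  # the processor-side check
            break
        await asyncio.sleep(0.01)
        yield f"chunk {i}"


async def main() -> None:
    cancel_event: asyncio.Event | None = asyncio.Event()  # created per run
    try:
        async for chunk in stream(cancel_event):
            print(chunk)
            if chunk == "chunk 3":
                cancel_event.set()  # what request_cancellation does
    finally:
        cancel_event = None  # reset, mirroring the cleanup hunk


asyncio.run(main())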