appkit-assistant 0.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
import base64
import json
import logging
import re
import uuid
from collections.abc import AsyncGenerator
from enum import StrEnum
from re import Match
from typing import Any

import reflex as rx
from pydantic import BaseModel

from appkit_assistant.backend.model_manager import ModelManager
from appkit_assistant.backend.models import (
    AIModel,
    Chunk,
    ChunkType,
    MCPServer,
    Message,
    MessageType,
    Suggestion,
    ThreadModel,
    ThreadStatus,
)
from appkit_assistant.backend.repositories import MCPServerRepository

logger = logging.getLogger(__name__)

MERMAID_BLOCK_PATTERN = re.compile(
    r"```mermaid\s*\r?\n(.*?)```", re.IGNORECASE | re.DOTALL
)
BRACKET_PAIRS: dict[str, str] = {
    "[": "]",
    "(": ")",
    "{": "}",
    "<": ">",
}


def _escape_mermaid_label_newlines(block: str) -> str:
    """Convert literal newlines inside node labels to escaped sequences.

    Restores Mermaid labels whose ``\\n`` escapes were turned into raw
    newlines by a JSON roundtrip, while leaving the structural newlines
    between statements untouched.
    """
    if "\n" not in block:
        return block

    result: list[str] = []
    stack: list[str] = []
    for char in block:
        if stack:
            # Inside a bracketed label: repair newlines and track nesting.
            if char == "\r":
                continue
            if char == "\n":
                result.append("\\n")
                continue
            if char == stack[-1]:
                stack.pop()
                result.append(char)
                continue
            if char in BRACKET_PAIRS:
                stack.append(BRACKET_PAIRS[char])
                result.append(char)
                continue
            result.append(char)
            continue

        # Outside any label: opening brackets start label tracking,
        # every character passes through unchanged.
        if char in BRACKET_PAIRS:
            stack.append(BRACKET_PAIRS[char])
        result.append(char)

    return "".join(result)
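
# Editor's note: a quick illustration of the repair, using a hypothetical
# diagram whose escaped "\n" was turned into a raw newline by a JSON
# roundtrip (illustrative values, not a test from the package):
#
#     broken = "graph TD\n    A[line one\nline two] --> B"
#     fixed = _escape_mermaid_label_newlines(broken)
#     assert fixed == "graph TD\n    A[line one\\nline two] --> B"
#
# Only the newline inside the "[...]" label is escaped; the structural
# newline after "graph TD" is preserved.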


def _rehydrate_mermaid_text(text: str) -> str:
    """Restore Mermaid code blocks by escaping label newlines when needed."""
    if "```mermaid" not in text.lower():
        return text

    def _replace(match: Match[str]) -> str:
        code_block = match.group(1)
        repaired = _escape_mermaid_label_newlines(code_block)
        return f"```mermaid\n{repaired}```"

    try:
        return MERMAID_BLOCK_PATTERN.sub(_replace, text)
    except Exception as exc:  # pragma: no cover - defensive
        logger.debug("Failed to rehydrate mermaid text: %s", exc)
        return text
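
# Editor's note: sketch of the entry point on a stored message (hypothetical
# text, not from the package):
#
#     msg = "Diagramm:\n```mermaid\ngraph TD\n    A[x\ny] --> B\n```"
#     _rehydrate_mermaid_text(msg)
#     # -> "Diagramm:\n```mermaid\ngraph TD\n    A[x\\ny] --> B\n```"
#
# Text outside the fenced block is untouched, and messages without a
# "```mermaid" fence are returned as-is without running the regex.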


class ThinkingType(StrEnum):
    REASONING = "reasoning"
    TOOL_CALL = "tool_call"


class ThinkingStatus(StrEnum):
    IN_PROGRESS = "in_progress"
    COMPLETED = "completed"
    ERROR = "error"


class Thinking(BaseModel):
    type: ThinkingType
    id: str  # reasoning_session_id or tool_id
    text: str
    status: ThinkingStatus = ThinkingStatus.IN_PROGRESS
    tool_name: str | None = None
    parameters: str | None = None
    result: str | None = None
    error: str | None = None
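
# Editor's note: one model covers both "thinking" flavors; `type` is the
# discriminator and `id` carries either a reasoning session ID or a tool
# call ID (illustrative values, not from the package):
#
#     Thinking(type=ThinkingType.REASONING, id="reasoning_ab12cd34",
#              text="Weighing the options...")
#     Thinking(type=ThinkingType.TOOL_CALL, id="call_1", text="",
#              tool_name="search", parameters='{"q": "mcp"}')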


class ThreadState(rx.State):
    _thread: ThreadModel = ThreadModel(thread_id=str(uuid.uuid4()), prompt="")
    ai_models: list[AIModel] = []
    selected_model: str = ""
    processing: bool = False
    messages: list[Message] = []
    prompt: str = ""
    suggestions: list[Suggestion] = [Suggestion(prompt="Wie kann ich dir helfen?")]

    # Chunk processing state
    current_chunks: list[Chunk] = []
    thinking_items: list[Thinking] = []  # Consolidated reasoning and tool calls
    image_chunks: list[Chunk] = []
    show_thinking: bool = False
    thinking_expanded: bool = False
    current_activity: str = ""
    current_reasoning_session: str = ""  # Track current reasoning session

    # MCP Server tool support state
    selected_mcp_servers: list[MCPServer] = []
    show_tools_modal: bool = False
    available_mcp_servers: list[MCPServer] = []
    temp_selected_mcp_servers: list[int] = []
    server_selection_state: dict[int, bool] = {}

    # Thread list integration
    with_thread_list: bool = False

    def initialize(self) -> None:
        """Initialize the state."""
        model_manager = ModelManager()
        self.ai_models = model_manager.get_all_models()
        self.selected_model = model_manager.get_default_model()

        self._thread = ThreadModel(
            thread_id=str(uuid.uuid4()),
            title="Neuer Chat",
            prompt="",
            messages=[],
            state=ThreadStatus.NEW,
            ai_model=self.selected_model,
            active=True,
        )
        self.messages = []
        logger.debug("Initialized thread state: %s", self._thread)

    def set_thread(self, thread: ThreadModel) -> None:
        """Set the current thread model."""
        self._thread = thread
        self.messages = thread.messages
        self.selected_model = thread.ai_model
        logger.debug("Set current thread: %s", thread.thread_id)

    def set_prompt(self, prompt: str) -> None:
        """Set the current prompt."""
        self.prompt = prompt

    @rx.var
    def has_ai_models(self) -> bool:
        """Check if there are any chat models."""
        return len(self.ai_models) > 0

    @rx.var
    def has_suggestions(self) -> bool:
        """Check if there are any suggestions."""
        return self.suggestions is not None and len(self.suggestions) > 0

    @rx.var
    def get_ai_model(self) -> str | None:
        """Get the selected chat model."""
        return self.selected_model

    @rx.var
    def current_model_supports_tools(self) -> bool:
        """Check if the currently selected model supports tools."""
        if not self.selected_model:
            return False
        model = ModelManager().get_model(self.selected_model)
        return model.supports_tools if model else False

    @rx.var
    def unique_reasoning_sessions(self) -> list[str]:
        """Get unique reasoning session IDs."""
        return [
            item.id
            for item in self.thinking_items
            if item.type == ThinkingType.REASONING
        ]

    @rx.var
    def unique_tool_calls(self) -> list[str]:
        """Get unique tool call IDs."""
        return [
            item.id
            for item in self.thinking_items
            if item.type == ThinkingType.TOOL_CALL
        ]

    @rx.var
    def last_assistant_message_text(self) -> str:
        """Get the text of the last assistant message in the conversation."""
        for i in range(len(self.messages) - 1, -1, -1):
            if self.messages[i].type == MessageType.ASSISTANT:
                return self.messages[i].text
        return ""

    @rx.var
    def has_thinking_content(self) -> bool:
        """Check if there are any thinking items to display."""
        return len(self.thinking_items) > 0

    @rx.event
    def update_prompt(self, value: str) -> None:
        self.prompt = value

    @rx.event
    def set_suggestions(self, suggestions: list[Suggestion]) -> None:
        """Set custom suggestions for the thread."""
        self.suggestions = suggestions

    @rx.event
    def set_initial_suggestions(self, suggestions: list[dict | Suggestion]) -> None:
        """Set initial suggestions during page load.

        Can be called via an on_load callback to initialize suggestions
        from the assistant page or other sources.

        Args:
            suggestions: List of suggestions (dict or Suggestion objects) to display.
        """
        # Convert dicts to Suggestion objects
        # (Reflex serializes Pydantic models to dicts during event invocation)
        converted = []
        for item in suggestions:
            if isinstance(item, dict):
                converted.append(Suggestion(**item))
            elif isinstance(item, Suggestion):
                converted.append(item)
            else:
                logger.warning("Unknown suggestion type: %s", type(item))
        self.suggestions = converted
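
    # Editor's note: hypothetical on_load wiring (the page and suggestion
    # text are assumptions, not part of this module):
    #
    #     @rx.page(
    #         route="/assistant",
    #         on_load=ThreadState.set_initial_suggestions(
    #             [Suggestion(prompt="Erkläre mir MCP-Server")]
    #         ),
    #     )
    #     def assistant_page() -> rx.Component: ...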

    @rx.event
    def clear(self) -> None:
        self._thread.messages = []
        self._thread.state = ThreadStatus.NEW
        self._thread.ai_model = ModelManager().get_default_model()
        self._thread.active = True
        self._thread.prompt = ""
        self.prompt = ""
        self.messages = []
        self.selected_mcp_servers = []
        self.current_chunks = []
        self.thinking_items = []  # Clear thinking items only on explicit clear
        self.image_chunks = []
        self.show_thinking = False

    @rx.event(background=True)
    async def process_message(self) -> None:
        logger.debug("Sending message: %s", self.prompt)

        async with self:
            # Bail out if a message is already being processed
            if self.processing:
                return

            self.processing = True
            self._clear_chunks()
            # Clear thinking items for the new user question
            self.thinking_items = []

            current_prompt = self.prompt.strip()
            if not current_prompt:
                self.processing = False
                return

            self.prompt = ""

            # Add the user message and an empty assistant message
            self.messages.extend(
                [
                    Message(text=current_prompt, type=MessageType.HUMAN),
                    Message(text="", type=MessageType.ASSISTANT),
                ]
            )

            # Validate that a model is selected
            if not self.get_ai_model:
                self._add_error_message("Kein Chat-Modell ausgewählt")
                self.processing = False
                return

        # Get the processor outside the state context to avoid blocking
        processor = ModelManager().get_processor_for_model(self.get_ai_model)
        if not processor:
            async with self:
                self._add_error_message(
                    f"Keinen Adapter für das Modell gefunden: {self.get_ai_model}"
                )
                self.processing = False
            return

        try:
            # Process chunks as they stream in
            async for chunk in processor.process(
                self.messages,
                self.get_ai_model,
                mcp_servers=self.selected_mcp_servers,
            ):
                async with self:
                    self._handle_chunk(chunk)

            async with self:
                self.show_thinking = False

                # Update the thread if using the thread list
                if self.with_thread_list:
                    await self._update_thread_list()

        except Exception as ex:
            async with self:
                self.messages.pop()  # Remove the empty assistant message
                self.messages.append(Message(text=str(ex), type=MessageType.ERROR))
        finally:
            async with self:
                self.messages[-1].done = True
                self.processing = False
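
    # Editor's note: in Reflex, a background task (@rx.event(background=True))
    # may only mutate state inside "async with self" blocks, which is why the
    # streaming loop above re-acquires the lock per chunk while the processor
    # lookup and the stream itself stay outside it. A minimal sketch of the
    # same pattern, with a hypothetical async source:
    #
    #     @rx.event(background=True)
    #     async def stream(self):
    #         async for line in fetch_lines():  # hypothetical generator
    #             async with self:
    #                 self.lines.append(line)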

    @rx.event
    async def persist_current_thread(self, prompt: str = "") -> None:
        """Persist the current temporary thread to the thread list.

        Converts the temporary ThreadState._thread to a persistent entry in
        ThreadListState so it appears in the thread list. This is called
        when the user first submits a message. It is idempotent: calling it
        multiple times won't create duplicates if the thread is already in
        the list.

        Args:
            prompt: The user's message prompt (used for the thread title).
        """
        # Get ThreadListState to add the thread
        threadlist_state: ThreadListState = await self.get_state(ThreadListState)

        # Check if the thread already exists in the list (idempotency check)
        existing_thread = await threadlist_state.get_thread(self._thread.thread_id)
        if existing_thread:
            logger.debug("Thread already persisted: %s", self._thread.thread_id)
            return

        # Derive the thread title from the first message if it is still the default
        if self._thread.title in {"", "Neuer Chat"}:
            self._thread.title = prompt.strip() if prompt.strip() else "Neuer Chat"

        # Add the current thread to the thread list
        self._thread.active = True
        threadlist_state.threads.insert(0, self._thread)

        # Set it as the active thread in the list
        threadlist_state.active_thread_id = self._thread.thread_id

        # Save to local storage if autosave is enabled
        if threadlist_state.autosave:
            await threadlist_state.save_threads()

        logger.debug("Persisted thread: %s", self._thread.thread_id)

    @rx.event
    async def submit_message(self) -> AsyncGenerator[Any, Any]:
        """Submit a message and reset the textarea."""
        # Persist the current thread before processing the message,
        # passing the prompt so it can be used as the thread title
        await self.persist_current_thread(prompt=self.prompt)
        yield ThreadState.process_message

        yield rx.call_script("""
            const textarea = document.getElementById('composer-area');
            if (textarea) {
                textarea.value = '';
                textarea.style.height = 'auto';
                textarea.style.height = textarea.scrollHeight + 'px';
            }
        """)
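
    # Editor's note: this handler queues two effects from one event: the
    # background process_message task (via the yielded event spec) and a
    # rx.call_script DOM reset for the composer textarea. Yielding event
    # specs like this is the idiomatic Reflex way to chain follow-up work.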

    def _clear_chunks(self) -> None:
        """Clear all chunk categorization lists except thinking_items for display."""
        self.current_chunks = []
        # Don't clear thinking_items, so the thinking display for previous
        # messages is preserved
        self.image_chunks = []
        self.current_reasoning_session = ""  # Reset reasoning session for new message

    def _handle_chunk(self, chunk: Chunk) -> None:
        """Handle an incoming chunk based on its type."""
        self.current_chunks.append(chunk)

        if chunk.type == ChunkType.TEXT:
            self.messages[-1].text += chunk.text
        elif chunk.type in (ChunkType.THINKING, ChunkType.THINKING_RESULT):
            self._handle_reasoning_chunk(chunk)
        elif chunk.type in (
            ChunkType.TOOL_CALL,
            ChunkType.TOOL_RESULT,
            ChunkType.ACTION,
        ):
            self._handle_tool_chunk(chunk)
        elif chunk.type in (ChunkType.IMAGE, ChunkType.IMAGE_PARTIAL):
            self.image_chunks.append(chunk)
        elif chunk.type == ChunkType.COMPLETION:
            self.show_thinking = False
            logger.debug("Response generation completed")
        elif chunk.type == ChunkType.ERROR:
            self.messages.append(Message(text=chunk.text, type=MessageType.ERROR))
            logger.error("Chunk error: %s", chunk.text)
        else:
            logger.warning("Unhandled chunk type: %s - %s", chunk.type, chunk.text)
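
    # Editor's note: dispatch sketch, assuming Chunk accepts the keyword
    # arguments used throughout this module and that process_message has
    # already appended an empty assistant message (values hypothetical).
    # TEXT chunks append to that last message, so the reply streams in place:
    #
    #     state._handle_chunk(Chunk(type=ChunkType.TEXT, text="Hallo"))
    #     state._handle_chunk(Chunk(type=ChunkType.TEXT, text=", Welt"))
    #     # state.messages[-1].text == "Hallo, Welt"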

    def _handle_reasoning_chunk(self, chunk: Chunk) -> None:
        """Handle reasoning chunks by consolidating them into thinking items."""
        if chunk.type == ChunkType.THINKING:
            self.show_thinking = True
            logger.debug("Thinking: %s", chunk.text)

        reasoning_session = self._get_or_create_reasoning_session(chunk)
        existing_item = self._find_existing_reasoning_item(reasoning_session)

        if existing_item:
            self._update_existing_reasoning_item(existing_item, chunk)
        else:
            self._create_new_reasoning_item(reasoning_session, chunk)

    def _get_or_create_reasoning_session(self, chunk: Chunk) -> str:
        """Get the reasoning session ID from metadata or create a new one."""
        reasoning_session = chunk.chunk_metadata.get("reasoning_session")
        if reasoning_session:
            return reasoning_session

        # If no session ID is in the metadata, create separate sessions
        # based on context
        last_item = self.thinking_items[-1] if self.thinking_items else None

        # A new session is needed when there is none yet, when a tool call
        # interrupted the reasoning, or when the previous session completed
        should_create_new_session = (
            not self.current_reasoning_session
            or (last_item and last_item.type == ThinkingType.TOOL_CALL)
            or (
                last_item
                and last_item.type == ThinkingType.REASONING
                and last_item.status == ThinkingStatus.COMPLETED
            )
        )

        if should_create_new_session:
            self.current_reasoning_session = f"reasoning_{uuid.uuid4().hex[:8]}"

        return self.current_reasoning_session

    def _find_existing_reasoning_item(self, reasoning_session: str) -> Thinking | None:
        """Find an existing reasoning item by session ID."""
        for item in self.thinking_items:
            if item.type == ThinkingType.REASONING and item.id == reasoning_session:
                return item
        return None

    def _update_existing_reasoning_item(
        self, existing_item: Thinking, chunk: Chunk
    ) -> None:
        """Update an existing reasoning item with new chunk data."""
        if chunk.type == ChunkType.THINKING:
            if existing_item.text:
                existing_item.text += f"\n{chunk.text}"
            else:
                existing_item.text = chunk.text
        elif chunk.type == ChunkType.THINKING_RESULT:
            existing_item.status = ThinkingStatus.COMPLETED
            if chunk.text:
                existing_item.text += f" {chunk.text}"
        # Trigger Reflex reactivity by reassigning the list
        self.thinking_items = self.thinking_items.copy()

    def _create_new_reasoning_item(self, reasoning_session: str, chunk: Chunk) -> None:
        """Create a new reasoning item."""
        status = (
            ThinkingStatus.COMPLETED
            if chunk.type == ChunkType.THINKING_RESULT
            else ThinkingStatus.IN_PROGRESS
        )
        new_item = Thinking(
            type=ThinkingType.REASONING,
            id=reasoning_session,
            text=chunk.text,
            status=status,
        )
        self.thinking_items = [*self.thinking_items, new_item]

    def _handle_tool_chunk(self, chunk: Chunk) -> None:
        """Handle tool chunks by consolidating them into thinking items."""
        tool_id = chunk.chunk_metadata.get("tool_id")
        if not tool_id:
            # Generate a tool ID if none was provided
            tool_count = len(
                [i for i in self.thinking_items if i.type == ThinkingType.TOOL_CALL]
            )
            tool_id = f"tool_{tool_count}"

        # Find the existing tool item or create a new one
        existing_item = self._find_existing_tool_item(tool_id)

        if existing_item:
            self._update_existing_tool_item(existing_item, chunk)
        else:
            self._create_new_tool_item(tool_id, chunk)

        logger.debug("Tool event: %s - %s", chunk.type, chunk.text)

    def _find_existing_tool_item(self, tool_id: str) -> Thinking | None:
        """Find an existing tool item by ID."""
        for item in self.thinking_items:
            if item.type == ThinkingType.TOOL_CALL and item.id == tool_id:
                return item
        return None

    def _update_existing_tool_item(self, existing_item: Thinking, chunk: Chunk) -> None:
        """Update an existing tool item with new chunk data."""
        if chunk.type == ChunkType.TOOL_CALL:
            # Store parameters separately from text
            existing_item.parameters = chunk.chunk_metadata.get(
                "parameters", chunk.text
            )
            existing_item.text = chunk.chunk_metadata.get("description", "")
            # Only set tool_name if it's not already present
            if not existing_item.tool_name:
                existing_item.tool_name = chunk.chunk_metadata.get(
                    "tool_name", "Unknown"
                )
            existing_item.status = ThinkingStatus.IN_PROGRESS
        elif chunk.type == ChunkType.TOOL_RESULT:
            self._handle_tool_result(existing_item, chunk)
        elif chunk.type == ChunkType.ACTION:
            existing_item.text += f"\n---\nAktion: {chunk.text}"
        # Trigger Reflex reactivity by reassigning the list
        self.thinking_items = self.thinking_items.copy()

    def _handle_tool_result(self, existing_item: Thinking, chunk: Chunk) -> None:
        """Handle a tool result chunk."""
        # Check if this is an error result
        is_error = (
            "error" in chunk.text.lower()
            or "failed" in chunk.text.lower()
            or chunk.chunk_metadata.get("error")
        )
        existing_item.status = (
            ThinkingStatus.ERROR if is_error else ThinkingStatus.COMPLETED
        )
        # Store the result separately from the text
        existing_item.result = chunk.text
        if is_error:
            existing_item.error = chunk.text

    def _create_new_tool_item(self, tool_id: str, chunk: Chunk) -> None:
        """Create a new tool item."""
        tool_name = chunk.chunk_metadata.get("tool_name", "Unknown")
        status = ThinkingStatus.IN_PROGRESS
        text = ""
        parameters = None
        result = None

        if chunk.type == ChunkType.TOOL_CALL:
            # Store parameters separately from text
            parameters = chunk.chunk_metadata.get("parameters", chunk.text)
            text = chunk.chunk_metadata.get("description", "")
        elif chunk.type == ChunkType.TOOL_RESULT:
            is_error = "error" in chunk.text.lower() or "failed" in chunk.text.lower()
            status = ThinkingStatus.ERROR if is_error else ThinkingStatus.COMPLETED
            result = chunk.text
        else:
            text = chunk.text

        new_item = Thinking(
            type=ThinkingType.TOOL_CALL,
            id=tool_id,
            text=text,
            status=status,
            tool_name=tool_name,
            parameters=parameters,
            result=result,
            error=chunk.text if status == ThinkingStatus.ERROR else None,
        )
        self.thinking_items = [*self.thinking_items, new_item]
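
    # Editor's note: tool-call lifecycle sketch (Chunk fields assumed from
    # the usage above, values hypothetical): a TOOL_CALL opens the item and
    # a TOOL_RESULT carrying the same tool_id completes it.
    #
    #     state._handle_tool_chunk(Chunk(
    #         type=ChunkType.TOOL_CALL, text="",
    #         chunk_metadata={"tool_id": "t1", "tool_name": "search",
    #                         "parameters": '{"q": "reflex"}'}))
    #     state._handle_tool_chunk(Chunk(
    #         type=ChunkType.TOOL_RESULT, text="3 Treffer",
    #         chunk_metadata={"tool_id": "t1"}))
    #     # thinking_items[-1].status == ThinkingStatus.COMPLETED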

    def _add_error_message(self, error_msg: str) -> None:
        """Add an error message to the conversation."""
        logger.error(error_msg)
        self.messages.append(Message(text=error_msg, type=MessageType.ERROR))

    async def _update_thread_list(self) -> None:
        """Update the thread list with the current messages."""
        threadlist_state: ThreadListState = await self.get_state(ThreadListState)
        if self._thread.title in {"", "Neuer Chat"}:
            self._thread.title = (
                self.messages[0].text if self.messages else "Neuer Chat"
            )

        self._thread.messages = self.messages
        self._thread.ai_model = self.selected_model
        await threadlist_state.update_thread(self._thread)

    def toggle_thinking_expanded(self) -> None:
        """Toggle the expanded state of the thinking section."""
        self.thinking_expanded = not self.thinking_expanded

    # MCP Server tool support event handlers
    @rx.event
    async def load_available_mcp_servers(self) -> None:
        """Load available MCP servers from the database."""
        self.available_mcp_servers = await MCPServerRepository.get_all()

    @rx.event
    def open_tools_modal(self) -> None:
        """Open the tools modal."""
        self.temp_selected_mcp_servers = [
            server.id for server in self.selected_mcp_servers if server.id
        ]
        self.server_selection_state = {
            server.id: server.id in self.temp_selected_mcp_servers
            for server in self.available_mcp_servers
            if server.id
        }
        self.show_tools_modal = True

    @rx.event
    def set_show_tools_modal(self, show: bool) -> None:
        """Set the visibility of the tools modal."""
        self.show_tools_modal = show

    @rx.event
    def toggle_mcp_server_selection(self, server_id: int, selected: bool) -> None:
        """Toggle MCP server selection in the modal."""
        self.server_selection_state[server_id] = selected
        if selected and server_id not in self.temp_selected_mcp_servers:
            self.temp_selected_mcp_servers.append(server_id)
        elif not selected and server_id in self.temp_selected_mcp_servers:
            self.temp_selected_mcp_servers.remove(server_id)

    @rx.event
    def apply_mcp_server_selection(self) -> None:
        """Apply the temporary MCP server selection."""
        self.selected_mcp_servers = [
            server
            for server in self.available_mcp_servers
            if server.id in self.temp_selected_mcp_servers
        ]
        self.show_tools_modal = False

    def is_mcp_server_selected(self, server_id: int) -> bool:
        """Check if an MCP server is selected."""
        return server_id in self.temp_selected_mcp_servers

    def set_selected_model(self, model_id: str) -> None:
        """Set the selected model."""
        self.selected_model = model_id
        self._thread.ai_model = model_id


class ThreadListState(rx.State):
    """State for the thread list component."""

    thread_store: str = rx.LocalStorage("{}", name="asui-threads", sync=True)
    threads: list[ThreadModel] = []
    active_thread_id: str = ""
    autosave: bool = False

    @rx.var
    def has_threads(self) -> bool:
        """Check if there are any threads."""
        return len(self.threads) > 0

    async def initialize(
        self, autosave: bool = False, auto_create_default: bool = False
    ) -> None:
        """Initialize the thread list state.

        Args:
            autosave: Enable auto-saving threads to local storage.
            auto_create_default: If True, create and select a default thread
                when no threads exist (e.g., on first load or after clearing).
        """
        self.autosave = autosave
        await self.load_threads()

        # Auto-create a default thread if enabled and no threads exist
        if auto_create_default and not self.has_threads:
            await self.create_thread()

        logger.debug("Initialized thread list state")

    async def load_threads(self) -> None:
        """Load threads from browser local storage."""
        try:
            thread_data = json.loads(self.thread_store)
            if thread_data and "threads" in thread_data:
                processed_threads: list[ThreadModel] = []
                needs_upgrade = False
                for thread in thread_data["threads"]:
                    thread_payload = dict(thread)
                    messages_payload: list[dict[str, Any]] = []
                    for message in thread_payload.get("messages", []):
                        msg_data = dict(message)
                        encoded = msg_data.pop("text_b64", None)
                        if encoded is not None:
                            try:
                                msg_data["text"] = base64.b64decode(encoded).decode(
                                    "utf-8"
                                )
                            except Exception as exc:
                                logger.warning(
                                    "Failed to decode stored message: %s", exc
                                )
                                msg_data["text"] = _rehydrate_mermaid_text(
                                    msg_data.get("text", "")
                                )
                                needs_upgrade = True
                        else:
                            msg_data["text"] = _rehydrate_mermaid_text(
                                msg_data.get("text", "")
                            )
                            needs_upgrade = True
                        messages_payload.append(msg_data)
                    thread_payload["messages"] = messages_payload
                    processed_threads.append(ThreadModel(**thread_payload))

                self.threads = processed_threads
                self.active_thread_id = thread_data.get("active_thread_id", "")
                if self.active_thread_id:
                    await self.select_thread(self.active_thread_id)
                if needs_upgrade:
                    await self.save_threads()
        except Exception as e:
            logger.error("Error loading threads from local storage: %s", e)
            self.threads = []
            self.active_thread_id = ""

    async def save_threads(self) -> None:
        """Save threads to browser local storage."""
        try:
            thread_list = []
            for thread in self.threads:
                thread_dict = thread.dict()
                encoded_messages: list[dict[str, Any]] = []
                for message in thread.messages:
                    msg_dict = message.dict()
                    text_value = msg_dict.get("text", "")
                    if isinstance(text_value, str):
                        try:
                            msg_dict["text_b64"] = base64.b64encode(
                                text_value.encode("utf-8")
                            ).decode("ascii")
                        except Exception as exc:
                            logger.warning("Failed to encode message text: %s", exc)
                            msg_dict["text_b64"] = None
                    else:
                        msg_dict["text_b64"] = None
                    encoded_messages.append(msg_dict)
                thread_dict["messages"] = encoded_messages
                thread_list.append(thread_dict)

            thread_data = {
                "threads": thread_list,
                "active_thread_id": self.active_thread_id,
            }
            self.thread_store = json.dumps(thread_data)
            logger.debug("Saved threads to local storage")
        except Exception as e:
            logger.error("Error saving threads to local storage: %s", e)
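
    # Editor's note: the base64 detour protects message text (for example the
    # "\n" escapes inside Mermaid labels) from lossy JSON/localStorage
    # roundtrips. The stored payload looks roughly like this (values
    # illustrative):
    #
    #     {"threads": [{"thread_id": "...", "messages": [
    #         {"text": "...", "text_b64": "Li4u"}]}],
    #      "active_thread_id": "..."}
    #
    # On load, "text_b64" wins when present; legacy entries (and failed
    # decodes) fall back to _rehydrate_mermaid_text on the raw "text" and
    # trigger a re-save in the upgraded format.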

    async def reset_thread_store(self) -> None:
        """Reset the persisted thread store to an empty payload."""
        self.thread_store = "{}"

    async def get_thread(self, thread_id: str) -> ThreadModel | None:
        """Get a thread by its ID."""
        for thread in self.threads:
            if thread.thread_id == thread_id:
                return thread
        return None

    async def create_thread(self) -> None:
        """Create a new thread."""
        new_thread = ThreadModel(
            thread_id=str(uuid.uuid4()),
            title="Neuer Chat",
            prompt="",
            messages=[],
            state=ThreadStatus.NEW,
            ai_model=ModelManager().get_default_model(),
            active=True,
        )
        self.threads.insert(0, new_thread)
        await self.select_thread(new_thread.thread_id)

        logger.debug("Created new thread: %s", new_thread)

    async def update_thread(self, thread: ThreadModel) -> None:
        """Update a thread."""
        existing_thread = await self.get_thread(thread.thread_id)
        if existing_thread:
            existing_thread.title = thread.title
            existing_thread.messages = thread.messages
            existing_thread.state = thread.state
            existing_thread.active = thread.active
            existing_thread.ai_model = thread.ai_model

            if self.autosave:
                await self.save_threads()
            logger.debug("Updated thread: %s", thread.thread_id)

    async def delete_thread(self, thread_id: str) -> AsyncGenerator[Any, Any]:
        """Delete a thread."""
        thread = await self.get_thread(thread_id)
        if not thread:
            yield rx.toast.error(
                "Chat nicht gefunden.", position="top-right", close_button=True
            )
            logger.warning("Thread with ID %s not found.", thread_id)
            return

        was_active = thread_id == self.active_thread_id
        self.threads.remove(thread)
        await self.save_threads()
        yield rx.toast.info(
            f"Chat '{thread.title}' erfolgreich gelöscht.",
            position="top-right",
            close_button=True,
        )

        # If the deleted thread was active, clear ThreadState and show the
        # empty view; the user can then select one of the remaining threads
        # or create a new one
        if was_active:
            thread_state: ThreadState = await self.get_state(ThreadState)
            thread_state.initialize()
            self.active_thread_id = ""

    async def select_thread(self, thread_id: str) -> None:
        """Select a thread."""
        for thread in self.threads:
            thread.active = thread.thread_id == thread_id
        self.active_thread_id = thread_id
        active_thread = await self.get_thread(thread_id)

        if active_thread:
            thread_state: ThreadState = await self.get_state(ThreadState)
            thread_state.set_thread(active_thread)
            thread_state.messages = active_thread.messages
            thread_state.selected_model = active_thread.ai_model
            thread_state.with_thread_list = True