chat-console 0.2.5__tar.gz → 0.2.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {chat_console-0.2.5 → chat_console-0.2.8}/PKG-INFO +1 -1
- {chat_console-0.2.5 → chat_console-0.2.8}/app/__init__.py +1 -1
- {chat_console-0.2.5 → chat_console-0.2.8}/app/main.py +150 -161
- {chat_console-0.2.5 → chat_console-0.2.8}/chat_console.egg-info/PKG-INFO +1 -1
- {chat_console-0.2.5 → chat_console-0.2.8}/LICENSE +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/README.md +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/api/__init__.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/api/anthropic.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/api/base.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/api/ollama.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/api/openai.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/config.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/database.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/models.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/ui/__init__.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/ui/chat_interface.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/ui/chat_list.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/ui/model_browser.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/ui/model_selector.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/ui/search.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/ui/styles.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/app/utils.py +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/chat_console.egg-info/SOURCES.txt +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/chat_console.egg-info/dependency_links.txt +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/chat_console.egg-info/entry_points.txt +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/chat_console.egg-info/requires.txt +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/chat_console.egg-info/top_level.txt +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/setup.cfg +0 -0
- {chat_console-0.2.5 → chat_console-0.2.8}/setup.py +0 -0
{chat_console-0.2.5 → chat_console-0.2.8}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chat-console
-Version: 0.2.5
+Version: 0.2.8
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway
{chat_console-0.2.5 → chat_console-0.2.8}/app/main.py

@@ -302,7 +302,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         Binding("q", "quit", "Quit", show=True, key_display="q"),
         # Removed binding for "n" (new chat) since there's a dedicated button
         Binding("c", "action_new_conversation", "New Chat", show=False, key_display="c", priority=True), # Keep alias with priority
-        Binding("escape", "
+        Binding("escape", "action_escape", "Cancel / Stop", show=True, key_display="esc"), # Updated to call our async method
         Binding("ctrl+c", "quit", "Quit", show=False),
         Binding("h", "view_history", "History", show=True, key_display="h", priority=True), # Add priority
         Binding("s", "settings", "Settings", show=True, key_display="s", priority=True), # Add priority
@@ -463,7 +463,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         await self.create_new_conversation() # Keep SimpleChatApp action_new_conversation
         log("action_new_conversation finished") # Added log
 
-    def action_escape(self) -> None:
+    async def action_escape(self) -> None:
         """Handle escape key globally."""
         log("action_escape triggered")
         settings_panel = self.query_one("#settings-panel")
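Note on the hunk above: Textual awaits action handlers, so turning action_escape() into a coroutine is what allows it to await asynchronous cleanup (the client-side stream cancellation added in the next hunk). A minimal standalone sketch of that binding/async-action pattern; the app, widget, and action names here are illustrative, not the package's code:

import asyncio

from textual.app import App, ComposeResult
from textual.binding import Binding
from textual.widgets import Static


class EscapeDemo(App):
    # priority=True lets the app-level binding fire even while an input has focus
    BINDINGS = [Binding("escape", "stop", "Stop", show=True, priority=True)]

    def compose(self) -> ComposeResult:
        yield Static("press esc", id="status")

    async def action_stop(self) -> None:
        # Textual awaits async action methods, so slow cleanup such as
        # closing an HTTP stream can be awaited here before returning.
        await asyncio.sleep(0.1)  # stand-in for `await client.cancel_stream()`
        self.query_one("#status", Static).update("stopped")


if __name__ == "__main__":
    EscapeDemo().run()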
@@ -477,6 +477,18 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
            log("Attempting to cancel generation task")
            if self.current_generation_task and not self.current_generation_task.done():
                log("Cancelling active generation task.")
+                # Get the client for the current model
+                try:
+                    model = self.selected_model
+                    client = BaseModelClient.get_client_for_model(model)
+                    # Call the client's cancel method if it's an Ollama client
+                    if hasattr(client, 'cancel_stream'):
+                        log("Calling client.cancel_stream() to terminate API session")
+                        await client.cancel_stream()
+                except Exception as e:
+                    log.error(f"Error cancelling client stream: {str(e)}")
+
+                # Now cancel the asyncio task
                self.current_generation_task.cancel()
                # The finally block in generate_response will handle is_generating = False and UI updates
                self.notify("Stopping generation...", severity="warning", timeout=2) # Notify user immediately
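The hasattr() check above is defensive: only some clients implement cancel_stream(), and app/api/ollama.py itself is unchanged in this diff, so the real implementation is not visible here. A hypothetical sketch of the kind of hook being probed for (every name in it is an assumption):

import aiohttp


class StreamingClient:
    """Illustrative streaming client that can abort an in-flight request."""

    def __init__(self) -> None:
        self._session: aiohttp.ClientSession | None = None  # opened per request
        self._cancelled = False

    async def cancel_stream(self) -> None:
        # Flag the read loop to stop consuming chunks promptly...
        self._cancelled = True
        # ...and close the session so the underlying HTTP connection is torn
        # down, which is what actually stops the server-side generation.
        if self._session is not None:
            await self._session.close()
            self._session = None

In the diff above the call is wrapped in its own try/except, so a client without the method, or a failed close, still falls through to current_generation_task.cancel().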
@@ -608,166 +620,143 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         # Focus back on input
         input_widget.focus()
 
-    async def generate_response(self) -> None:
-        """Generate an AI response."""
-        if not self.current_conversation or not self.messages:
-            return
-
-        self.is_generating = True
-        log(
-        loading = self.query_one("#loading-indicator")
-        loading.remove_class("hidden")
-
-        try:
-            # Get conversation parameters
-            model = self.selected_model
-            style = self.selected_style
-
-            # Convert messages to API format
-            api_messages = []
-            for msg in self.messages:
-                api_messages.append({
-                    "role": msg.role,
-                    "content": msg.content
-                })
-
-            # Get appropriate client
-            try: # Keep SimpleChatApp generate_response
-                client = BaseModelClient.get_client_for_model(model) # Keep SimpleChatApp generate_response
-                if client is None: # Keep SimpleChatApp generate_response
-                    raise Exception(f"No client available for model: {model}") # Keep SimpleChatApp generate_response
-            except Exception as e: # Keep SimpleChatApp generate_response
-                self.notify(f"Failed to initialize model client: {str(e)}", severity="error") # Keep SimpleChatApp generate_response
-                return # Keep SimpleChatApp generate_response
-
-            # Start streaming response # Keep SimpleChatApp generate_response
-            assistant_message = Message(role="assistant", content="Thinking...") # Keep SimpleChatApp generate_response
-            self.messages.append(assistant_message) # Keep SimpleChatApp generate_response
-            messages_container = self.query_one("#messages-container") # Keep SimpleChatApp generate_response
-            message_display = MessageDisplay(assistant_message, highlight_code=CONFIG["highlight_code"]) # Keep SimpleChatApp generate_response
-            messages_container.mount(message_display) # Keep SimpleChatApp generate_response
-            messages_container.scroll_end(animate=False) # Keep SimpleChatApp generate_response
-
-            # Add small delay to show thinking state # Keep SimpleChatApp generate_response
-            await asyncio.sleep(0.5) # Keep SimpleChatApp generate_response
-
-            # Stream chunks to the UI with synchronization # Keep SimpleChatApp generate_response
-            update_lock = asyncio.Lock() # Keep SimpleChatApp generate_response
-
-            async def update_ui(content: str): # Keep SimpleChatApp generate_response
-                if not self.is_generating: # Keep SimpleChatApp generate_response
-                    log("update_ui called but is_generating is False, returning.") # Added log
-                    return # Keep SimpleChatApp generate_response
-
-                async with update_lock: # Keep SimpleChatApp generate_response
-                    try: # Keep SimpleChatApp generate_response
-                        # Clear thinking indicator on first content # Keep SimpleChatApp generate_response
-                        if assistant_message.content == "Thinking...": # Keep SimpleChatApp generate_response
-                            assistant_message.content = "" # Keep SimpleChatApp generate_response
-
-                        # Update message with full content so far # Keep SimpleChatApp generate_response
-                        assistant_message.content = content # Keep SimpleChatApp generate_response
-                        # Update UI with full content # Keep SimpleChatApp generate_response
-                        await message_display.update_content(content) # Keep SimpleChatApp generate_response
-                        # Force a refresh and scroll # Keep SimpleChatApp generate_response
-                        self.refresh(layout=True) # Keep SimpleChatApp generate_response
-                        await asyncio.sleep(0.05) # Longer delay for UI stability # Keep SimpleChatApp generate_response
-                        messages_container.scroll_end(animate=False) # Keep SimpleChatApp generate_response
-                        # Force another refresh to ensure content is visible # Keep SimpleChatApp generate_response
-                        self.refresh(layout=True) # Keep SimpleChatApp generate_response
-                    except Exception as e: # Keep SimpleChatApp generate_response
-                        log.error(f"Error updating UI: {str(e)}") # Use log instead of logger
-
-            # Generate the response with timeout and cleanup # Keep SimpleChatApp generate_response
-            self.current_generation_task = None # Clear previous task reference
-            try: # Keep SimpleChatApp generate_response
-                # Create a task for the response generation # Keep SimpleChatApp generate_response
-                self.current_generation_task = asyncio.create_task( # Keep SimpleChatApp generate_response
-                    generate_streaming_response( # Keep SimpleChatApp generate_response
-                        self, # Pass the app instance
-                        api_messages, # Keep SimpleChatApp generate_response
-                        model, # Keep SimpleChatApp generate_response
-                        style, # Keep SimpleChatApp generate_response
-                        client, # Keep SimpleChatApp generate_response
-                        update_ui # Keep SimpleChatApp generate_response
-                    ) # Keep SimpleChatApp generate_response
-                ) # Keep SimpleChatApp generate_response
-
-                # Wait for response with timeout # Keep SimpleChatApp generate_response
-                log.info(f"Waiting for generation task {self.current_generation_task} with timeout...") # Add log
-                full_response = await asyncio.wait_for(self.current_generation_task, timeout=60) # Longer timeout # Keep SimpleChatApp generate_response
-                log.info(f"Generation task {self.current_generation_task} completed. Full response length: {len(full_response) if full_response else 0}") # Add log
-
-                # Save to database only if we got a complete response and weren't cancelled
-                if self.is_generating and full_response: # Check is_generating flag here
-                    log("Generation finished normally, saving full response to DB") # Added log
-                    self.db.add_message( # Keep SimpleChatApp generate_response
-                        self.current_conversation.id, # Keep SimpleChatApp generate_response
-                        "assistant", # Keep SimpleChatApp generate_response
-                        full_response # Keep SimpleChatApp generate_response
-                    ) # Keep SimpleChatApp generate_response
-                    # Force a final refresh # Keep SimpleChatApp generate_response
-                    self.refresh(layout=True) # Keep SimpleChatApp generate_response
-                    await asyncio.sleep(0.1) # Wait for UI to update # Keep SimpleChatApp generate_response
-                elif not full_response and self.is_generating: # Only log if not cancelled
-                    log("Generation finished but full_response is empty/None") # Added log
-                else:
-                    # This case handles cancellation where full_response might be partial or None
-                    log("Generation was cancelled or finished without a full response.")
-
-            except asyncio.CancelledError: # Handle cancellation explicitly
-                log.warning("Generation task was cancelled.")
-                self.notify("Generation stopped by user.", severity="warning")
-                # Remove the potentially incomplete message from UI state
-                if self.messages and self.messages[-1].role == "assistant":
-                    self.messages.pop()
-                await self.update_messages_ui() # Update UI to remove partial message
-
-            except asyncio.TimeoutError: # Keep SimpleChatApp generate_response
-                log.error(f"Response generation timed out waiting for task {self.current_generation_task}") # Use log instead of logger
-                # Log state at timeout
-                log.error(f"Timeout state: is_generating={self.is_generating}, task_done={self.current_generation_task.done() if self.current_generation_task else 'N/A'}")
-                error_msg = "Response generation timed out. The model may be busy or unresponsive. Please try again." # Keep SimpleChatApp generate_response
-                self.notify(error_msg, severity="error") # Keep SimpleChatApp generate_response
-
-                # Remove the incomplete message # Keep SimpleChatApp generate_response
-                if self.messages and self.messages[-1].role == "assistant": # Keep SimpleChatApp generate_response
-                    self.messages.pop() # Keep SimpleChatApp generate_response
-
-                # Update UI to remove the incomplete message # Keep SimpleChatApp generate_response
-                await self.update_messages_ui() # Keep SimpleChatApp generate_response
-
-            finally: # Keep SimpleChatApp generate_response
-                # Ensure flag is reset and task reference is cleared
-                log(f"Setting is_generating to False in finally block") # Added log
-                self.is_generating = False # Keep SimpleChatApp generate_response
-                self.current_generation_task = None # Clear task reference
-                loading = self.query_one("#loading-indicator") # Keep SimpleChatApp generate_response
-                loading.add_class("hidden") # Keep SimpleChatApp generate_response
-                # Force a final UI refresh # Keep SimpleChatApp generate_response
-                self.refresh(layout=True) # Keep SimpleChatApp generate_response
-
-        except Exception as e: # Keep SimpleChatApp generate_response
-            # Catch any other unexpected errors during generation setup/handling
-            log.error(f"Unexpected exception during generate_response: {str(e)}") # Added log
-            self.notify(f"Error generating response: {str(e)}", severity="error") # Keep SimpleChatApp generate_response
-            # Add error message to UI # Keep SimpleChatApp generate_response
-            error_msg = f"Error: {str(e)}" # Keep SimpleChatApp generate_response
-            self.messages.append(Message(role="assistant", content=error_msg)) # Keep SimpleChatApp generate_response
-            await self.update_messages_ui() # Keep SimpleChatApp generate_response
-            # The finally block below will handle resetting is_generating and hiding loading
-
-        finally: # Keep SimpleChatApp generate_response - This finally block now primarily handles cleanup
-            log(f"Ensuring is_generating is False and task is cleared in outer finally block") # Added log
-            self.is_generating = False # Ensure flag is always reset
-            self.current_generation_task = None # Ensure task ref is cleared
-            loading = self.query_one("#loading-indicator") # Keep SimpleChatApp generate_response
-            loading.add_class("hidden") # Ensure loading indicator is hidden
-            # Re-focus input after generation attempt (success, failure, or cancel)
+    async def generate_response(self) -> None:
+        """Generate an AI response using a non-blocking worker."""
+        if not self.current_conversation or not self.messages:
+            return
+
+        self.is_generating = True
+        log("Setting is_generating to True")
+        loading = self.query_one("#loading-indicator")
+        loading.remove_class("hidden")
+
+        try:
+            # Get conversation parameters
+            model = self.selected_model
+            style = self.selected_style
+
+            # Convert messages to API format
+            api_messages = []
+            for msg in self.messages:
+                api_messages.append({
+                    "role": msg.role,
+                    "content": msg.content
+                })
+
+            # Get appropriate client
             try:
-
-
-
+                client = BaseModelClient.get_client_for_model(model)
+                if client is None:
+                    raise Exception(f"No client available for model: {model}")
+            except Exception as e:
+                self.notify(f"Failed to initialize model client: {str(e)}", severity="error")
+                self.is_generating = False
+                loading.add_class("hidden")
+                return
+
+            # Start streaming response
+            assistant_message = Message(role="assistant", content="Thinking...")
+            self.messages.append(assistant_message)
+            messages_container = self.query_one("#messages-container")
+            message_display = MessageDisplay(assistant_message, highlight_code=CONFIG["highlight_code"])
+            messages_container.mount(message_display)
+            messages_container.scroll_end(animate=False)
+
+            # Add small delay to show thinking state
+            await asyncio.sleep(0.5)
+
+            # Stream chunks to the UI with synchronization
+            update_lock = asyncio.Lock()
+
+            async def update_ui(content: str):
+                if not self.is_generating:
+                    log("update_ui called but is_generating is False, returning.")
+                    return
+
+                async with update_lock:
+                    try:
+                        # Clear thinking indicator on first content
+                        if assistant_message.content == "Thinking...":
+                            assistant_message.content = ""
+
+                        # Update message with full content so far
+                        assistant_message.content = content
+                        # Update UI with full content
+                        await message_display.update_content(content)
+                        # Force a refresh and scroll
+                        self.refresh(layout=True)
+                        await asyncio.sleep(0.05) # Longer delay for UI stability
+                        messages_container.scroll_end(animate=False)
+                        # Force another refresh to ensure content is visible
+                        self.refresh(layout=True)
+                    except Exception as e:
+                        log.error(f"Error updating UI: {str(e)}")
+
+            # Define worker for background processing
+            @work(exit_on_error=True)
+            async def run_generation_worker():
+                try:
+                    # Generate the response in background
+                    full_response = await generate_streaming_response(
+                        self,
+                        api_messages,
+                        model,
+                        style,
+                        client,
+                        update_ui
+                    )
+
+                    # Save complete response to database
+                    if self.is_generating and full_response:
+                        log("Generation completed normally, saving to database")
+                        self.db.add_message(
+                            self.current_conversation.id,
+                            "assistant",
+                            full_response
+                        )
+
+                    # Final UI refresh
+                    self.refresh(layout=True)
+
+                except asyncio.CancelledError:
+                    log.warning("Generation worker was cancelled")
+                    # Remove the incomplete message
+                    if self.messages and self.messages[-1].role == "assistant":
+                        self.messages.pop()
+                    await self.update_messages_ui()
+                    self.notify("Generation stopped by user", severity="warning", timeout=2)
+
+                except Exception as e:
+                    log.error(f"Error in generation worker: {str(e)}")
+                    self.notify(f"Generation error: {str(e)}", severity="error", timeout=5)
+                    # Add error message to UI
+                    if self.messages and self.messages[-1].role == "assistant":
+                        self.messages.pop() # Remove thinking message
+                    error_msg = f"Error: {str(e)}"
+                    self.messages.append(Message(role="assistant", content=error_msg))
+                    await self.update_messages_ui()
+
+                finally:
+                    # Always clean up state and UI
+                    log("Generation worker completed, resetting state")
+                    self.is_generating = False
+                    self.current_generation_task = None
+                    loading = self.query_one("#loading-indicator")
+                    loading.add_class("hidden")
+                    self.refresh(layout=True)
+                    self.query_one("#message-input").focus()
+
+            # Start the worker and keep a reference to it
+            worker = run_generation_worker()
+            self.current_generation_task = worker
+
+        except Exception as e:
+            log.error(f"Error setting up generation: {str(e)}")
+            self.notify(f"Error: {str(e)}", severity="error")
+            self.is_generating = False
+            loading = self.query_one("#loading-indicator")
+            loading.add_class("hidden")
+            self.query_one("#message-input").focus()
 
     def on_model_selector_model_selected(self, event: ModelSelector.ModelSelected) -> None: # Keep SimpleChatApp on_model_selector_model_selected
         """Handle model selection""" # Keep SimpleChatApp on_model_selector_model_selected docstring
{chat_console-0.2.5 → chat_console-0.2.8}/chat_console.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chat-console
-Version: 0.2.5
+Version: 0.2.8
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway