chat-console 0.3.95-py3-none-any.whl → 0.3.993-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- app/__init__.py +1 -1
- app/api/base.py +69 -26
- app/main.py +87 -113
- app/ui/model_selector.py +5 -0
- app/utils.py +10 -3
- {chat_console-0.3.95.dist-info → chat_console-0.3.993.dist-info}/METADATA +1 -1
- {chat_console-0.3.95.dist-info → chat_console-0.3.993.dist-info}/RECORD +11 -11
- {chat_console-0.3.95.dist-info → chat_console-0.3.993.dist-info}/WHEEL +1 -1
- {chat_console-0.3.95.dist-info → chat_console-0.3.993.dist-info}/entry_points.txt +0 -0
- {chat_console-0.3.95.dist-info → chat_console-0.3.993.dist-info}/licenses/LICENSE +0 -0
- {chat_console-0.3.95.dist-info → chat_console-0.3.993.dist-info}/top_level.txt +0 -0
app/__init__.py
CHANGED
app/api/base.py
CHANGED
@@ -61,6 +61,26 @@ class BaseModelClient(ABC):
             logger.info(f"Found model in config with provider: {provider}")
         # For custom models, try to infer provider
         else:
+            # Check if this model was selected from a specific provider in the UI
+            # This would be stored in a temporary attribute on the app instance
+            try:
+                from ..main import SimpleChatApp
+                import inspect
+                frame = inspect.currentframe()
+                while frame:
+                    if 'self' in frame.f_locals and isinstance(frame.f_locals['self'], SimpleChatApp):
+                        app_instance = frame.f_locals['self']
+                        if hasattr(app_instance, 'selected_provider'):
+                            provider = app_instance.selected_provider
+                            logger.info(f"Using provider from UI selection: {provider}")
+                            return OllamaClient if provider == "ollama" else (
+                                OpenAIClient if provider == "openai" else
+                                AnthropicClient if provider == "anthropic" else None)
+                    frame = frame.f_back
+            except Exception as e:
+                logger.error(f"Error checking for UI provider selection: {str(e)}")
+
+            # If we couldn't get the provider from the UI, infer it from the model name
             # Check for common OpenAI model patterns or prefixes
             if (model_name_lower.startswith(("gpt-", "text-", "davinci")) or
                 "gpt" in model_name_lower or
@@ -122,34 +142,57 @@ class BaseModelClient(ABC):
                 raise Exception(f"Provider '{provider}' is not available. Please check your configuration.")
         # For custom models, try to infer provider
         else:
-            # Check …
-            … (removed lines 126-148 not rendered in the diff view)
+            # Check if this model was selected from a specific provider in the UI
+            provider = None
+            try:
+                from ..main import SimpleChatApp
+                import inspect
+                frame = inspect.currentframe()
+                while frame:
+                    if 'self' in frame.f_locals and isinstance(frame.f_locals['self'], SimpleChatApp):
+                        app_instance = frame.f_locals['self']
+                        if hasattr(app_instance, 'selected_provider'):
+                            provider = app_instance.selected_provider
+                            logger.info(f"Using provider from UI selection: {provider}")
+                            break
+                    frame = frame.f_back
+            except Exception as e:
+                logger.error(f"Error checking for UI provider selection: {str(e)}")
+
+            # If we couldn't get the provider from the UI, infer it from the model name
+            if not provider:
+                # Check for common OpenAI model patterns or prefixes
+                if (model_name_lower.startswith(("gpt-", "text-", "davinci")) or
+                    "gpt" in model_name_lower or
+                    model_name_lower in ["04-mini", "04", "04-turbo", "04-vision"]):
+                    if not AVAILABLE_PROVIDERS["openai"]:
+                        raise Exception("OpenAI API key not found. Please set OPENAI_API_KEY environment variable.")
+                    provider = "openai"
+                    logger.info(f"Identified {model_name} as an OpenAI model")
+                # Then check for Anthropic models - these should ALWAYS use Anthropic client
+                elif any(name in model_name_lower for name in ["claude", "anthropic"]):
+                    if not AVAILABLE_PROVIDERS["anthropic"]:
+                        raise Exception("Anthropic API key not found. Please set ANTHROPIC_API_KEY environment variable.")
+                    provider = "anthropic"
+                    logger.info(f"Identified as Anthropic model: {model_name}")
+                # Then try Ollama for known model names or if selected from Ollama UI
+                elif (any(name in model_name_lower for name in ["llama", "mistral", "codellama", "gemma"]) or
+                      model_name in [m["id"] for m in CONFIG.get("ollama_models", [])]):
+                    if not AVAILABLE_PROVIDERS["ollama"]:
+                        raise Exception("Ollama server is not running. Please start Ollama and try again.")
                     provider = "ollama"
-                logger.info(f"…
+                    logger.info(f"Identified as Ollama model: {model_name}")
             else:
-
+                # Default to Ollama for unknown models
+                if AVAILABLE_PROVIDERS["ollama"]:
+                    provider = "ollama"
+                    logger.info(f"Unknown model type, defaulting to Ollama: {model_name}")
+                else:
+                    raise Exception(f"Unknown model: {model_name}")
+
+            # Verify the selected provider is available
+            if provider and not AVAILABLE_PROVIDERS.get(provider, False):
+                raise Exception(f"Provider '{provider}' is not available. Please check your configuration.")

         # Return appropriate client
         if provider == "ollama":
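Note: the provider-detection code added above recovers UI state by walking the Python call stack with inspect until it finds a frame whose self is the running SimpleChatApp, then reads its selected_provider attribute. A minimal, self-contained sketch of that technique (the App and resolve_provider names below are illustrative, not the package's own):

    import inspect

    class App:
        """Stand-in for SimpleChatApp; holds the UI's provider choice."""
        def __init__(self):
            self.selected_provider = "openai"

        def run(self):
            # resolve_provider() receives no reference to `self`
            return resolve_provider()

    def resolve_provider():
        frame = inspect.currentframe()
        try:
            # Walk outward through caller frames; any method frame whose
            # `self` is an App exposes the UI state we want.
            while frame:
                candidate = frame.f_locals.get("self")
                if isinstance(candidate, App):
                    return candidate.selected_provider
                frame = frame.f_back
        finally:
            del frame  # break the frame reference cycle, per the inspect docs
        return None

    print(App().run())  # -> openai

Frame-walking avoids threading the provider through every call signature, but it couples the API layer to the UI and can fail to find the frame under threads or asyncio scheduling, which is presumably why the diff wraps it in a broad try/except and falls back to name-based inference.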
app/main.py
CHANGED
@@ -643,127 +643,94 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         # Update UI with user message first
         await self.update_messages_ui()

-        # If this is the first message and dynamic titles are enabled,
-        # Only attempt title generation if the message has sufficient content (at least 3 characters)
+        # If this is the first message and dynamic titles are enabled, start background title generation
         if is_first_message and self.current_conversation and CONFIG.get("generate_dynamic_titles", True) and len(content) >= 3:
-            log("First message detected, …
-            … (removed lines 650-652 not rendered in the diff view)
-            # Show loading indicator for title generation
-            loading = self.query_one("#loading-indicator")
-            loading.remove_class("hidden")
-            loading.update("🔤 Generating title...")
+            log("First message detected, starting background title generation...")
+            debug_log(f"First message detected with length {len(content)}, creating background title task")
+            asyncio.create_task(self._generate_title_background(content))

-            … (removed lines 658-660 not rendered in the diff view)
-            print(f"Using model for title generation: {model}")
-            debug_log(f"Selected model for title generation: '{model}'")
-
-            # Check if model is valid
-            if not model:
-                debug_log("Model is empty, falling back to default")
-                # Fallback to a safe default model - preferring OpenAI if key exists
-                if OPENAI_API_KEY:
-                    model = "gpt-3.5-turbo"
-                    debug_log("Falling back to OpenAI gpt-3.5-turbo for title generation")
-                elif ANTHROPIC_API_KEY:
-                    model = "claude-3-haiku-20240307" # Updated to newer Claude model
-                    debug_log("Falling back to Anthropic Claude 3 Haiku for title generation")
-                else:
-                    # Last resort - use a common Ollama model
-                    model = "llama3" # Common default
-                    debug_log("Falling back to Ollama model: llama3")
+        # Start main response generation immediately
+        debug_log(f"About to call generate_response with model: '{self.selected_model}'")
+        await self.generate_response()

-            … (removed lines 679-681 not rendered in the diff view)
-            if client is None:
-                debug_log(f"No client available for model: {model}, trying to initialize")
-                # Try to determine client type and initialize manually
-                client_type = BaseModelClient.get_client_type_for_model(model)
-                if client_type:
-                    debug_log(f"Found client type {client_type.__name__} for {model}, initializing")
-                    try:
-                        client = await client_type.create()
-                        debug_log("Client initialized successfully")
-                    except Exception as init_err:
-                        debug_log(f"Error initializing client: {str(init_err)}")
-
-            if client is None:
-                debug_log("Could not initialize client, falling back to safer model")
-                # Try a different model as last resort
-                if OPENAI_API_KEY:
-                    from app.api.openai import OpenAIClient
-                    client = await OpenAIClient.create()
-                    model = "gpt-3.5-turbo"
-                    debug_log("Falling back to OpenAI for title generation")
-                elif ANTHROPIC_API_KEY:
-                    from app.api.anthropic import AnthropicClient
-                    client = await AnthropicClient.create()
-                    model = "claude-3-haiku-20240307" # Updated to newer Claude model
-                    debug_log("Falling back to Anthropic for title generation")
-                else:
-                    raise Exception("No valid API clients available for title generation")
+        # Focus back on input
+        input_widget.focus()

-            … (removed lines 710-716 not rendered in the diff view)
-            if expected_client_type and not isinstance(client, expected_client_type):
-                debug_log(f"Warning: Client type mismatch. Expected {expected_client_type.__name__}, got {type(client).__name__}")
-                debug_log("Creating new client with correct type")
-                client = await BaseModelClient.get_client_for_model(model)
-
-            title = await generate_conversation_title(content, model, client)
-            debug_log(f"Generated title: {title}")
-            log(f"Generated title: {title}")
-            print(f"Generated title: {title}")
+    async def _generate_title_background(self, content: str) -> None:
+        """Generates the conversation title in the background."""
+        if not self.current_conversation or not CONFIG.get("generate_dynamic_titles", True):
+            return
+
+        log("Starting background title generation...")
+        debug_log(f"Background title generation started for content: {content[:30]}...")

+        try:
+            # Use the logic from generate_conversation_title in utils.py
+            # It already prioritizes faster models (OpenAI/Anthropic)
+            # We need a client instance here. Let's get one based on priority.
+            title_client = None
+            title_model = None
+            from app.config import OPENAI_API_KEY, ANTHROPIC_API_KEY
+            from app.api.base import BaseModelClient
+
+            # Determine title client and model based on available keys
+            if OPENAI_API_KEY:
+                from app.api.openai import OpenAIClient
+                title_client = await OpenAIClient.create()
+                title_model = "gpt-3.5-turbo"
+                debug_log("Using OpenAI for background title generation")
+            elif ANTHROPIC_API_KEY:
+                from app.api.anthropic import AnthropicClient
+                title_client = await AnthropicClient.create()
+                title_model = "claude-3-haiku-20240307"
+                debug_log("Using Anthropic for background title generation")
+            else:
+                # Fallback to the currently selected model's client if no API keys
+                selected_model_resolved = resolve_model_id(self.selected_model)
+                title_client = await BaseModelClient.get_client_for_model(selected_model_resolved)
+                title_model = selected_model_resolved
+                debug_log(f"Using selected model's client ({type(title_client).__name__}) for background title generation")
+
+            if not title_client or not title_model:
+                raise Exception("Could not determine a client/model for title generation.")
+
+            # Call the utility function
+            from app.utils import generate_conversation_title # Import locally if needed
+            new_title = await generate_conversation_title(content, title_model, title_client)
+            debug_log(f"Background generated title: {new_title}")
+
+            # Check if title generation returned the default or a real title
+            if new_title and not new_title.startswith("Conversation ("):
                 # Update conversation title in database
                 self.db.update_conversation(
                     self.current_conversation.id,
-                    title=…
+                    title=new_title
                 )

-            # Update UI title
-            … (removed lines 734-736 not rendered in the diff view)
-            # …
-            … (removed lines 738-751 not rendered in the diff view)
-            # Hide loading indicator *only if* AI response generation isn't about to start
-            if not self.is_generating:
-                loading.add_class("hidden")
-
-            # Small delay to ensure state is updated
-            await asyncio.sleep(0.1)
-
-            # Log just before generate_response call
-            debug_log(f"About to call generate_response with model: '{self.selected_model}'")
-
-            # Generate AI response (will set self.is_generating and handle loading indicator)
-            await self.generate_response()
+                # Update UI title (if conversation hasn't changed)
+                # Check if the current conversation ID still matches
+                # Need to fetch the conversation again to be sure, or check against self.current_conversation.id
+                current_conv_id = self.current_conversation.id if self.current_conversation else None
+                if current_conv_id and self.db.get_conversation(current_conv_id): # Check if conversation still exists
+                    # Check if the app's current conversation is still the same one
+                    if self.current_conversation and self.current_conversation.id == current_conv_id:
+                        title_widget = self.query_one("#conversation-title", Static)
+                        title_widget.update(new_title)
+                        self.current_conversation.title = new_title # Update local object too
+                        log(f"Background title update successful: {new_title}")
+                        # Maybe a subtle notification? Optional.
+                        # self.notify(f"Title set: {new_title}", severity="information", timeout=2)
+                    else:
+                        log("Conversation changed before background title update could apply.")
+                else:
+                    log(f"Conversation with ID {current_conv_id} no longer exists. Skipping title update.")
+            else:
+                log(f"Background title generation resulted in default or empty title: '{new_title}'. Not updating.")

-
-
+        except Exception as e:
+            debug_log(f"Background title generation failed: {str(e)}")
+            log.error(f"Background title generation failed: {str(e)}")
+            # Do not notify the user, just log the error.

     async def generate_response(self) -> None:
         """Generate an AI response using a non-blocking worker with fallback."""
@@ -1248,9 +1215,16 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         debug_log(f"State change event from unrelated worker: {event.worker.name}")


-    def on_model_selector_model_selected(self, event: ModelSelector.ModelSelected) -> None:
-        """Handle model selection"""
-        self.selected_model = event.model_id
+    def on_model_selector_model_selected(self, event: ModelSelector.ModelSelected) -> None:
+        """Handle model selection"""
+        self.selected_model = event.model_id
+
+        # Store the selected provider for use in client resolution
+        model_selector = self.query_one(ModelSelector)
+        if model_selector:
+            self.selected_provider = model_selector.selected_provider
+            log(f"Stored selected provider: {self.selected_provider} for model: {self.selected_model}")
+
         self.update_app_info() # Update the displayed model info

     def on_style_selector_style_selected(self, event: StyleSelector.StyleSelected) -> None: # Keep SimpleChatApp on_style_selector_style_selected
app/ui/model_selector.py
CHANGED
@@ -251,6 +251,11 @@ class ModelSelector(Container):
         # IMPORTANT: Clear any cached client
         if hasattr(self.app, 'cached_client'):
             self.app.cached_client = None
+
+        # Store the selected provider in the app instance for client resolution
+        if hasattr(self.app, 'selected_provider'):
+            self.app.selected_provider = self.selected_provider
+            logger.info(f"Updated app.selected_provider to: {self.selected_provider}")

         # Update model options
         model_select = self.query_one("#model-select", Select)
app/utils.py
CHANGED
@@ -33,15 +33,15 @@ async def generate_conversation_title(message: str, model: str, client: Any) ->
     # Try-except the entire function to ensure we always return a title
     try:
         # Pick a reliable title generation model - prefer OpenAI if available
-        from …
+        from app.config import OPENAI_API_KEY, ANTHROPIC_API_KEY

         if OPENAI_API_KEY:
-            from …
+            from app.api.openai import OpenAIClient
             title_client = await OpenAIClient.create()
             title_model = "gpt-3.5-turbo"
             debug_log("Using OpenAI for title generation")
         elif ANTHROPIC_API_KEY:
-            from …
+            from app.api.anthropic import AnthropicClient
             title_client = await AnthropicClient.create()
             title_model = "claude-3-haiku-20240307"
             debug_log("Using Anthropic for title generation")

@@ -774,6 +774,13 @@ def resolve_model_id(model_id_or_name: str) -> str:
         "o4-vision": "04-vision"
     }

+    # Check for more complex typo patterns with dates
+    if input_lower.startswith("o1-") and "-202" in input_lower:
+        corrected = "01" + input_lower[2:]
+        logger.info(f"Converting '{input_lower}' to '{corrected}' (letter 'o' to zero '0')")
+        input_lower = corrected
+        model_id_or_name = corrected
+
     if input_lower in typo_corrections:
         corrected = typo_corrections[input_lower]
         logger.info(f"Converting '{input_lower}' to '{corrected}' (letter 'o' to zero '0')")
{chat_console-0.3.95.dist-info → chat_console-0.3.993.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chat-console
-Version: 0.3.95
+Version: 0.3.993
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway
{chat_console-0.3.95.dist-info → chat_console-0.3.993.dist-info}/RECORD
CHANGED
@@ -1,24 +1,24 @@
-app/__init__.py,sha256=…
+app/__init__.py,sha256=QfLH7sifN5qjEB7LzWob9KE-LerEtIww93XlEveeJFo,132
 app/config.py,sha256=xeRGXcKbNvAdQGkaJJBipM4yHZJTM1y4ZFoW764APOU,7661
 app/database.py,sha256=nt8CVuDpy6zw8mOYqDcfUmNw611t7Ln7pz22M0b6-MI,9967
-app/main.py,sha256=…
+app/main.py,sha256=WOcMP6yRwoEzftTSHf0e3zVK1aEuBgKMAsNbzHyKgiA,77427
 app/models.py,sha256=4-y9Lytay2exWPFi0FDlVeRL3K2-I7E-jBqNzTfokqY,2644
-app/utils.py,sha256=…
+app/utils.py,sha256=AfB6USZdSwkUj75TQzGt_WPAUt1K8wlghON4vRVHUbE,39158
 app/api/__init__.py,sha256=A8UL84ldYlv8l7O-yKzraVFcfww86SgWfpl4p7R03-w,62
 app/api/anthropic.py,sha256=uInwNvGLJ_iPUs4BjdwaqXTU6NfmK1SzX7498Pt44fI,10667
-app/api/base.py,sha256=…
+app/api/base.py,sha256=ELHl7K0jn0OuOfub7lVboigIbym0sv1se_-bCLscPJ8,10232
 app/api/ollama.py,sha256=eFG24nI2MlF57z9EHiA97v02NgFJ0kxaPUX26xAXFsg,66154
 app/api/openai.py,sha256=hLPr955tUx_2vwRuLP8Zrl3vu7kQZgUETi4cJuaYnFE,10810
 app/ui/__init__.py,sha256=RndfbQ1Tv47qdSiuQzvWP96lPS547SDaGE-BgOtiP_w,55
 app/ui/chat_interface.py,sha256=oSDZi0Jgj_L8WnBh1RuJpIeIcN-RQ38CNejwsXiWTVg,18267
 app/ui/chat_list.py,sha256=WQTYVNSSXlx_gQal3YqILZZKL9UiTjmNMIDX2I9pAMM,11205
 app/ui/model_browser.py,sha256=pdblLVkdyVF0_Bo02bqbErGAtieyH-y6IfhMOPEqIso,71124
-app/ui/model_selector.py,sha256=…
+app/ui/model_selector.py,sha256=2G0TOXfcNodrXZOhLeaJJ2iG3Nck4c_NN1AvUAmaF3M,19172
 app/ui/search.py,sha256=b-m14kG3ovqW1-i0qDQ8KnAqFJbi5b1FLM9dOnbTyIs,9763
 app/ui/styles.py,sha256=04AhPuLrOd2yenfRySFRestPeuTPeMLzhmMB67NdGvw,5615
-chat_console-0.3.95.dist-info/licenses/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
-chat_console-0.3.95.dist-info/METADATA,sha256=…
-chat_console-0.3.95.dist-info/WHEEL,sha256=…
-chat_console-0.3.95.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
-chat_console-0.3.95.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
-chat_console-0.3.95.dist-info/RECORD,,
+chat_console-0.3.993.dist-info/licenses/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
+chat_console-0.3.993.dist-info/METADATA,sha256=UNHfkJ2M-GN1LnJEMKoI6eqPHIQYlYvwMXLmP4YvY5I,2923
+chat_console-0.3.993.dist-info/WHEEL,sha256=ck4Vq1_RXyvS4Jt6SI0Vz6fyVs4GWg7AINwpsaGEgPE,91
+chat_console-0.3.993.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
+chat_console-0.3.993.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
+chat_console-0.3.993.dist-info/RECORD,,
{chat_console-0.3.95.dist-info → chat_console-0.3.993.dist-info}/entry_points.txt
File without changes
{chat_console-0.3.95.dist-info → chat_console-0.3.993.dist-info}/licenses/LICENSE
File without changes
{chat_console-0.3.95.dist-info → chat_console-0.3.993.dist-info}/top_level.txt
File without changes