chat-console 0.2.98__py3-none-any.whl → 0.2.99__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- app/__init__.py +1 -1
- app/api/anthropic.py +8 -1
- app/api/base.py +2 -2
- app/api/ollama.py +9 -3
- app/api/openai.py +8 -1
- app/main.py +56 -31
- app/ui/chat_interface.py +13 -9
- app/utils.py +5 -3
- {chat_console-0.2.98.dist-info → chat_console-0.2.99.dist-info}/METADATA +1 -1
- chat_console-0.2.99.dist-info/RECORD +24 -0
- chat_console-0.2.98.dist-info/RECORD +0 -24
- {chat_console-0.2.98.dist-info → chat_console-0.2.99.dist-info}/WHEEL +0 -0
- {chat_console-0.2.98.dist-info → chat_console-0.2.99.dist-info}/entry_points.txt +0 -0
- {chat_console-0.2.98.dist-info → chat_console-0.2.99.dist-info}/licenses/LICENSE +0 -0
- {chat_console-0.2.98.dist-info → chat_console-0.2.99.dist-info}/top_level.txt +0 -0
app/__init__.py
CHANGED
app/api/anthropic.py
CHANGED
@@ -7,7 +7,14 @@ from ..utils import resolve_model_id # Import the resolve_model_id function
 
 class AnthropicClient(BaseModelClient):
     def __init__(self):
-        self.client =
+        self.client = None # Initialize in create()
+
+    @classmethod
+    async def create(cls) -> 'AnthropicClient':
+        """Create a new instance with async initialization."""
+        instance = cls()
+        instance.client = anthropic.AsyncAnthropic(api_key=ANTHROPIC_API_KEY)
+        return instance
 
     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> List[Dict[str, str]]:
         """Prepare messages for Claude API"""
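This hunk swaps eager construction in __init__ for an awaitable create() classmethod, a pattern repeated in base.py, ollama.py, openai.py and main.py below. A minimal runnable sketch of the idea, using a hypothetical ExampleClient rather than any of the package's real classes:

import asyncio

class ExampleClient:
    """Hypothetical client illustrating the async-factory pattern used in this release."""

    def __init__(self):
        # __init__ stays synchronous and cheap; the real SDK object is attached later
        self.client = None

    @classmethod
    async def create(cls) -> 'ExampleClient':
        """Build an instance, then run any awaitable setup before returning it."""
        instance = cls()
        await asyncio.sleep(0)  # stand-in for an async handshake or health check
        instance.client = object()  # stand-in for e.g. anthropic.AsyncAnthropic(...)
        return instance

async def main():
    client = await ExampleClient.create()
    print(type(client).__name__, "ready:", client.client is not None)

asyncio.run(main())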
app/api/base.py
CHANGED
@@ -71,7 +71,7 @@ class BaseModelClient(ABC):
         return None
 
     @staticmethod
-    def get_client_for_model(model_name: str) -> 'BaseModelClient':
+    async def get_client_for_model(model_name: str) -> 'BaseModelClient':
         """Factory method to get appropriate client for model"""
         from ..config import CONFIG, AVAILABLE_PROVIDERS
         from .anthropic import AnthropicClient
@@ -118,7 +118,7 @@ class BaseModelClient(ABC):
 
         # Return appropriate client
         if provider == "ollama":
-            return OllamaClient()
+            return await OllamaClient.create()
         elif provider == "openai":
             return OpenAIClient()
         elif provider == "anthropic":
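Because get_client_for_model is now a coroutine, every call site has to await it, which is exactly what the main.py hunks below do. A sketch of the call-site change with the factory stubbed out (the names here are illustrative, not the package's API):

import asyncio

class StubClient:
    """Stub standing in for BaseModelClient and its concrete subclasses."""

    @staticmethod
    async def get_client_for_model(model_name: str) -> 'StubClient':
        await asyncio.sleep(0)  # the real factory may await OllamaClient.create()
        return StubClient()

async def cancel_generation(model: str) -> None:
    # 0.2.98: client = StubClient.get_client_for_model(model)  (plain call)
    # 0.2.99: the factory must be awaited, so the caller must itself be async
    client = await StubClient.get_client_for_model(model)
    if hasattr(client, 'cancel_stream'):  # optional capability check, as in main.py
        await client.cancel_stream()

asyncio.run(cancel_generation("llama3"))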
app/api/ollama.py
CHANGED
@@ -3,7 +3,6 @@ import asyncio
 import json
 import logging
 import os
-import time
 from datetime import datetime, timedelta
 from pathlib import Path
 from typing import List, Dict, Any, Optional, Generator, AsyncGenerator
@@ -15,7 +14,6 @@ logger = logging.getLogger(__name__)
 class OllamaClient(BaseModelClient):
     def __init__(self):
         from ..config import OLLAMA_BASE_URL
-        from ..utils import ensure_ollama_running
         self.base_url = OLLAMA_BASE_URL.rstrip('/')
         logger.info(f"Initializing Ollama client with base URL: {self.base_url}")
 
@@ -27,10 +25,18 @@
 
         # Path to the cached models file
         self.models_cache_path = Path(__file__).parent.parent / "data" / "ollama-models.json"
+
+    @classmethod
+    async def create(cls) -> 'OllamaClient':
+        """Factory method to create and initialize an OllamaClient instance"""
+        from ..utils import ensure_ollama_running
+        client = cls()
 
         # Try to start Ollama if not running
-        if not ensure_ollama_running():
+        if not await ensure_ollama_running():
             raise Exception(f"Failed to start Ollama server. Please ensure Ollama is installed and try again.")
+
+        return client
 
     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> str:
         """Convert chat messages to Ollama format"""
app/api/openai.py
CHANGED
@@ -5,7 +5,14 @@ from ..config import OPENAI_API_KEY
 
 class OpenAIClient(BaseModelClient):
     def __init__(self):
-        self.client =
+        self.client = None # Initialize in create()
+
+    @classmethod
+    async def create(cls) -> 'OpenAIClient':
+        """Create a new instance with async initialization."""
+        instance = cls()
+        instance.client = AsyncOpenAI(api_key=OPENAI_API_KEY)
+        return instance
 
     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> List[Dict[str, str]]:
         """Prepare messages for OpenAI API"""
app/main.py
CHANGED
@@ -6,6 +6,7 @@ import os
 import asyncio
 import typer
 import logging
+import time
 from typing import List, Optional, Callable, Awaitable
 from datetime import datetime
 
@@ -161,6 +162,15 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
     TITLE = "Chat Console"
     SUB_TITLE = "AI Chat Interface" # Keep SimpleChatApp SUB_TITLE
     DARK = True # Keep SimpleChatApp DARK
+
+    # Add better terminal handling to fix UI glitches
+    SCREENS = {}
+
+    # Force full screen mode and prevent background terminal showing through
+    FULL_SCREEN = True
+
+    # Force capturing all mouse events for better stability
+    CAPTURE_MOUSE = True
 
     # Ensure the log directory exists in a standard cache location
     log_dir = os.path.expanduser("~/.cache/chat-cli")
@@ -424,7 +434,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         # Check for available models # Keep SimpleChatApp on_mount
         from app.api.ollama import OllamaClient # Keep SimpleChatApp on_mount
         try: # Keep SimpleChatApp on_mount
-            ollama = OllamaClient() # Keep SimpleChatApp on_mount
+            ollama = await OllamaClient.create() # Keep SimpleChatApp on_mount
             models = await ollama.get_available_models() # Keep SimpleChatApp on_mount
             if not models: # Keep SimpleChatApp on_mount
                 api_issues.append("- No Ollama models found") # Keep SimpleChatApp on_mount
@@ -511,7 +521,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         # Get the client for the current model first and cancel the connection
         try:
             model = self.selected_model
-            client = BaseModelClient.get_client_for_model(model)
+            client = await BaseModelClient.get_client_for_model(model)
 
             # Call the client's cancel method if it's supported
             if hasattr(client, 'cancel_stream'):
@@ -581,19 +591,21 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         messages_container = self.query_one("#messages-container") # Keep SimpleChatApp update_messages_ui
         messages_container.remove_children() # Keep SimpleChatApp update_messages_ui
 
-        #
-        # This avoids
-
-
-
+        # Temporarily disable automatic refresh while mounting messages
+        # This avoids excessive layout calculations and reduces flickering
+        with self.batch_update():
+            # Batch add all messages first without any refresh/layout
+            for message in self.messages: # Keep SimpleChatApp update_messages_ui
+                display = MessageDisplay(message, highlight_code=CONFIG["highlight_code"]) # Keep SimpleChatApp update_messages_ui
+                messages_container.mount(display) # Keep SimpleChatApp update_messages_ui
+
+        # A small delay after mounting all messages helps with layout stability
+        await asyncio.sleep(0.05)
 
-        #
-        # This significantly reduces the visual bouncing effect
-        # A small delay before scrolling helps ensure stable layout
-        await asyncio.sleep(0.05) # Single delay after all messages are mounted
+        # Scroll after all messages are added without animation
         messages_container.scroll_end(animate=False) # Keep SimpleChatApp update_messages_ui
 
-        #
+        # Minimal refresh without full layout recalculation
         self.refresh(layout=False)
 
     async def on_input_submitted(self, event: Input.Submitted) -> None: # Keep SimpleChatApp on_input_submitted
@@ -656,7 +668,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
                 # Last resort - check for a common Ollama model
                 try:
                     from app.api.ollama import OllamaClient
-                    ollama = OllamaClient()
+                    ollama = await OllamaClient.create()
                     models = await ollama.get_available_models()
                     if models and len(models) > 0:
                         debug_log(f"Found {len(models)} Ollama models, using first one")
@@ -670,7 +682,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             debug_log("Final fallback to llama3")
 
         debug_log(f"Getting client for model: {model}")
-        client = BaseModelClient.get_client_for_model(model)
+        client = await BaseModelClient.get_client_for_model(model)
 
         if client is None:
             debug_log(f"No client available for model: {model}, trying to initialize")
@@ -679,7 +691,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             if client_type:
                 debug_log(f"Found client type {client_type.__name__} for {model}, initializing")
                 try:
-                    client = client_type()
+                    client = await client_type.create()
                     debug_log("Client initialized successfully")
                 except Exception as init_err:
                     debug_log(f"Error initializing client: {str(init_err)}")
@@ -689,12 +701,12 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             # Try a different model as last resort
             if OPENAI_API_KEY:
                 from app.api.openai import OpenAIClient
-                client = OpenAIClient()
+                client = await OpenAIClient.create()
                 model = "gpt-3.5-turbo"
                 debug_log("Falling back to OpenAI for title generation")
             elif ANTHROPIC_API_KEY:
                 from app.api.anthropic import AnthropicClient
-                client = AnthropicClient()
+                client = await AnthropicClient.create()
                 model = "claude-instant-1.2"
                 debug_log("Falling back to Anthropic for title generation")
             else:
@@ -811,7 +823,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             else:
                 # Check for a common Ollama model
                 try:
-                    ollama = OllamaClient()
+                    ollama = await OllamaClient.create()
                     models = await ollama.get_available_models()
                     if models and len(models) > 0:
                         debug_log(f"Found {len(models)} Ollama models, using first one")
@@ -856,7 +868,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         # Get appropriate client
         debug_log(f"Getting client for model: {model}")
         try:
-            client = BaseModelClient.get_client_for_model(model)
+            client = await BaseModelClient.get_client_for_model(model)
             debug_log(f"Client: {client.__class__.__name__ if client else 'None'}")
 
             if client is None:
@@ -866,7 +878,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             if client_type:
                 debug_log(f"Found client type {client_type.__name__} for {model}, initializing")
                 try:
-                    client = client_type()
+                    client = await client_type.create()
                     debug_log(f"Successfully initialized {client_type.__name__}")
                 except Exception as init_err:
                     debug_log(f"Error initializing client: {str(init_err)}")
@@ -876,12 +888,12 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             # Try a different model as last resort
             if OPENAI_API_KEY:
                 from app.api.openai import OpenAIClient
-                client = OpenAIClient()
+                client = await OpenAIClient.create()
                 model = "gpt-3.5-turbo"
                 debug_log("Falling back to OpenAI client")
             elif ANTHROPIC_API_KEY:
                 from app.api.anthropic import AnthropicClient
-                client = AnthropicClient()
+                client = await AnthropicClient.create()
                 model = "claude-instant-1.2"
                 debug_log("Falling back to Anthropic client")
             else:
@@ -907,6 +919,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
 
         # Stream chunks to the UI with synchronization
        update_lock = asyncio.Lock()
+        last_refresh_time = time.time() # Initialize refresh throttling timer
 
         async def update_ui(content: str):
             # This function remains the same, called by the worker
@@ -914,6 +927,9 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
                 debug_log("update_ui called but is_generating is False, returning.")
                 return
 
+            # Make last_refresh_time accessible in inner scope
+            nonlocal last_refresh_time
+
             async with update_lock:
                 try:
                     # Clear thinking indicator on first content
@@ -927,20 +943,29 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
                     # Update UI with the content - this no longer triggers refresh itself
                     await message_display.update_content(content)
 
-                    #
-                    #
+                    # Much more aggressive throttling of UI updates to eliminate visual jitter
+                    # By using a larger modulo value, we significantly reduce refresh frequency
+                    # This improves stability at the cost of slightly choppier animations
                     content_length = len(content)
+
+                    # Define some key refresh points
+                    new_paragraph = content.endswith("\n") and content.count("\n") > 0
                     do_refresh = (
-                        content_length <
-                        content_length %
-
+                        content_length < 5 or # Only first few tokens
+                        content_length % 64 == 0 or # Very infrequent periodic updates
+                        new_paragraph # Refresh on paragraph breaks
                     )
 
-                    if
-
+                    # Check if it's been enough time since last refresh (250ms minimum)
+                    current_time = time.time()
+                    time_since_refresh = current_time - last_refresh_time
+
+                    if do_refresh and time_since_refresh > 0.25:
+                        # Store the time we did the refresh
+                        last_refresh_time = current_time
+                        # Skip layout updates completely during streaming
+                        # Just ensure content is still visible by scrolling
                         messages_container.scroll_end(animate=False)
-                        # Light refresh without full layout recalculation
-                        self.refresh(layout=False)
                 except Exception as e:
                     debug_log(f"Error updating UI: {str(e)}")
                     log.error(f"Error updating UI: {str(e)}")
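The streaming hunks above gate UI refreshes on two conditions: a content heuristic (the first few tokens, every 64th character, paragraph breaks) and a 250 ms minimum interval tracked through a nonlocal timestamp. A standalone sketch of that gating logic, decoupled from Textual:

import time

def make_refresh_gate(min_interval: float = 0.25):
    """Return a callable that decides whether the UI should refresh for a chunk."""
    last_refresh_time = time.time()

    def should_refresh(content: str) -> bool:
        nonlocal last_refresh_time
        content_length = len(content)
        new_paragraph = content.endswith("\n") and content.count("\n") > 0
        do_refresh = (
            content_length < 5 or        # only the first few tokens
            content_length % 64 == 0 or  # very infrequent periodic updates
            new_paragraph                # refresh on paragraph breaks
        )
        now = time.time()
        if do_refresh and now - last_refresh_time > min_interval:
            last_refresh_time = now
            return True
        return False

    return should_refresh

# Usage: call once per streamed chunk; repaint only when the gate opens
gate = make_refresh_gate()
text = ""
for token in ["Hello", ", ", "world", ".\n", "More text follows"]:
    text += token
    if gate(text):
        print(f"refresh at {len(text)} chars")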
app/ui/chat_interface.py
CHANGED
@@ -1,5 +1,4 @@
 from typing import List, Dict, Any, Optional, Callable, Awaitable
-import time
 import asyncio
 from datetime import datetime
 import re
@@ -66,9 +65,9 @@ class MessageDisplay(Static): # Inherit from Static instead of RichLog
         padding: 1;
         text-wrap: wrap; /* Explicitly enable text wrapping via CSS */
         content-align: left top; /* Anchor content to top-left */
-        overflow-y: visible;
+        overflow-y: auto; /* Changed from 'visible' to valid 'auto' value */
         box-sizing: border-box; /* Include padding in size calculations */
-        transitions: none;
+        transition: none; /* Fixed property name from 'transitions' to 'transition' */
     }
 
     MessageDisplay.user-message {
@@ -121,7 +120,11 @@ class MessageDisplay(Static): # Inherit from Static instead of RichLog
         self.update(self._format_content(self.message.content))
 
     async def update_content(self, content: str) -> None:
-        """Update the message content using Static.update()"""
+        """Update the message content using Static.update() with optimizations for streaming"""
+        # Quick unchanged content check to avoid unnecessary updates
+        if self.message.content == content:
+            return
+
         # Update the stored message object content first
         self.message.content = content
 
@@ -129,11 +132,12 @@ class MessageDisplay(Static): # Inherit from Static instead of RichLog
         # This avoids text reflowing as new tokens arrive
         formatted_content = self._format_content(content)
 
-        #
-
+        # Use minimal update that doesn't trigger a refresh
+        # This allows parent to control refresh timing and avoid flickering
+        self.update(formatted_content, refresh=False)
 
-        #
-        #
+        # No refresh or layout recalculation is performed here
+        # The parent container will handle refresh timing for better stability
 
     def _format_content(self, content: str) -> str:
         """Format message content with timestamp"""
@@ -179,7 +183,7 @@ class ChatInterface(Container):
         padding: 0 1;
         content-align: left top; /* Keep content anchored at top */
         box-sizing: border-box;
-        scrollbar-
+        scrollbar-gutter: stable; /* Better than scrollbar-size which isn't valid */
     }
 
     #input-area {
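The update_content change is a standard streaming optimization: do nothing when the incoming content matches what is already displayed, and leave repaint timing to the parent container. A tiny sketch of the guard, using a hypothetical widget rather than Textual's Static:

class StreamingLabel:
    """Hypothetical widget demonstrating the skip-if-unchanged guard."""

    def __init__(self):
        self.content = ""
        self.paint_count = 0

    def update_content(self, content: str) -> None:
        # Quick unchanged-content check avoids redundant formatting and repaints
        if self.content == content:
            return
        self.content = content
        self.paint_count += 1  # stand-in for self.update(..., refresh=False)

label = StreamingLabel()
for chunk in ["Hi", "Hi", "Hi there", "Hi there"]:
    label.update_content(chunk)
print(label.paint_count)  # prints 2: the duplicate chunks were skipped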
app/utils.py
CHANGED
@@ -165,6 +165,9 @@ async def generate_streaming_response(
 
     debug_log(f"Messages validation complete: {len(messages)} total messages")
 
+    # Import time module within the worker function scope
+    import time
+
     full_response = ""
     buffer = []
     last_update = time.time()
@@ -361,7 +364,7 @@ async def generate_streaming_response(
         return full_response
     return None # Indicate completion without full response (e.g., error before loop)
 
-def ensure_ollama_running() -> bool:
+async def ensure_ollama_running() -> bool:
     """
     Check if Ollama is running and try to start it if not.
     Returns True if Ollama is running after check/start attempt.
@@ -388,8 +391,7 @@ def ensure_ollama_running() -> bool:
             )
 
             # Wait a moment for it to start
-
-            time.sleep(2)
+            await asyncio.sleep(2) # Use asyncio.sleep instead of time.sleep
 
             # Check if process is still running
             if process.poll() is None:
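Making ensure_ollama_running a coroutine lets the two-second startup wait yield to the event loop instead of freezing the TUI. A hedged sketch of the check-then-start flow, assuming `ollama serve` is the launch command and using only the standard library:

import asyncio
import subprocess
from urllib.request import urlopen

async def ensure_ollama_running(base_url: str = "http://localhost:11434") -> bool:
    """Return True if an Ollama server responds, starting one if necessary."""
    def is_up() -> bool:
        try:
            with urlopen(base_url, timeout=2):
                return True
        except OSError:
            return False

    # Run the blocking HTTP probe off the event loop
    if await asyncio.to_thread(is_up):
        return True

    # Assumed launch command; the package's real utils.py may differ
    process = subprocess.Popen(
        ["ollama", "serve"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    # Wait a moment for it to start; asyncio.sleep keeps the UI responsive
    await asyncio.sleep(2)
    return process.poll() is None and await asyncio.to_thread(is_up)

if __name__ == "__main__":
    print(asyncio.run(ensure_ollama_running()))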
{chat_console-0.2.98.dist-info → chat_console-0.2.99.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chat-console
-Version: 0.2.98
+Version: 0.2.99
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway
chat_console-0.2.99.dist-info/RECORD
@@ -0,0 +1,24 @@
+app/__init__.py,sha256=sj_ZaaiYluWSCqDTjASHuPv8IDldwoemQfimWN2okt8,131
+app/config.py,sha256=KawltE7cK2bR9wbe1NSlepwWIjkiFw2bg3vbLmUnP38,7626
+app/database.py,sha256=nt8CVuDpy6zw8mOYqDcfUmNw611t7Ln7pz22M0b6-MI,9967
+app/main.py,sha256=RmVCecgpAvRu6mzX2bu5kXy_wyDdjGpuGYbTb33vM_8,70711
+app/models.py,sha256=4-y9Lytay2exWPFi0FDlVeRL3K2-I7E-jBqNzTfokqY,2644
+app/utils.py,sha256=5AbHvQpiMCDNyVgbjUwNJmrZsx6DpQ9hxm_CsKWjPoI,27541
+app/api/__init__.py,sha256=A8UL84ldYlv8l7O-yKzraVFcfww86SgWfpl4p7R03-w,62
+app/api/anthropic.py,sha256=q3TeniuiYDw5AWK1isESmtWvN1HnQowcDlkFm0lp5wE,12317
+app/api/base.py,sha256=e4SdUFmpeZPK3nNyvWnPOGQaiV1v5gwL1QMq445Qzoo,5743
+app/api/ollama.py,sha256=Yg2K3iqZvlmHhINISSWBQezP3HOzBHvoIIH0TdiKpds,60938
+app/api/openai.py,sha256=TsxbWOGTdiAa-swMBN3VBkKKkc7nucyMQAhj6fNANV8,6074
+app/ui/__init__.py,sha256=RndfbQ1Tv47qdSiuQzvWP96lPS547SDaGE-BgOtiP_w,55
+app/ui/chat_interface.py,sha256=fzc6-_12zf1yflSJi7pX5zZaBy5Ar9APfqYISVMLrg4,15971
+app/ui/chat_list.py,sha256=WQTYVNSSXlx_gQal3YqILZZKL9UiTjmNMIDX2I9pAMM,11205
+app/ui/model_browser.py,sha256=pdblLVkdyVF0_Bo02bqbErGAtieyH-y6IfhMOPEqIso,71124
+app/ui/model_selector.py,sha256=eqwJamLddgt4fS0pJbCyCBe-_shqESm3gM8vJTOWDAs,16956
+app/ui/search.py,sha256=b-m14kG3ovqW1-i0qDQ8KnAqFJbi5b1FLM9dOnbTyIs,9763
+app/ui/styles.py,sha256=04AhPuLrOd2yenfRySFRestPeuTPeMLzhmMB67NdGvw,5615
+chat_console-0.2.99.dist-info/licenses/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
+chat_console-0.2.99.dist-info/METADATA,sha256=ybXgjn-sJk32u9DaSkrSikyGnC8gNaCEO-GaRCkpTSY,2922
+chat_console-0.2.99.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+chat_console-0.2.99.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
+chat_console-0.2.99.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
+chat_console-0.2.99.dist-info/RECORD,,
chat_console-0.2.98.dist-info/RECORD
@@ -1,24 +0,0 @@
-app/__init__.py,sha256=Mx4VF_U7IhLbSFel6dTS0LmWyZ6eBpnmhRlOw9sXLfE,131
-app/config.py,sha256=KawltE7cK2bR9wbe1NSlepwWIjkiFw2bg3vbLmUnP38,7626
-app/database.py,sha256=nt8CVuDpy6zw8mOYqDcfUmNw611t7Ln7pz22M0b6-MI,9967
-app/main.py,sha256=cvAdboaSLNB_eilgrPe0nuAa1bCtsSHnaSURFyJt5zk,69475
-app/models.py,sha256=4-y9Lytay2exWPFi0FDlVeRL3K2-I7E-jBqNzTfokqY,2644
-app/utils.py,sha256=y-U3vWGeJaaynQ1vNkht_DYLnRdzJDJh-u2bAinfj2Y,27428
-app/api/__init__.py,sha256=A8UL84ldYlv8l7O-yKzraVFcfww86SgWfpl4p7R03-w,62
-app/api/anthropic.py,sha256=jpvx_eKd5WqKc2KvpxjbInEfEmgw9o4YX1SXoUOaQ3M,12082
-app/api/base.py,sha256=PB6loU2_SbnKvYuA-KFqR86xUZg1sX-1IgfMl9HKhR8,5724
-app/api/ollama.py,sha256=B9jTeOmJpeAOg6UvvkcDt0xIe5PDkyUryMlhHBt3plA,60744
-app/api/openai.py,sha256=K_fVJ6YNFgUyE_sRAZMnUaCXuiXNm4iEqzTI0I1sdic,5842
-app/ui/__init__.py,sha256=RndfbQ1Tv47qdSiuQzvWP96lPS547SDaGE-BgOtiP_w,55
-app/ui/chat_interface.py,sha256=xU4yFcVS4etS5kx7cmnnUnF5p_nWDNmf68VKbYemJRg,15677
-app/ui/chat_list.py,sha256=WQTYVNSSXlx_gQal3YqILZZKL9UiTjmNMIDX2I9pAMM,11205
-app/ui/model_browser.py,sha256=pdblLVkdyVF0_Bo02bqbErGAtieyH-y6IfhMOPEqIso,71124
-app/ui/model_selector.py,sha256=eqwJamLddgt4fS0pJbCyCBe-_shqESm3gM8vJTOWDAs,16956
-app/ui/search.py,sha256=b-m14kG3ovqW1-i0qDQ8KnAqFJbi5b1FLM9dOnbTyIs,9763
-app/ui/styles.py,sha256=04AhPuLrOd2yenfRySFRestPeuTPeMLzhmMB67NdGvw,5615
-chat_console-0.2.98.dist-info/licenses/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
-chat_console-0.2.98.dist-info/METADATA,sha256=qJwneYlSKgSj2HrjWs9Gj8sLYFiV5nULI31Xv_kmE68,2922
-chat_console-0.2.98.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-chat_console-0.2.98.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
-chat_console-0.2.98.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
-chat_console-0.2.98.dist-info/RECORD,,
{chat_console-0.2.98.dist-info → chat_console-0.2.99.dist-info}/WHEEL
File without changes
{chat_console-0.2.98.dist-info → chat_console-0.2.99.dist-info}/entry_points.txt
File without changes
{chat_console-0.2.98.dist-info → chat_console-0.2.99.dist-info}/licenses/LICENSE
File without changes
{chat_console-0.2.98.dist-info → chat_console-0.2.99.dist-info}/top_level.txt
File without changes