chat-console 0.1.1 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,111 @@
+ Metadata-Version: 2.2
+ Name: chat-console
+ Version: 0.1.1
+ Summary: A command-line interface for chatting with LLMs, storing chats, and (future) RAG interactions
+ Home-page: https://github.com/wazacraftrfid/chat-console
+ Author: Johnathan Greenaway
+ Author-email: john@fimbriata.dev
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.7
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: textual>=0.11.1
+ Requires-Dist: typer>=0.7.0
+ Requires-Dist: requests>=2.28.1
+ Requires-Dist: anthropic>=0.5.0
+ Requires-Dist: openai>=0.27.0
+ Requires-Dist: python-dotenv>=0.21.0
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
+
+ # Chat CLI
+
+ A comprehensive command-line interface for chatting with various AI language models. It lets you interact with different LLM providers through an intuitive terminal-based interface.
+
+ ## Features
+
+ - Interactive terminal UI built with the Textual library
+ - Support for multiple AI models:
+   - OpenAI models (GPT-3.5, GPT-4)
+   - Anthropic models (Claude 3 Opus, Sonnet, Haiku)
+ - Conversation history with search functionality
+ - Customizable response styles (concise, detailed, technical, friendly)
+ - Code syntax highlighting
+ - Markdown rendering
+
+ ## Installation
+
+ 1. Clone this repository:
+ ```
+ git clone https://github.com/yourusername/chat-cli.git
+ cd chat-cli
+ ```
+
+ 2. Install the required dependencies:
+ ```
+ pip install -r requirements.txt
+ ```
+
+ 3. Set up your API keys:
+
+ Create a `.env` file in the project root directory with your API keys:
+ ```
+ OPENAI_API_KEY=your_openai_api_key_here
+ ANTHROPIC_API_KEY=your_anthropic_api_key_here
+ ```
+
+ ## Usage
+
+ Run the application:
+ ```
+ chat-cli
+ ```
+
+ ### Keyboard Shortcuts
+
+ - `q` - Quit the application
+ - `n` - Start a new conversation
+ - `s` - Toggle the sidebar
+ - `f` - Focus the search box
+ - `Escape` - Cancel the current generation
+ - `Ctrl+C` - Quit the application
+
+ ### Configuration
+
+ The application creates a configuration file at `~/.chatcli/config.json` on first run. You can edit this file to:
+
+ - Change the default model
+ - Modify available models
+ - Add or edit response styles
+ - Change the theme
+ - Adjust other settings
+
+ ## Data Storage
+
+ Conversation history is stored in a SQLite database at `~/.chatcli/chat_history.db`.
+
+ ## Development
+
+ The application is structured as follows:
+
+ - `main.py` - Main application entry point
+ - `app/` - Application modules
+   - `api/` - LLM provider API client implementations
+   - `ui/` - User interface components
+   - `config.py` - Configuration management
+   - `database.py` - Database operations
+   - `models.py` - Data models
+   - `utils.py` - Utility functions
+
+ ## License
+
+ MIT
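The Configuration section above says which settings `~/.chatcli/config.json` controls, but not its shape. Here is a minimal sketch of editing it programmatically; every key name (`default_model` and so on) is an assumption inferred from the bullet list, not confirmed by the package, so check the file the app generates on first run.

```python
import json
from pathlib import Path

CONFIG_PATH = Path.home() / ".chatcli" / "config.json"

# Load the config written on first run, change the default model, write it back.
# Key names here are guesses based on the README's list of editable settings.
config = json.loads(CONFIG_PATH.read_text())
config["default_model"] = "claude-3-sonnet"  # hypothetical key name
CONFIG_PATH.write_text(json.dumps(config, indent=2))
```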
@@ -0,0 +1,82 @@
+ # Chat CLI
+
+ A comprehensive command-line interface for chatting with various AI language models. It lets you interact with different LLM providers through an intuitive terminal-based interface.
+
+ ## Features
+
+ - Interactive terminal UI built with the Textual library
+ - Support for multiple AI models:
+   - OpenAI models (GPT-3.5, GPT-4)
+   - Anthropic models (Claude 3 Opus, Sonnet, Haiku)
+ - Conversation history with search functionality
+ - Customizable response styles (concise, detailed, technical, friendly)
+ - Code syntax highlighting
+ - Markdown rendering
+
+ ## Installation
+
+ 1. Clone this repository:
+ ```
+ git clone https://github.com/yourusername/chat-cli.git
+ cd chat-cli
+ ```
+
+ 2. Install the required dependencies:
+ ```
+ pip install -r requirements.txt
+ ```
+
+ 3. Set up your API keys:
+
+ Create a `.env` file in the project root directory with your API keys:
+ ```
+ OPENAI_API_KEY=your_openai_api_key_here
+ ANTHROPIC_API_KEY=your_anthropic_api_key_here
+ ```
+
+ ## Usage
+
+ Run the application:
+ ```
+ chat-cli
+ ```
+
+ ### Keyboard Shortcuts
+
+ - `q` - Quit the application
+ - `n` - Start a new conversation
+ - `s` - Toggle the sidebar
+ - `f` - Focus the search box
+ - `Escape` - Cancel the current generation
+ - `Ctrl+C` - Quit the application
+
+ ### Configuration
+
+ The application creates a configuration file at `~/.chatcli/config.json` on first run. You can edit this file to:
+
+ - Change the default model
+ - Modify available models
+ - Add or edit response styles
+ - Change the theme
+ - Adjust other settings
+
+ ## Data Storage
+
+ Conversation history is stored in a SQLite database at `~/.chatcli/chat_history.db`.
+
+ ## Development
+
+ The application is structured as follows:
+
+ - `main.py` - Main application entry point
+ - `app/` - Application modules
+   - `api/` - LLM provider API client implementations
+   - `ui/` - User interface components
+   - `config.py` - Configuration management
+   - `database.py` - Database operations
+   - `models.py` - Data models
+   - `utils.py` - Utility functions
+
+ ## License
+
+ MIT
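The Data Storage section pins the history database to `~/.chatcli/chat_history.db` but does not document its schema. A quick way to inspect it without assuming table names is to read them from `sqlite_master`:

```python
import sqlite3
from pathlib import Path

db_path = Path.home() / ".chatcli" / "chat_history.db"
conn = sqlite3.connect(db_path)

# Discover whatever tables the app created, then print a few rows from each.
tables = [row[0] for row in conn.execute(
    "SELECT name FROM sqlite_master WHERE type='table'")]
for table in tables:
    print(f"-- {table}")
    for row in conn.execute(f'SELECT * FROM "{table}" LIMIT 3'):
        print(row)
conn.close()
```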
@@ -0,0 +1,6 @@
+ """
+ Chat CLI
+ A command-line interface for chatting with various LLM providers like ChatGPT and Claude.
+ """
+
+ __version__ = "1.0.0"
@@ -0,0 +1 @@
+ """API client implementations for different LLM providers."""
@@ -0,0 +1,93 @@
+ import anthropic
+ from typing import List, Dict, Any, Optional, AsyncGenerator
+ from .base import BaseModelClient
+ from ..config import ANTHROPIC_API_KEY
+
+ class AnthropicClient(BaseModelClient):
+     def __init__(self):
+         # Async client so both the completion and streaming methods can be awaited
+         self.client = anthropic.AsyncAnthropic(api_key=ANTHROPIC_API_KEY)
+
+     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> List[Dict[str, str]]:
+         """Prepare messages for the Claude API"""
+         # Anthropic expects roles to be 'user' or 'assistant'
+         processed_messages = []
+
+         for msg in messages:
+             role = msg["role"]
+             if role == "system":
+                 # Convert system messages to user messages with a special prefix
+                 processed_messages.append({
+                     "role": "user",
+                     "content": f"<system>\n{msg['content']}\n</system>"
+                 })
+             else:
+                 processed_messages.append(msg)
+
+         # Add style instructions if provided
+         if style and style != "default":
+             # Find the first user message to attach the style to
+             for msg in processed_messages:
+                 if msg["role"] == "user":
+                     content = msg["content"]
+                     if "<userStyle>" not in content:
+                         style_instructions = self._get_style_instructions(style)
+                         msg["content"] = f"<userStyle>{style_instructions}</userStyle>\n\n{content}"
+                     break
+
+         return processed_messages
+
+     def _get_style_instructions(self, style: str) -> str:
+         """Get formatting instructions for different styles"""
+         styles = {
+             "concise": "Be extremely concise and to the point. Use short sentences and paragraphs. Avoid unnecessary details.",
+             "detailed": "Be comprehensive and thorough in your responses. Provide detailed explanations, examples, and cover all relevant aspects of the topic.",
+             "technical": "Use precise technical language and terminology. Be formal and focus on accuracy and technical details.",
+             "friendly": "Be warm, approachable and conversational. Use casual language, personal examples, and a friendly tone.",
+         }
+
+         return styles.get(style, "")
+
+     async def generate_completion(self, messages: List[Dict[str, str]],
+                                   model: str,
+                                   style: Optional[str] = None,
+                                   temperature: float = 0.7,
+                                   max_tokens: Optional[int] = None) -> str:
+         """Generate a text completion using Claude"""
+         processed_messages = self._prepare_messages(messages, style)
+
+         response = await self.client.messages.create(
+             model=model,
+             messages=processed_messages,
+             temperature=temperature,
+             max_tokens=max_tokens or 1024,
+         )
+
+         return response.content[0].text
+
+     async def generate_stream(self, messages: List[Dict[str, str]],
+                               model: str,
+                               style: Optional[str] = None,
+                               temperature: float = 0.7,
+                               max_tokens: Optional[int] = None) -> AsyncGenerator[str, None]:
+         """Generate a streaming text completion using Claude"""
+         processed_messages = self._prepare_messages(messages, style)
+
+         # messages.stream() is an async context manager; text_stream yields text deltas
+         async with self.client.messages.stream(
+             model=model,
+             messages=processed_messages,
+             temperature=temperature,
+             max_tokens=max_tokens or 1024,
+         ) as stream:
+             async for text in stream.text_stream:
+                 yield text
+
+     def get_available_models(self) -> List[Dict[str, Any]]:
+         """Get list of available Claude models"""
+         return [
+             {"id": "claude-3-opus", "name": "Claude 3 Opus"},
+             {"id": "claude-3-sonnet", "name": "Claude 3 Sonnet"},
+             {"id": "claude-3-haiku", "name": "Claude 3 Haiku"},
+             {"id": "claude-3.7-sonnet", "name": "Claude 3.7 Sonnet"},
+         ]
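A minimal driver for the client above, assuming `ANTHROPIC_API_KEY` is set (directly or via `.env`). The model id comes from the package's own `get_available_models` list; the `app.api.anthropic` import path is an assumption based on the README's `app/` layout.

```python
import asyncio
from app.api.anthropic import AnthropicClient  # path assumed from the README layout

async def main():
    client = AnthropicClient()
    reply = await client.generate_completion(
        messages=[{"role": "user", "content": "Summarize SQLite in one line."}],
        model="claude-3-sonnet",  # id taken from get_available_models() above
        style="concise",
    )
    print(reply)

asyncio.run(main())
```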
@@ -0,0 +1,73 @@
+ from abc import ABC, abstractmethod
+ from typing import List, Dict, Any, Optional, AsyncGenerator
+
+ class BaseModelClient(ABC):
+     """Base class for AI model clients"""
+
+     @abstractmethod
+     async def generate_completion(self, messages: List[Dict[str, str]],
+                                   model: str,
+                                   style: Optional[str] = None,
+                                   temperature: float = 0.7,
+                                   max_tokens: Optional[int] = None) -> str:
+         """Generate a text completion"""
+         pass
+
+     @abstractmethod
+     async def generate_stream(self, messages: List[Dict[str, str]],
+                               model: str,
+                               style: Optional[str] = None,
+                               temperature: float = 0.7,
+                               max_tokens: Optional[int] = None) -> AsyncGenerator[str, None]:
+         """Generate a streaming text completion"""
+         yield ""  # A yield is required so this abstract method is an async generator
+
+     @abstractmethod
+     def get_available_models(self) -> List[Dict[str, Any]]:
+         """Get list of available models from this provider"""
+         pass
+
+     @staticmethod
+     def get_client_for_model(model_name: str) -> 'BaseModelClient':
+         """Factory method to get the appropriate client for a model"""
+         from ..config import CONFIG
+         from .anthropic import AnthropicClient
+         from .openai import OpenAIClient
+
+         # For known models, use their configured provider
+         model_info = CONFIG["available_models"].get(model_name)
+         if model_info:
+             provider = model_info["provider"]
+         else:
+             # For custom models, infer the provider from the name
+             model_name_lower = model_name.lower()
+             if any(name in model_name_lower for name in ["gpt", "text-", "davinci"]):
+                 provider = "openai"
+             elif any(name in model_name_lower for name in ["claude", "anthropic"]):
+                 provider = "anthropic"
+             elif any(name in model_name_lower for name in ["llama", "mistral", "codellama", "gemma"]):
+                 provider = "ollama"
+             else:
+                 # Ask the local Ollama server first (get_available_models is synchronous)
+                 from .ollama import OllamaClient
+                 try:
+                     client = OllamaClient()
+                     models = client.get_available_models()
+                     if any(model["id"] == model_name for model in models):
+                         provider = "ollama"
+                     else:
+                         # Default to OpenAI if not found
+                         provider = "openai"
+                 except Exception:
+                     # Default to OpenAI if Ollama is not available
+                     provider = "openai"
+
+         if provider == "anthropic":
+             return AnthropicClient()
+         elif provider == "openai":
+             return OpenAIClient()
+         elif provider == "ollama":
+             from .ollama import OllamaClient
+             return OllamaClient()
+         else:
+             raise ValueError(f"Unknown provider: {provider}")
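The factory resolves a provider in four steps: config lookup, name heuristics, an Ollama probe, then an OpenAI fallback, so callers never name a provider explicitly. A sketch of how calling code would use it (the import path is again assumed from the README layout):

```python
from app.api.base import BaseModelClient  # path assumed from the README layout

# Name heuristics pick these without any config entry:
anthropic_client = BaseModelClient.get_client_for_model("claude-3-haiku")  # AnthropicClient
openai_client = BaseModelClient.get_client_for_model("gpt-4-turbo")        # OpenAIClient
ollama_client = BaseModelClient.get_client_for_model("mistral")            # OllamaClient

# An unrecognized name probes the local Ollama server, else falls back to OpenAI.
unknown_client = BaseModelClient.get_client_for_model("my-custom-model")
```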
@@ -0,0 +1,122 @@
+ import aiohttp
+ import json
+ import requests
+ from typing import List, Dict, Any, Optional, AsyncGenerator
+ from .base import BaseModelClient
+
+ class OllamaClient(BaseModelClient):
+     def __init__(self):
+         self.base_url = "http://localhost:11434"
+
+     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> str:
+         """Convert chat messages to Ollama's prompt format"""
+         # Flatten the conversation into a single string with role prefixes
+         formatted_messages = []
+
+         for msg in messages:
+             role = msg["role"]
+             content = msg["content"]
+
+             if role == "system":
+                 formatted_messages.append(f"System: {content}")
+             elif role == "user":
+                 formatted_messages.append(f"Human: {content}")
+             elif role == "assistant":
+                 formatted_messages.append(f"Assistant: {content}")
+
+         # Add style instructions if provided
+         if style and style != "default":
+             style_instructions = self._get_style_instructions(style)
+             formatted_messages.insert(0, f"System: {style_instructions}")
+
+         return "\n\n".join(formatted_messages)
+
+     def _get_style_instructions(self, style: str) -> str:
+         """Get formatting instructions for different styles"""
+         styles = {
+             "concise": "Be extremely concise and to the point. Use short sentences and avoid unnecessary details.",
+             "detailed": "Be comprehensive and thorough. Provide detailed explanations and examples.",
+             "technical": "Use precise technical language and terminology. Focus on accuracy and technical details.",
+             "friendly": "Be warm and conversational. Use casual language and a friendly tone.",
+         }
+
+         return styles.get(style, "")
+
+     def _request_options(self, temperature: float, max_tokens: Optional[int]) -> Dict[str, Any]:
+         """Ollama reads sampling parameters from an 'options' object, not top-level keys"""
+         options: Dict[str, Any] = {"temperature": temperature}
+         if max_tokens is not None:
+             options["num_predict"] = max_tokens
+         return options
+
+     async def generate_completion(self, messages: List[Dict[str, str]],
+                                   model: str,
+                                   style: Optional[str] = None,
+                                   temperature: float = 0.7,
+                                   max_tokens: Optional[int] = None) -> str:
+         """Generate a text completion using Ollama"""
+         prompt = self._prepare_messages(messages, style)
+
+         async with aiohttp.ClientSession() as session:
+             async with session.post(
+                 f"{self.base_url}/api/generate",
+                 json={
+                     "model": model,
+                     "prompt": prompt,
+                     "options": self._request_options(temperature, max_tokens),
+                     "stream": False
+                 }
+             ) as response:
+                 response.raise_for_status()
+                 data = await response.json()
+                 return data["response"]
+
+     async def generate_stream(self, messages: List[Dict[str, str]],
+                               model: str,
+                               style: Optional[str] = None,
+                               temperature: float = 0.7,
+                               max_tokens: Optional[int] = None) -> AsyncGenerator[str, None]:
+         """Generate a streaming text completion using Ollama"""
+         prompt = self._prepare_messages(messages, style)
+
+         async with aiohttp.ClientSession() as session:
+             async with session.post(
+                 f"{self.base_url}/api/generate",
+                 json={
+                     "model": model,
+                     "prompt": prompt,
+                     "options": self._request_options(temperature, max_tokens),
+                     "stream": True
+                 }
+             ) as response:
+                 response.raise_for_status()
+                 # The stream is newline-delimited JSON objects
+                 async for line in response.content:
+                     if line:
+                         try:
+                             data = json.loads(line.decode().strip())
+                             if "response" in data:
+                                 yield data["response"]
+                         except json.JSONDecodeError:
+                             continue
+
+     def get_available_models(self) -> List[Dict[str, Any]]:
+         """Get list of available Ollama models"""
+         # Synchronous on purpose: the base-class factory calls this outside an event loop
+         try:
+             response = requests.get(f"{self.base_url}/api/tags", timeout=2)
+             response.raise_for_status()
+             models = response.json()["models"]
+
+             return [
+                 {"id": model["name"], "name": model["name"].title()}
+                 for model in models
+             ]
+         except Exception:
+             # Return some default models if Ollama is not running
+             return [
+                 {"id": "llama2", "name": "Llama 2"},
+                 {"id": "mistral", "name": "Mistral"},
+                 {"id": "codellama", "name": "Code Llama"},
+                 {"id": "gemma", "name": "Gemma"}
+             ]
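Streaming from the Ollama client uses the same interface as the other providers. A sketch that prints tokens as they arrive, assuming a local Ollama server with `llama2` pulled (import path assumed from the README layout):

```python
import asyncio
from app.api.ollama import OllamaClient  # path assumed from the README layout

async def main():
    client = OllamaClient()
    async for token in client.generate_stream(
        messages=[{"role": "user", "content": "Name three SQL join types."}],
        model="llama2",
    ):
        print(token, end="", flush=True)
    print()

asyncio.run(main())
```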
@@ -0,0 +1,78 @@
+ from openai import AsyncOpenAI
+ from typing import List, Dict, Any, Optional, AsyncGenerator
+ from .base import BaseModelClient
+ from ..config import OPENAI_API_KEY
+
+ class OpenAIClient(BaseModelClient):
+     def __init__(self):
+         self.client = AsyncOpenAI(api_key=OPENAI_API_KEY)
+
+     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> List[Dict[str, str]]:
+         """Prepare messages for the OpenAI API"""
+         processed_messages = messages.copy()
+
+         # Add style instructions if provided
+         if style and style != "default":
+             style_instructions = self._get_style_instructions(style)
+             processed_messages.insert(0, {
+                 "role": "system",
+                 "content": style_instructions
+             })
+
+         return processed_messages
+
+     def _get_style_instructions(self, style: str) -> str:
+         """Get formatting instructions for different styles"""
+         styles = {
+             "concise": "You are a concise assistant. Provide brief, to-the-point responses without unnecessary elaboration.",
+             "detailed": "You are a detailed assistant. Provide comprehensive responses with thorough explanations and examples.",
+             "technical": "You are a technical assistant. Use precise technical language and focus on accuracy and technical details.",
+             "friendly": "You are a friendly assistant. Use a warm, conversational tone and relatable examples.",
+         }
+
+         return styles.get(style, "")
+
+     async def generate_completion(self, messages: List[Dict[str, str]],
+                                   model: str,
+                                   style: Optional[str] = None,
+                                   temperature: float = 0.7,
+                                   max_tokens: Optional[int] = None) -> str:
+         """Generate a text completion using OpenAI"""
+         processed_messages = self._prepare_messages(messages, style)
+
+         response = await self.client.chat.completions.create(
+             model=model,
+             messages=processed_messages,
+             temperature=temperature,
+             max_tokens=max_tokens,
+         )
+
+         return response.choices[0].message.content
+
+     async def generate_stream(self, messages: List[Dict[str, str]],
+                               model: str,
+                               style: Optional[str] = None,
+                               temperature: float = 0.7,
+                               max_tokens: Optional[int] = None) -> AsyncGenerator[str, None]:
+         """Generate a streaming text completion using OpenAI"""
+         processed_messages = self._prepare_messages(messages, style)
+
+         stream = await self.client.chat.completions.create(
+             model=model,
+             messages=processed_messages,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             stream=True,
+         )
+
+         async for chunk in stream:
+             if chunk.choices and chunk.choices[0].delta.content:
+                 yield chunk.choices[0].delta.content
+
+     def get_available_models(self) -> List[Dict[str, Any]]:
+         """Get list of available OpenAI models"""
+         return [
+             {"id": "gpt-3.5-turbo", "name": "GPT-3.5 Turbo"},
+             {"id": "gpt-4", "name": "GPT-4"},
+             {"id": "gpt-4-turbo", "name": "GPT-4 Turbo"}
+         ]
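The `style` plumbing differs per provider: OpenAI gets a prepended system message, Claude a `<userStyle>` tag on the first user message, and Ollama a `System:` prefix line. A small check of what `_prepare_messages` produces for OpenAI, needing no API key or network; the `__new__` trick below skips `__init__` (where the key is read) and the import path is assumed from the README layout:

```python
from app.api.openai import OpenAIClient  # path assumed from the README layout

# Bypass __init__ so no OPENAI_API_KEY is needed just to inspect message prep.
client = OpenAIClient.__new__(OpenAIClient)
msgs = client._prepare_messages(
    [{"role": "user", "content": "hi"}],
    style="concise",
)
print(msgs[0]["role"])     # system
print(msgs[0]["content"])  # "You are a concise assistant. ..."
```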