chat-console 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
app/__init__.py ADDED
@@ -0,0 +1,6 @@
+ """
+ Chat CLI
+ A command-line interface for chatting with various LLM providers like ChatGPT and Claude.
+ """
+
+ __version__ = "1.0.0"
app/api/__init__.py ADDED
@@ -0,0 +1 @@
+ """API client implementations for different LLM providers."""
app/api/anthropic.py ADDED
@@ -0,0 +1,94 @@
+ import anthropic
+ from typing import List, Dict, Any, Optional, AsyncGenerator
+ from .base import BaseModelClient
+ from ..config import ANTHROPIC_API_KEY
+
+ class AnthropicClient(BaseModelClient):
+     def __init__(self):
+         # Async client, so the coroutine methods below don't block the event loop
+         self.client = anthropic.AsyncAnthropic(api_key=ANTHROPIC_API_KEY)
+
+     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> List[Dict[str, str]]:
+         """Prepare messages for the Claude API"""
+         # Anthropic expects the role to be 'user' or 'assistant'
+         processed_messages = []
+
+         for msg in messages:
+             role = msg["role"]
+             if role == "system":
+                 # For Claude, convert system messages to user messages with a special prefix
+                 processed_messages.append({
+                     "role": "user",
+                     "content": f"<system>\n{msg['content']}\n</system>"
+                 })
+             else:
+                 processed_messages.append(msg)
+
+         # Add style instructions if provided
+         if style and style != "default":
+             # Attach the style to the first user message
+             for msg in processed_messages:
+                 if msg["role"] == "user":
+                     content = msg["content"]
+                     if "<userStyle>" not in content:
+                         style_instructions = self._get_style_instructions(style)
+                         msg["content"] = f"<userStyle>{style_instructions}</userStyle>\n\n{content}"
+                     break
+
+         return processed_messages
+
+     def _get_style_instructions(self, style: str) -> str:
+         """Get formatting instructions for different styles"""
+         styles = {
+             "concise": "Be extremely concise and to the point. Use short sentences and paragraphs. Avoid unnecessary details.",
+             "detailed": "Be comprehensive and thorough in your responses. Provide detailed explanations, examples, and cover all relevant aspects of the topic.",
+             "technical": "Use precise technical language and terminology. Be formal and focus on accuracy and technical details.",
+             "friendly": "Be warm, approachable and conversational. Use casual language, personal examples, and a friendly tone.",
+         }
+
+         return styles.get(style, "")
+
+     async def generate_completion(self, messages: List[Dict[str, str]],
+                                   model: str,
+                                   style: Optional[str] = None,
+                                   temperature: float = 0.7,
+                                   max_tokens: Optional[int] = None) -> str:
+         """Generate a text completion using Claude"""
+         processed_messages = self._prepare_messages(messages, style)
+
+         response = await self.client.messages.create(
+             model=model,
+             messages=processed_messages,
+             temperature=temperature,
+             max_tokens=max_tokens or 1024,
+         )
+
+         return response.content[0].text
+
+     async def generate_stream(self, messages: List[Dict[str, str]],
+                               model: str,
+                               style: Optional[str] = None,
+                               temperature: float = 0.7,
+                               max_tokens: Optional[int] = None) -> AsyncGenerator[str, None]:
+         """Generate a streaming text completion using Claude"""
+         processed_messages = self._prepare_messages(messages, style)
+
+         # messages.stream() returns an async context manager whose
+         # text_stream yields text deltas as they arrive
+         async with self.client.messages.stream(
+             model=model,
+             messages=processed_messages,
+             temperature=temperature,
+             max_tokens=max_tokens or 1024,
+         ) as stream:
+             async for text in stream.text_stream:
+                 yield text
+
+     def get_available_models(self) -> List[Dict[str, Any]]:
+         """Get list of available Claude models"""
+         return [
+             {"id": "claude-3-opus", "name": "Claude 3 Opus"},
+             {"id": "claude-3-sonnet", "name": "Claude 3 Sonnet"},
+             {"id": "claude-3-haiku", "name": "Claude 3 Haiku"},
+             {"id": "claude-3.7-sonnet", "name": "Claude 3.7 Sonnet"},
+         ]
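
A minimal usage sketch for the client above (illustrative only; it assumes ANTHROPIC_API_KEY is set in the environment and that the "claude-3-haiku" id from get_available_models resolves on the API side):

    import asyncio
    from app.api.anthropic import AnthropicClient

    async def main():
        client = AnthropicClient()
        messages = [{"role": "user", "content": "Say hello in one sentence."}]
        # Print tokens as they stream in
        async for text in client.generate_stream(messages, model="claude-3-haiku", style="concise"):
            print(text, end="", flush=True)

    asyncio.run(main())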
app/api/base.py ADDED
@@ -0,0 +1,76 @@
+ from abc import ABC, abstractmethod
+ from typing import List, Dict, Any, Optional, AsyncGenerator
+
+ class BaseModelClient(ABC):
+     """Base class for AI model clients"""
+
+     @abstractmethod
+     async def generate_completion(self, messages: List[Dict[str, str]],
+                                   model: str,
+                                   style: Optional[str] = None,
+                                   temperature: float = 0.7,
+                                   max_tokens: Optional[int] = None) -> str:
+         """Generate a text completion"""
+         pass
+
+     @abstractmethod
+     async def generate_stream(self, messages: List[Dict[str, str]],
+                               model: str,
+                               style: Optional[str] = None,
+                               temperature: float = 0.7,
+                               max_tokens: Optional[int] = None) -> AsyncGenerator[str, None]:
+         """Generate a streaming text completion"""
+         # The bare yield marks this as an async generator; subclasses override it
+         yield ""
+
+     @abstractmethod
+     def get_available_models(self) -> List[Dict[str, Any]]:
+         """Get list of available models from this provider"""
+         pass
+
+     @staticmethod
+     def get_client_for_model(model_name: str) -> 'BaseModelClient':
+         """Factory method to get the appropriate client for a model"""
+         from ..config import CONFIG
+         from .anthropic import AnthropicClient
+         from .openai import OpenAIClient
+
+         # For known models, use their configured provider
+         model_info = CONFIG["available_models"].get(model_name)
+         if model_info:
+             provider = model_info["provider"]
+         else:
+             # For custom models, infer the provider from the name prefix
+             model_name_lower = model_name.lower()
+             if any(name in model_name_lower for name in ["gpt", "text-", "davinci"]):
+                 provider = "openai"
+             elif any(name in model_name_lower for name in ["claude", "anthropic"]):
+                 provider = "anthropic"
+             elif any(name in model_name_lower for name in ["llama", "mistral", "codellama", "gemma"]):
+                 provider = "ollama"
+             else:
+                 # Try the Ollama API first
+                 import asyncio
+                 from .ollama import OllamaClient
+                 try:
+                     client = OllamaClient()
+                     # get_available_models is async; this factory is synchronous
+                     models = asyncio.run(client.get_available_models())
+                     if any(model["id"] == model_name for model in models):
+                         provider = "ollama"
+                     else:
+                         # Default to OpenAI if not found
+                         provider = "openai"
+                 except Exception:
+                     # Default to OpenAI if Ollama is not available
+                     provider = "openai"
+
+         if provider == "anthropic":
+             return AnthropicClient()
+         elif provider == "openai":
+             return OpenAIClient()
+         elif provider == "ollama":
+             from .ollama import OllamaClient
+             return OllamaClient()
+         else:
+             raise ValueError(f"Unknown provider: {provider}")
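
A sketch of how the factory resolves providers under the rules above (the model names here are illustrative, not guaranteed to exist):

    from app.api.base import BaseModelClient

    # Known model: provider is read from CONFIG["available_models"]
    client = BaseModelClient.get_client_for_model("gpt-4")        # -> OpenAIClient

    # Unknown models: provider inferred from the name prefix
    client = BaseModelClient.get_client_for_model("claude-next")  # -> AnthropicClient
    client = BaseModelClient.get_client_for_model("mistral-7b")   # -> OllamaClient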
app/api/ollama.py ADDED
@@ -0,0 +1,117 @@
+ import aiohttp
+ import json
+ from typing import List, Dict, Any, Optional, AsyncGenerator
+ from .base import BaseModelClient
+ from ..config import OLLAMA_BASE_URL
+
+ class OllamaClient(BaseModelClient):
+     def __init__(self):
+         self.base_url = OLLAMA_BASE_URL
+
+     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> str:
+         """Convert chat messages to Ollama's prompt format"""
+         # Flatten the messages into a single string with role prefixes
+         formatted_messages = []
+
+         for msg in messages:
+             role = msg["role"]
+             content = msg["content"]
+
+             if role == "system":
+                 formatted_messages.append(f"System: {content}")
+             elif role == "user":
+                 formatted_messages.append(f"Human: {content}")
+             elif role == "assistant":
+                 formatted_messages.append(f"Assistant: {content}")
+
+         # Add style instructions if provided
+         if style and style != "default":
+             style_instructions = self._get_style_instructions(style)
+             formatted_messages.insert(0, f"System: {style_instructions}")
+
+         return "\n\n".join(formatted_messages)
+
+     def _get_style_instructions(self, style: str) -> str:
+         """Get formatting instructions for different styles"""
+         styles = {
+             "concise": "Be extremely concise and to the point. Use short sentences and avoid unnecessary details.",
+             "detailed": "Be comprehensive and thorough. Provide detailed explanations and examples.",
+             "technical": "Use precise technical language and terminology. Focus on accuracy and technical details.",
+             "friendly": "Be warm and conversational. Use casual language and a friendly tone.",
+         }
+
+         return styles.get(style, "")
+
+     async def generate_completion(self, messages: List[Dict[str, str]],
+                                   model: str,
+                                   style: Optional[str] = None,
+                                   temperature: float = 0.7,
+                                   max_tokens: Optional[int] = None) -> str:
+         """Generate a text completion using Ollama"""
+         prompt = self._prepare_messages(messages, style)
+
+         async with aiohttp.ClientSession() as session:
+             async with session.post(
+                 f"{self.base_url}/api/generate",
+                 json={
+                     "model": model,
+                     "prompt": prompt,
+                     "temperature": temperature,
+                     "stream": False
+                 }
+             ) as response:
+                 response.raise_for_status()
+                 data = await response.json()
+                 return data["response"]
+
+     async def generate_stream(self, messages: List[Dict[str, str]],
+                               model: str,
+                               style: Optional[str] = None,
+                               temperature: float = 0.7,
+                               max_tokens: Optional[int] = None) -> AsyncGenerator[str, None]:
+         """Generate a streaming text completion using Ollama"""
+         prompt = self._prepare_messages(messages, style)
+
+         async with aiohttp.ClientSession() as session:
+             async with session.post(
+                 f"{self.base_url}/api/generate",
+                 json={
+                     "model": model,
+                     "prompt": prompt,
+                     "temperature": temperature,
+                     "stream": True
+                 }
+             ) as response:
+                 response.raise_for_status()
+                 # Ollama streams one JSON object per line
+                 async for line in response.content:
+                     if line:
+                         chunk = line.decode().strip()
+                         try:
+                             data = json.loads(chunk)
+                             if "response" in data:
+                                 yield data["response"]
+                         except json.JSONDecodeError:
+                             continue
+
+     async def get_available_models(self) -> List[Dict[str, Any]]:
+         """Get list of available Ollama models"""
+         try:
+             async with aiohttp.ClientSession() as session:
+                 async with session.get(f"{self.base_url}/api/tags") as response:
+                     response.raise_for_status()
+                     data = await response.json()
+                     models = data["models"]
+
+                     return [
+                         {"id": model["name"], "name": model["name"].title()}
+                         for model in models
+                     ]
+         except Exception:
+             # Return some default models if Ollama is not running
+             return [
+                 {"id": "llama2", "name": "Llama 2"},
+                 {"id": "mistral", "name": "Mistral"},
+                 {"id": "codellama", "name": "Code Llama"},
+                 {"id": "gemma", "name": "Gemma"}
+             ]
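
For reference, a sketch of the flattened prompt that _prepare_messages produces (it calls a private helper, so this is for illustration only; the style line is prepended as an extra System prefix):

    client = OllamaClient()
    prompt = client._prepare_messages(
        [{"role": "system", "content": "You are terse."},
         {"role": "user", "content": "Hi"}],
        style="concise",
    )
    # prompt is now:
    # System: Be extremely concise and to the point. ...
    #
    # System: You are terse.
    #
    # Human: Hi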
app/api/openai.py ADDED
@@ -0,0 +1,78 @@
+ from openai import AsyncOpenAI
+ from typing import List, Dict, Any, Optional, AsyncGenerator
+ from .base import BaseModelClient
+ from ..config import OPENAI_API_KEY
+
+ class OpenAIClient(BaseModelClient):
+     def __init__(self):
+         self.client = AsyncOpenAI(api_key=OPENAI_API_KEY)
+
+     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> List[Dict[str, str]]:
+         """Prepare messages for the OpenAI API"""
+         processed_messages = messages.copy()
+
+         # Add style instructions if provided
+         if style and style != "default":
+             style_instructions = self._get_style_instructions(style)
+             processed_messages.insert(0, {
+                 "role": "system",
+                 "content": style_instructions
+             })
+
+         return processed_messages
+
+     def _get_style_instructions(self, style: str) -> str:
+         """Get formatting instructions for different styles"""
+         styles = {
+             "concise": "You are a concise assistant. Provide brief, to-the-point responses without unnecessary elaboration.",
+             "detailed": "You are a detailed assistant. Provide comprehensive responses with thorough explanations and examples.",
+             "technical": "You are a technical assistant. Use precise technical language and focus on accuracy and technical details.",
+             "friendly": "You are a friendly assistant. Use a warm, conversational tone and relatable examples.",
+         }
+
+         return styles.get(style, "")
+
+     async def generate_completion(self, messages: List[Dict[str, str]],
+                                   model: str,
+                                   style: Optional[str] = None,
+                                   temperature: float = 0.7,
+                                   max_tokens: Optional[int] = None) -> str:
+         """Generate a text completion using OpenAI"""
+         processed_messages = self._prepare_messages(messages, style)
+
+         response = await self.client.chat.completions.create(
+             model=model,
+             messages=processed_messages,
+             temperature=temperature,
+             max_tokens=max_tokens,
+         )
+
+         return response.choices[0].message.content
+
+     async def generate_stream(self, messages: List[Dict[str, str]],
+                               model: str,
+                               style: Optional[str] = None,
+                               temperature: float = 0.7,
+                               max_tokens: Optional[int] = None) -> AsyncGenerator[str, None]:
+         """Generate a streaming text completion using OpenAI"""
+         processed_messages = self._prepare_messages(messages, style)
+
+         stream = await self.client.chat.completions.create(
+             model=model,
+             messages=processed_messages,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             stream=True,
+         )
+
+         async for chunk in stream:
+             if chunk.choices and chunk.choices[0].delta.content:
+                 yield chunk.choices[0].delta.content
+
+     def get_available_models(self) -> List[Dict[str, Any]]:
+         """Get list of available OpenAI models"""
+         return [
+             {"id": "gpt-3.5-turbo", "name": "GPT-3.5 Turbo"},
+             {"id": "gpt-4", "name": "GPT-4"},
+             {"id": "gpt-4-turbo", "name": "GPT-4 Turbo"}
+         ]
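
A minimal usage sketch (assumes OPENAI_API_KEY is set in the environment; the style argument injects the system prompt shown in _get_style_instructions):

    import asyncio
    from app.api.openai import OpenAIClient

    async def main():
        client = OpenAIClient()
        reply = await client.generate_completion(
            [{"role": "user", "content": "Name three HTTP verbs."}],
            model="gpt-3.5-turbo",
            style="concise",
        )
        print(reply)

    asyncio.run(main())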
app/config.py ADDED
@@ -0,0 +1,127 @@
+ import os
+ from dotenv import load_dotenv
+ from pathlib import Path
+ import json
+
+ # Load environment variables
+ load_dotenv()
+
+ # Base paths
+ APP_DIR = Path.home() / ".terminalchat"
+ APP_DIR.mkdir(exist_ok=True)
+ DB_PATH = APP_DIR / "chat_history.db"
+ CONFIG_PATH = APP_DIR / "config.json"
+
+ # API Keys
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
+ ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "")
+ OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
+
+ # Default configuration
+ DEFAULT_CONFIG = {
+     "default_model": "gpt-3.5-turbo",
+     "available_models": {
+         "gpt-3.5-turbo": {
+             "provider": "openai",
+             "max_tokens": 4096,
+             "display_name": "GPT-3.5 Turbo"
+         },
+         "gpt-4": {
+             "provider": "openai",
+             "max_tokens": 8192,
+             "display_name": "GPT-4"
+         },
+         "claude-3-opus": {
+             "provider": "anthropic",
+             "max_tokens": 4096,
+             "display_name": "Claude 3 Opus"
+         },
+         "claude-3-sonnet": {
+             "provider": "anthropic",
+             "max_tokens": 4096,
+             "display_name": "Claude 3 Sonnet"
+         },
+         "claude-3-haiku": {
+             "provider": "anthropic",
+             "max_tokens": 4096,
+             "display_name": "Claude 3 Haiku"
+         },
+         "claude-3.7-sonnet": {
+             "provider": "anthropic",
+             "max_tokens": 4096,
+             "display_name": "Claude 3.7 Sonnet"
+         },
+         "llama2": {
+             "provider": "ollama",
+             "max_tokens": 4096,
+             "display_name": "Llama 2"
+         },
+         "mistral": {
+             "provider": "ollama",
+             "max_tokens": 4096,
+             "display_name": "Mistral"
+         },
+         "codellama": {
+             "provider": "ollama",
+             "max_tokens": 4096,
+             "display_name": "Code Llama"
+         },
+         "gemma": {
+             "provider": "ollama",
+             "max_tokens": 4096,
+             "display_name": "Gemma"
+         }
+     },
+     "theme": "dark",
+     "user_styles": {
+         "default": {
+             "name": "Default",
+             "description": "Standard assistant responses"
+         },
+         "concise": {
+             "name": "Concise",
+             "description": "Brief and to-the-point responses"
+         },
+         "detailed": {
+             "name": "Detailed",
+             "description": "Comprehensive and thorough responses"
+         },
+         "technical": {
+             "name": "Technical",
+             "description": "Technical and precise language"
+         },
+         "friendly": {
+             "name": "Friendly",
+             "description": "Warm and conversational tone"
+         }
+     },
+     "default_style": "default",
+     "max_history_items": 100,
+     "highlight_code": True,
+     "auto_save": True
+ }
+
+ def load_config():
+     """Load the user configuration, creating the default file if it does not exist"""
+     if not CONFIG_PATH.exists():
+         save_config(DEFAULT_CONFIG)
+         return DEFAULT_CONFIG
+
+     try:
+         with open(CONFIG_PATH, 'r') as f:
+             config = json.load(f)
+         # Shallow-merge over the defaults so all top-level keys exist
+         merged_config = DEFAULT_CONFIG.copy()
+         merged_config.update(config)
+         return merged_config
+     except Exception as e:
+         print(f"Error loading config: {e}. Using defaults.")
+         return DEFAULT_CONFIG
+
+ def save_config(config):
+     """Save the configuration to disk"""
+     with open(CONFIG_PATH, 'w') as f:
+         json.dump(config, f, indent=2)
+
+ # Current configuration
+ CONFIG = load_config()
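
Because load_config() shallow-merges the user file over DEFAULT_CONFIG, a partial ~/.terminalchat/config.json only overrides the top-level keys it names. A sketch of changing the default model programmatically:

    from app.config import CONFIG, save_config

    CONFIG["default_model"] = "gpt-4"   # any top-level key from DEFAULT_CONFIG
    save_config(CONFIG)                 # persisted to ~/.terminalchat/config.json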