llm-to-cli 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_to_cli-0.1.0/PKG-INFO +105 -0
- llm_to_cli-0.1.0/README.md +89 -0
- llm_to_cli-0.1.0/cli/main.py +259 -0
- llm_to_cli-0.1.0/cli/main_extend.py +30 -0
- llm_to_cli-0.1.0/cli/utils.py +71 -0
- llm_to_cli-0.1.0/core/__init__.py +0 -0
- llm_to_cli-0.1.0/core/chat.py +101 -0
- llm_to_cli-0.1.0/core/config.py +39 -0
- llm_to_cli-0.1.0/core/prompt.py +2 -0
- llm_to_cli-0.1.0/pyproject.toml +25 -0
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: llm-to-cli
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary:
|
|
5
|
+
Author: tikendraw
|
|
6
|
+
Author-email: tikendraksahu1029@gmail.com
|
|
7
|
+
Requires-Python: >=3.11,<4.0
|
|
8
|
+
Classifier: Programming Language :: Python :: 3
|
|
9
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
10
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
11
|
+
Requires-Dist: click (>=8.1.7,<9.0.0)
|
|
12
|
+
Requires-Dist: litellm (>=1.52.16,<2.0.0)
|
|
13
|
+
Requires-Dist: rich (>=13.9.4,<14.0.0)
|
|
14
|
+
Description-Content-Type: text/markdown
|
|
15
|
+
|
|
16
|
+
# LLM-cli
|
|
17
|
+
A lightweight Command Line Interface (CLI) for interacting with Large Language Models (LLMs) using LiteLLM.
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
## 💡 Why This Project?
|
|
21
|
+
Sometimes network constraints or data limitations make it difficult to access large language models via web interfaces. This CLI provides a lightweight, flexible solution for LLM interactions directly from the terminal.
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
## 🚀 Features
|
|
25
|
+
|
|
26
|
+
- **Simple CLI Interface**: Easily chat with different LLMs from your terminal
|
|
27
|
+
- **Multiple Chat Modes**:
|
|
28
|
+
- Direct single-message chat
|
|
29
|
+
- Interactive chat UI with markdown rendering
|
|
30
|
+
- Image support for vision-capable models
|
|
31
|
+
- **Flexible Configuration**: Customize model, temperature, and system prompts
|
|
32
|
+
- **Easy Configuration Management**: Update settings with a simple command
|
|
33
|
+
|
|
34
|
+
## 🔧 Prerequisites
|
|
35
|
+
|
|
36
|
+
- API keys for the LLM providers you intend to use
|
|
37
|
+
|
|
38
|
+
## 💾 Installation
|
|
39
|
+
|
|
40
|
+
1. Via Pip
|
|
41
|
+
```bash
|
|
42
|
+
pip install llm-cli
|
|
43
|
+
```
|
|
44
|
+
2. From Repo
|
|
45
|
+
```bash
|
|
46
|
+
# Clone the repository
|
|
47
|
+
git clone https://github.com/tikendraw/llm-cli.git
|
|
48
|
+
cd llm-cli
|
|
49
|
+
|
|
50
|
+
# Install
|
|
51
|
+
pip install .
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
## 🖥️ Usage
|
|
55
|
+
|
|
56
|
+
### Basic Chat
|
|
57
|
+
|
|
58
|
+
Send a single message to an LLM:
|
|
59
|
+
|
|
60
|
+
```bash
|
|
61
|
+
llm-cli chat "Hello, how are you?"
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
### Interactive Chat UI
|
|
65
|
+
|
|
66
|
+
Start an interactive chat session:
|
|
67
|
+
|
|
68
|
+
```bash
|
|
69
|
+
llm-cli chatui
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
### Image Support
|
|
73
|
+
|
|
74
|
+
Chat with an image:
|
|
75
|
+
|
|
76
|
+
```bash
|
|
77
|
+
llm-cli chatui --model openai/gpt-4o
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
### Configuration
|
|
81
|
+
|
|
82
|
+
View current configuration:
|
|
83
|
+
```bash
|
|
84
|
+
llm-cli config
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
Update configuration:
|
|
88
|
+
```bash
|
|
89
|
+
llm-cli config model "anthropic/claude-3-haiku"
|
|
90
|
+
llm-cli config temperature 0.7
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
## 🛠️ Commands
|
|
94
|
+
|
|
95
|
+
- `chat`: Send a single message
|
|
96
|
+
- `chatui`: Interactive chat with markdown rendering
|
|
97
|
+
- `chatui`: Interactive chat with image support
|
|
98
|
+
- `config`: Manage CLI configuration
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
## 🤝 Contributing
|
|
102
|
+
|
|
103
|
+
Contributions are welcome! Please feel free to submit a Pull Request.
|
|
104
|
+
|
|
105
|
+
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
# LLM-cli
|
|
2
|
+
A lightweight Command Line Interface (CLI) for interacting with Large Language Models (LLMs) using LiteLLM.
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
## 💡 Why This Project?
|
|
6
|
+
Sometimes network constraints or data limitations make it difficult to access large language models via web interfaces. This CLI provides a lightweight, flexible solution for LLM interactions directly from the terminal.
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
## 🚀 Features
|
|
10
|
+
|
|
11
|
+
- **Simple CLI Interface**: Easily chat with different LLMs from your terminal
|
|
12
|
+
- **Multiple Chat Modes**:
|
|
13
|
+
- Direct single-message chat
|
|
14
|
+
- Interactive chat UI with markdown rendering
|
|
15
|
+
- Image support for vision-capable models
|
|
16
|
+
- **Flexible Configuration**: Customize model, temperature, and system prompts
|
|
17
|
+
- **Easy Configuration Management**: Update settings with a simple command
|
|
18
|
+
|
|
19
|
+
## 🔧 Prerequisites
|
|
20
|
+
|
|
21
|
+
- API keys for the LLM providers you intend to use
|
|
22
|
+
|
|
23
|
+
## 💾 Installation
|
|
24
|
+
|
|
25
|
+
1. Via Pip
|
|
26
|
+
```bash
|
|
27
|
+
pip install llm-cli
|
|
28
|
+
```
|
|
29
|
+
2. From Repo
|
|
30
|
+
```bash
|
|
31
|
+
# Clone the repository
|
|
32
|
+
git clone https://github.com/tikendraw/llm-cli.git
|
|
33
|
+
cd llm-cli
|
|
34
|
+
|
|
35
|
+
# Install
|
|
36
|
+
pip install .
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
## 🖥️ Usage
|
|
40
|
+
|
|
41
|
+
### Basic Chat
|
|
42
|
+
|
|
43
|
+
Send a single message to an LLM:
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
llm-cli chat "Hello, how are you?"
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
### Interactive Chat UI
|
|
50
|
+
|
|
51
|
+
Start an interactive chat session:
|
|
52
|
+
|
|
53
|
+
```bash
|
|
54
|
+
llm-cli chatui
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
### Image Support
|
|
58
|
+
|
|
59
|
+
Chat with an image:
|
|
60
|
+
|
|
61
|
+
```bash
|
|
62
|
+
llm-cli chatui --model openai/gpt-4o
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
### Configuration
|
|
66
|
+
|
|
67
|
+
View current configuration:
|
|
68
|
+
```bash
|
|
69
|
+
llm-cli config
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
Update configuration:
|
|
73
|
+
```bash
|
|
74
|
+
llm-cli config model "anthropic/claude-3-haiku"
|
|
75
|
+
llm-cli config temperature 0.7
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
## 🛠️ Commands
|
|
79
|
+
|
|
80
|
+
- `chat`: Send a single message
|
|
81
|
+
- `chatui`: Interactive chat with markdown rendering
|
|
82
|
+
- `chatui`: Interactive chat with image support
|
|
83
|
+
- `config`: Manage CLI configuration
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
## 🤝 Contributing
|
|
87
|
+
|
|
88
|
+
Contributions are welcome! Please feel free to submit a Pull Request.
|
|
89
|
+
|
|
@@ -0,0 +1,259 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from dataclasses import asdict, fields
|
|
3
|
+
|
|
4
|
+
import click
|
|
5
|
+
from rich.console import Console
|
|
6
|
+
from rich.markdown import Markdown
|
|
7
|
+
|
|
8
|
+
from cli.utils import delete_chat_session, get_chat_history, init_db, save_chat_history
|
|
9
|
+
from core.chat import chat as cc
|
|
10
|
+
from core.chat import is_vision_llm, parse_image, unparse_image
|
|
11
|
+
from core.config import ChatConfig, load_config, save_config
|
|
12
|
+
from core.prompt import system_prompt_cot
|
|
13
|
+
|
|
14
|
+
# Default system prompt for all commands (chain-of-thought variant).
sys_p = system_prompt_cot
# Shared rich console used for all terminal output in this module.
console = Console()
# User configuration loaded once at import time (model, temperature, ...).
configg = load_config()

# Ensure the chat-history SQLite database exists before any command runs.
init_db()
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@click.group
def cli():
    """ llm-cli application """
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
@cli.command()
@click.argument('message', type=str)
@click.option('--system_prompt', '-s', default=sys_p, type=str, help='system prompt to the llm')
@click.option('--model', '-m', default=configg.model, help='model name e.g.: provider/model_name')
@click.option('--temperature', '-t', default=configg.temperature, help='float value between 0 and 1, lower value means more deterministic, higher value means more creative')
@click.option('--image', type=str, default=None, help='file path or url to an image')
@click.option('--no_system_prompt', is_flag=True, help='disable system prompt')
@click.option('--skip_vision_check', is_flag=True, help='skip vision check')
def chat(model, message, temperature, system_prompt, image, no_system_prompt, skip_vision_check):
    """Send a single message (optionally with an image) to the LLM and print the reply.

    Fix: `--image` previously used click.Path(exists=True), which rejected
    URLs even though the help text promises URL support; it is now a plain
    string and existence of local files is checked via parse_image.
    """
    messages = []
    if system_prompt and not no_system_prompt:
        messages.append({"role": "system", "content": system_prompt})

    if image:
        # Refuse image input up front unless the model is vision-capable
        # (or the user explicitly opted out of the check).
        if not (is_vision_llm(model) or skip_vision_check):
            console.print(f'[red]{model} is not a vision model(according to litellm).[/red]')
            return
        content = parse_image(image=image, message=message)
        if not content:
            # parse_image returns [] when a local file path does not exist.
            console.print(f'[red]Image not found: {image}[/red]')
            return
        messages.append({'role' : 'user', 'content' : content})
    else:
        messages.append({"role": "user", "content": message})

    response = cc(model=model, messages=messages, temperature=temperature)
    click.secho(response.choices[0].message['content'], fg='green')
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def print_markdown(message: str) -> None:
    """Render *message* as Markdown, falling back to plain text if rendering fails."""
    try:
        console.print(Markdown(message))
    except Exception:
        console.print(message)
    console.print("\n")
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def show_messages(messages: list[dict], console: Console, show_system_prompt: bool = False) -> None:
    """Display chat messages in the console.

    System messages are hidden unless *show_system_prompt* is True.
    List-shaped contents (multimodal messages) are unpacked into text plus
    an image path via unparse_image before printing.
    """
    # Role -> header label mapping; unknown roles fall back below.
    headers = {
        'user': "[blue]You 👦:[/blue]",
        'assistant': "[green]LLM 🤖:[/green]",
        'system': "[cyan]System 🤖:[/cyan]",
    }
    for entry in messages:
        role = entry['role']
        if role == 'system' and not show_system_prompt:
            continue

        content = entry['content']
        image_path = None
        if isinstance(content, list):
            content, image_path = unparse_image(content)

        console.print(headers.get(role, "[yellow]Unknown Role:[/yellow]"))
        if image_path:
            console.print(f'[yellow]Image is saved here: file:///{image_path}[/yellow]')
        print_markdown(content)
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
@cli.command()
@click.option('--system_prompt', '-s', default=sys_p, type=str, help='System prompt to the LLM')
@click.option('--model', '-m', default=configg.model, help='Model name, e.g., provider/model_name')
@click.option('--temperature', '-t', default=configg.temperature, help=f'Temperature for the LLM, defaults to {configg.temperature}')
@click.option('--no_system_prompt', is_flag=True, help='Disable system prompt')
@click.option('--skip_vision_check', is_flag=True, help="Skip vision model check, (useful when litellm doesn't see the model as vision model)")
@click.option('--session_id', type=str, default=None, help='Session ID to continue')
@click.option('--title', default="Untitled Session", help='Title/description for the session')
def chatui(system_prompt, model, temperature, no_system_prompt, skip_vision_check, session_id, title):
    """Interactive chat interface with markdown rendering and image support.

    Fix: `--title` previously also declared the short flag `-t`, which
    collided with `--temperature -t`; the short flag is dropped from --title.
    """
    console.print("[cyan]Welcome to ChatUI![/cyan]")
    console.print("[cyan]Type '/exit' to quit, '/clear' to reset conversation, '/help' for help, or '/image <path>' to add an image.[/cyan]\n")

    messages = []
    # Image path queued via /image, attached to the next user message.
    pending_image = None

    if system_prompt and not no_system_prompt:
        messages.append({"role": "system", "content": system_prompt})

    # Resume a saved session if an id was supplied and exists in the DB.
    if session_id:
        session = get_chat_history(session_id)
        if session:
            session_id, start_time, session_title, chat_history = session
            messages = chat_history
            console.print(f"[green]Continuing session '{session_title}' (ID: {session_id}) from {start_time}[/green]")
        else:
            console.print(f"[red]Session ID {session_id} not found. Starting a new session.[/red]")
    else:
        session_id = None

    show_messages(messages, console)

    while True:
        user_input = click.prompt(click.style("You 👦", fg="blue"), default="", show_default=False).strip()

        if user_input.lower() in {"/exit", "/quit"}:
            # Persist the conversation before leaving.
            session_id = save_chat_history(messages, session_id=session_id, title=title)
            console.print(f"[cyan]Session saved with ID {session_id}. Goodbye![/cyan]")
            break
        elif user_input.lower() == "/clear":
            messages = [{"role": "system", "content": system_prompt}] if not no_system_prompt else []
            pending_image = None
            console.print("[yellow]Conversation history cleared.[/yellow]\n")
            continue
        elif user_input.lower() == "/help":
            console.print("[yellow]Available commands:[/yellow]\n"
                          "[bold yellow]/help[/bold yellow] - Show help message\n"
                          "[bold yellow]/exit or /quit[/bold yellow] - Exit the chat\n"
                          "[bold yellow]/clear[/bold yellow] - Reset conversation history\n"
                          "[bold yellow]/image <path>[/bold yellow] - Add an image to the conversation\n")
            continue

        if user_input.startswith("/image"):
            try:
                _, image_path = user_input.split(maxsplit=1)
                if not os.path.exists(image_path):
                    console.print(f"[red]Image file '{image_path}' not found![/red]")
                    continue
                if is_vision_llm(model) or skip_vision_check:
                    pending_image = image_path
                    console.print(f"[green]Image '{image_path}' added. Enter a message with the image.[/green]")
                else:
                    console.print(f"[red]{model} is not a vision model.[/red]")
                    pending_image = None
            except ValueError:
                # No path followed the /image token.
                console.print("[red]Please provide a valid image path using '/image <path>'.[/red]")
            continue

        # Attach any queued image to this message, otherwise send plain text.
        if pending_image:
            content = parse_image(image=pending_image, message=user_input)
            messages.append({"role": "user", "content": content})
            pending_image = None
        else:
            messages.append({"role": "user", "content": user_input})

        response = cc(model=model, messages=messages, temperature=temperature)
        llm_message = response.choices[0].message["content"]
        messages.append({"role": "assistant", "content": llm_message})

        show_messages([{"role": "assistant", "content": llm_message}], console)
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
@cli.command()
@click.option('--delete_session', type=str, required=False, default=None, help='Session ID to delete, or "all" to delete all sessions.')
def history(delete_session):
    """List all saved chat sessions."""
    # Deletion mode: remove the requested session(s) and stop.
    if delete_session:
        delete_chat_session(delete_session)
        console.print(f"[green]Session {delete_session} deleted.[/green]")
        return

    sessions = get_chat_history()
    console.print("[cyan]Chat History:[/cyan]\n")
    if not sessions:
        console.print("[red]No chat history found.[/red]")
        return

    for session_id, start_time, title, length in sessions:
        console.print(f"[yellow]Session ID:[/yellow] {session_id}\n"
                      f"[yellow]Start Time:[/yellow] {start_time}\n"
                      f"[yellow]Title:[/yellow] {title}\n"
                      f"[yellow]Length:[/yellow] {length} turns\n")
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
@cli.command()
@click.argument('key', required=False, default=None)
@click.argument('value', required=False, default=None)
def config(key: str, value: str):
    """
    Configure chat settings.
    Without arguments, displays the current configuration.
    With only a key, displays that key's current value.
    To update, provide a key and a value.
    """
    current_config = load_config()
    config_dict = asdict(current_config)

    if not key:
        # Show current configuration if no arguments are provided
        click.secho("Current configuration:", fg='cyan')
        for field_name, field_value in config_dict.items():
            click.secho(f" {field_name}: {field_value}", fg='yellow')
        return

    # Validate the provided key
    if key not in config_dict:
        click.secho(f"Invalid configuration key: {key}", fg='red')
        click.secho("Valid keys are:", fg='cyan')
        for field_name in config_dict.keys():
            click.secho(f" {field_name}", fg='yellow')
        return

    # Fix: previously `config <key>` with no value stored the string "None"
    # (for str fields) or crashed with TypeError (for int/float fields).
    # Now it just shows the key's current value.
    if value is None:
        click.secho(f" {key}: {config_dict[key]}", fg='yellow')
        return

    # Validate the provided value against the field's declared type
    field_type = {f.name: f.type for f in fields(ChatConfig)}[key]
    try:
        # Dynamically cast the value to the correct type
        if field_type == int:
            value = int(value)
        elif field_type == float:
            value = float(value)
        elif field_type == str:
            value = str(value)
        else:
            raise ValueError(f"Unsupported type: {field_type}")
    except ValueError:
        click.secho(f"Invalid value type for {key}. Expected {field_type.__name__}.", fg='red')
        return

    # Update and persist the configuration
    setattr(current_config, key, value)
    save_config(current_config)
    click.secho(f"Configuration updated: {key} = {value}", fg='green')
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
if __name__ == "__main__":
    # NOTE(review): these add_command calls look redundant — each command is
    # already registered on the group by its @cli.command() decorator above;
    # confirm before removing.
    cli.add_command(chat)
    cli.add_command(chatui)
    cli.add_command(config)
    cli.add_command(history)
    cli()
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
import click
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
@click.command()
@click.argument("input", type=click.File("rb"), nargs=-1)
@click.argument("output", type=click.File("wb"))
def cli(input, output):
    """This script works similar to the Unix `cat` command but it writes
    into a specific file (which could be the standard output as denoted by
    the ``-`` sign).

    \b
    Copy stdin to stdout:
        inout - -

    \b
    Copy foo.txt and bar.txt to stdout:
        inout foo.txt bar.txt -

    \b
    Write stdin into the file foo.txt
        inout - foo.txt
    """
    # Stream each input file to the output in 1 KiB chunks, flushing
    # after every write so interactive pipes stay responsive.
    for src in input:
        while chunk := src.read(1024):
            output.write(chunk)
            output.flush()
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import sqlite3
|
|
3
|
+
import uuid
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from core.config import history_db
|
|
8
|
+
|
|
9
|
+
# Database setup
|
|
10
|
+
DB_PATH = history_db
|
|
11
|
+
|
|
12
|
+
def init_db():
    """Initialize the SQLite database."""
    # CREATE TABLE IF NOT EXISTS makes this safe to call on every startup.
    with sqlite3.connect(DB_PATH) as conn:
        conn.execute("""
            CREATE TABLE IF NOT EXISTS sessions (
                id TEXT PRIMARY KEY,
                start_time TEXT NOT NULL,
                title TEXT NOT NULL,
                chat_history TEXT NOT NULL
            )
        """)
        conn.commit()
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def save_chat_history(history: list[dict], session_id: str = None, title: str = "Untitled Session") -> str:
    """Save chat history to the database.

    A fresh UUID is minted when no session_id is given; the history list
    is stored as a JSON string. Returns the session id used.
    """
    sid = session_id if session_id else str(uuid.uuid4())
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    with sqlite3.connect(DB_PATH) as conn:
        # INSERT OR REPLACE upserts, so resumed sessions are overwritten in place.
        conn.execute("""
            INSERT OR REPLACE INTO sessions (id, start_time, title, chat_history)
            VALUES (?, ?, ?, ?)
        """, (sid, stamp, title, json.dumps(history)))
        conn.commit()
    return sid
|
|
38
|
+
|
|
39
|
+
def get_chat_history(session_id: str = None):
    """Retrieve chat history.

    With a session_id: returns (id, start_time, title, messages) or None.
    Without: returns [(id, start_time, title, turn_count), ...] summaries.
    """
    with sqlite3.connect(DB_PATH) as conn:
        cursor = conn.cursor()
        if not session_id:
            cursor.execute("SELECT id, start_time, title, chat_history FROM sessions")
            # Summaries carry the number of turns rather than the full history.
            return [(sid, start, title, len(json.loads(hist)))
                    for sid, start, title, hist in cursor.fetchall()]

        cursor.execute("SELECT id, start_time, title, chat_history FROM sessions WHERE id = ?", (session_id,))
        row = cursor.fetchone()
        if row is None:
            return None
        # Decode the stored JSON back into the message list.
        return row[:-1] + (json.loads(row[-1]),)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def delete_chat_session(session_id: str = None):
    """Delete a chat session by ID, or every session when session_id is 'all'.

    Fix: the old code called session_id.strip() unconditionally, which
    raised AttributeError when called with the default None; a None id is
    now a no-op.
    """
    if session_id is None:
        return

    with sqlite3.connect(DB_PATH) as conn:
        cursor = conn.cursor()
        if session_id.strip() == 'all':
            cursor.execute("DELETE FROM sessions")
        else:
            cursor.execute("DELETE FROM sessions WHERE id = ?", (session_id,))
        conn.commit()

    return
|
|
71
|
+
|
|
File without changes
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
import base64
|
|
2
|
+
import os
|
|
3
|
+
import random
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Dict, List, Tuple
|
|
7
|
+
|
|
8
|
+
import litellm
|
|
9
|
+
from litellm import completion
|
|
10
|
+
from pydantic_core.core_schema import tuple_positional_schema
|
|
11
|
+
|
|
12
|
+
from .config import config_dir
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def chat(*args, **kwargs):
    """Thin pass-through to litellm.completion; all arguments are forwarded unchanged."""
    return completion(*args, **kwargs)
|
|
17
|
+
|
|
18
|
+
def is_vision_llm(model:str)->bool:
    """Return True when litellm reports *model* as supporting image input."""
    return litellm.supports_vision(model)
|
|
20
|
+
|
|
21
|
+
def encode_image(image_path: str |Path) -> str:
|
|
22
|
+
if isinstance(image_path, str):
|
|
23
|
+
image_path = Path(image_path)
|
|
24
|
+
|
|
25
|
+
if not image_path.exists() or not image_path.is_file():
|
|
26
|
+
raise FileNotFoundError(f"Image not found at {image_path}")
|
|
27
|
+
|
|
28
|
+
with open(image_path, "rb") as image_file:
|
|
29
|
+
return base64.b64encode(image_file.read()).decode('utf-8')
|
|
30
|
+
|
|
31
|
+
def is_url(path: Path) -> bool:
    """True when the path's POSIX form begins with "http" (covers https too)."""
    posix = path.as_posix()
    return posix[:4] == "http"
|
|
33
|
+
|
|
34
|
+
def parse_image(image: str | Path, message: str = None) -> list[dict]:
    """Build a multimodal message content list from an image plus optional text.

    Local files are embedded as a base64 data URL; http(s) references are
    passed through as URLs. Returns [] when a local file does not exist.

    Fix: URLs are now detected on the raw string BEFORE any Path conversion —
    Path("http://x").as_posix() collapses "//" to "/" and corrupted every URL.
    """
    raw = image if isinstance(image, str) else image.as_posix()
    url = raw.startswith("http")
    if url:
        base_image = raw
    else:
        try:
            base_image = encode_image(image)
        except FileNotFoundError:
            return []

    message_part = []
    if message:
        message_part.append({
            "type": "text",
            "text": message
        })
    if base_image:
        message_part.append(
            {
                "type": "image_url",
                "image_url": {
                    "url": base_image if url else f"data:image/jpeg;base64,{base_image}"
                }
            }
        )

    return message_part
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def random_name() -> str:
    """Return a timestamp plus random integer, usable as a unique-ish file stem."""
    stamp = datetime.now().strftime('%Y%m%d-%H%M%S.%f')
    suffix = random.randint(100000000, 9999999999)
    return f"{stamp}-{suffix}"
|
|
68
|
+
|
|
69
|
+
def unparse_image(message: List[Dict], config_dir: str = config_dir/'attached_images') -> Tuple[str, str]:
    """
    Get user message and image file path from a parsed message.

    Base64 data-URL images are decoded and written under *config_dir*;
    plain URL images are returned unchanged.

    Args:
        message (list[dict]): Parsed message containing text and/or image data.
        config_dir (str): Directory to save decoded images.

    Returns:
        Tuple[str, str]: The user message and the saved image file path
        (either element may be None).
    """
    user_message, image_path = None, None

    # Make sure the target directory exists before any decode/write.
    os.makedirs(config_dir, exist_ok=True)

    for part in message:
        kind = part["type"]
        if kind == "text":
            user_message = part["text"]
        elif kind == "image_url":
            image_data = part["image_url"]["url"]
            if image_data.startswith("data:image/jpeg;base64,"):
                # Strip the data-URL prefix, decode, and persist to disk.
                payload = image_data.split(",", 1)[1]
                image_path = Path(config_dir) / f"{random_name()}.jpg"
                image_path.write_bytes(base64.b64decode(payload))
            else:
                # Remote URL: keep the reference as-is.
                image_path = image_data

    return user_message, str(image_path) if image_path else None
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import os
|
|
3
|
+
from dataclasses import asdict, dataclass
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
# Per-user configuration directory (~/.llm-cli), created at import time.
config_dir = os.path.expanduser('~/.llm-cli')
config_dir = Path(config_dir)
config_dir.mkdir(parents=True, exist_ok=True)

# Locations of the JSON settings file and the chat-history SQLite database.
config_file = config_dir/'config.json'
history_db = config_dir/'chat_history.db'
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass
class ChatConfig:
    """User-tunable chat settings, persisted as JSON by save_config()."""
    # litellm model identifier in "provider/model_name" form.
    model:str = 'gemini/gemini-1.5-flash'
    # Sampling temperature; lower values are more deterministic.
    temperature:float = 0.2
    # Upper bound on tokens generated per response.
    max_token_output:int = 8192
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def save_config(config:ChatConfig=None, config_file:Path=config_file)->None:
    """Serialize *config* (or a default ChatConfig when None) to *config_file* as JSON."""
    cfg = config if config is not None else ChatConfig()
    payload = asdict(cfg)
    with open(config_file, 'w') as f:
        json.dump(payload, f)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def load_config() -> ChatConfig:
    """Load configuration from a JSON file, falling back to defaults.

    Fix: keys in the file that ChatConfig no longer declares (e.g. leftovers
    from an older version) previously crashed ChatConfig(**data) with a
    TypeError; unknown keys are now ignored.
    """
    if os.path.exists(config_file):
        with open(config_file, 'r') as f:
            config_data = json.load(f)
        # Keep only fields the current ChatConfig knows about.
        known = asdict(ChatConfig()).keys()
        filtered = {k: v for k, v in config_data.items() if k in known}
        return ChatConfig(**filtered)
    return ChatConfig()
|
|
@@ -0,0 +1,2 @@
|
|
|
1
|
+
# Plain assistant system prompt.
system_prompt = 'You are a Helpful assistant'
# Chain-of-thought system prompt: answer factual questions directly and
# reason step by step for mathematical or multi-step questions.
# Fix: corrected typos and grammar ("anlytical", "if Questions is factual").
system_prompt_cot = "You are an intelligent, logical, analytical, rational person. If the question is factual, answer in a single step. If the question is mathematical, involves reasoning, or requires stepwise thought, think in steps and develop a rationale."
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
[tool.poetry]
|
|
2
|
+
name = "llm-to-cli"
|
|
3
|
+
version = "0.1.0"
|
|
4
|
+
description = ""
|
|
5
|
+
authors = ["tikendraw <tikendraksahu1029@gmail.com>"]
|
|
6
|
+
readme = "README.md"
|
|
7
|
+
|
|
8
|
+
packages = [
|
|
9
|
+
{ include = "cli" },
|
|
10
|
+
{ include = "core" },
|
|
11
|
+
]
|
|
12
|
+
|
|
13
|
+
[tool.poetry.dependencies]
|
|
14
|
+
python = "^3.11"
|
|
15
|
+
litellm = "^1.52.16"
|
|
16
|
+
click = "^8.1.7"
|
|
17
|
+
rich = "^13.9.4"
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
[build-system]
|
|
21
|
+
requires = ["poetry-core"]
|
|
22
|
+
build-backend = "poetry.core.masonry.api"
|
|
23
|
+
|
|
24
|
+
[tool.poetry.scripts]
|
|
25
|
+
llm-cli = 'cli.main:cli'
|