asky-cli 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- asky/__init__.py +7 -0
- asky/__main__.py +6 -0
- asky/banner.py +123 -0
- asky/cli.py +506 -0
- asky/config.py +270 -0
- asky/config.toml +226 -0
- asky/html.py +62 -0
- asky/llm.py +378 -0
- asky/storage.py +157 -0
- asky/tools.py +314 -0
- asky_cli-0.1.6.dist-info/METADATA +290 -0
- asky_cli-0.1.6.dist-info/RECORD +14 -0
- asky_cli-0.1.6.dist-info/WHEEL +4 -0
- asky_cli-0.1.6.dist-info/entry_points.txt +3 -0
asky/__init__.py
ADDED
asky/__main__.py
ADDED
asky/banner.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Cute Terminal Icon - ASCII Art
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from rich.console import Console
|
|
7
|
+
from rich.text import Text
|
|
8
|
+
from rich.panel import Panel
|
|
9
|
+
from rich.table import Table
|
|
10
|
+
from rich import box
|
|
11
|
+
|
|
12
|
+
# Color shortcuts for templates — rich markup style strings interpolated
# into the icon f-strings below.
G = "bold #39ff14"  # neon green (eyes)
P = "#ff8fa3"  # pink (blush)
D = "dim"  # dim (border)
N = "#39ff14"  # nose (not bold = dimmer than the eyes)

# Metallic shiny effect colors used for the vertical "asky" lettering.
M1 = "bold #ffffff"  # bright highlight
M2 = "bold #c0c0c0"  # silver
M3 = "#a0a0a0"  # medium gray
M4 = "#707070"  # shadow
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def display(lines):
    """Print each rich-markup string in *lines*, framed by blank lines."""
    out = Console()
    out.print()
    for markup in lines:
        out.print(Text.from_markup(markup))
    out.print()
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def mini():
    """Render the compact asky mascot icon."""
    icon = [
        f"[{G}] ∩ ∩[/{G}] [{M1}]a[/{M1}]",
        f"[{D}] ╭┴─────┴╮[/{D}] [{M2}]s[/{M2}]",
        f"[{D}] │[/{D}] [{G}]▷[/{G}] [{D}][{N}]ω[/{N}][/{D}] [{G}]_[/{G}] [{D}]│[/{D}] [{M3}]k[/{M3}]",
        f"[{D}] │[/{D}] [{P}]◠[/{P}] [{P}]◠[/{P}] [{D}]│[/{D}] [{M4}]y[/{M4}]",
        f"[{D}] ╰───────╯[/{D}] ",
    ]
    display(icon)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def get_banner(
    model_alias: str,
    model_id: str,
    sum_alias: str,
    sum_id: str,
    default_model: str,
    search_provider: str,
    model_ctx: int,
    sum_ctx: int,
    max_turns: int,
    db_count: int,
) -> Panel:
    """Build the startup banner: the mascot icon beside two columns of config info."""
    # Mascot artwork, rendered from the shared color shortcuts.
    art = Text.from_markup(
        "\n".join(
            [
                f"[{G}] ∩ ∩[/{G}] [{M1}]a[/{M1}]",
                f"[{D}] ╭┴─────┴╮[/{D}] [{M2}]s[/{M2}]",
                f"[{D}] │[/{D}] [{G}]▷[/{G}] [{D}][{N}]ω[/{N}][/{D}] [{G}]_[/{G}] [{D}]│[/{D}] [{M3}]k[/{M3}]",
                f"[{D}] │[/{D}] [{P}]◠[/{P}] [{P}]◠[/{P}] [{D}]│[/{D}] [{M4}]y[/{M4}]",
                f"[{D}] ╰───────╯[/{D}] ",
            ]
        )
    )

    # Left column: which models are in play.
    left = Table.grid(padding=(0, 1))
    left.add_column(justify="left", style="bold cyan")
    left.add_column(justify="left")
    left.add_row(
        " Main Model :", f" [white]{model_alias}[/white] ([dim]{model_id}[/dim])"
    )
    left.add_row(" Summarizer :", f" [white]{sum_alias}[/white] ([dim]{sum_id}[/dim])")
    left.add_row(" Default :", f" [white]{default_model}[/white]")

    # Right column: runtime settings and database size.
    right = Table.grid(padding=(0, 1))
    right.add_column(justify="left", style="bold cyan")
    right.add_column(justify="left")
    right.add_row(" Search :", f" [white]{search_provider}[/white]")
    right.add_row(
        " Context :",
        f" [white]{model_ctx:,}[/white]/[white]{sum_ctx:,}[/white] [dim]tokens[/dim]",
    )
    right.add_row(
        " System :",
        f" [white]{max_turns}[/white] [dim]turns[/dim] | [white]{db_count}[/white] [dim]records[/dim]",
    )

    # Place the two config columns side by side.
    info = Table.grid(padding=(0, 3))
    info.add_column()
    info.add_column()
    info.add_row(left, right)

    # Outer layout: icon on the left, config grid filling the rest.
    frame = Table.grid(padding=(0, 2))
    frame.add_column()
    frame.add_column(ratio=1)
    frame.add_row(art, info)

    return Panel(frame, box=box.ROUNDED, border_style="dim", padding=(0, 1))
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
if __name__ == "__main__":
    # Manual smoke test: render the mini icon, then a banner with sample data.
    mini()
    demo = get_banner(
        "gf",
        "gemini-flash-latest",
        "lfm",
        "llama3",
        "gf",
        "searxng",
        1000000,
        4096,
        20,
        123,
    )
    console = Console()
    console.print(demo)
|
asky/cli.py
ADDED
|
@@ -0,0 +1,506 @@
|
|
|
1
|
+
"""Command-line interface for asky."""
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
import os
|
|
5
|
+
import pyperclip
|
|
6
|
+
import re
|
|
7
|
+
from typing import Dict, List, Optional
|
|
8
|
+
|
|
9
|
+
from rich.console import Console
|
|
10
|
+
from rich.markdown import Markdown
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
from asky.config import (
|
|
14
|
+
DEFAULT_MODEL,
|
|
15
|
+
MODELS,
|
|
16
|
+
USER_PROMPTS,
|
|
17
|
+
SUMMARIZATION_MODEL,
|
|
18
|
+
SEARCH_PROVIDER,
|
|
19
|
+
DEFAULT_CONTEXT_SIZE,
|
|
20
|
+
MAX_TURNS,
|
|
21
|
+
QUERY_SUMMARY_MAX_CHARS,
|
|
22
|
+
ANSWER_SUMMARY_MAX_CHARS,
|
|
23
|
+
)
|
|
24
|
+
from asky.banner import get_banner
|
|
25
|
+
from asky.storage import (
|
|
26
|
+
init_db,
|
|
27
|
+
get_history,
|
|
28
|
+
get_interaction_context,
|
|
29
|
+
cleanup_db,
|
|
30
|
+
save_interaction,
|
|
31
|
+
get_db_record_count,
|
|
32
|
+
)
|
|
33
|
+
from asky.llm import (
|
|
34
|
+
construct_system_prompt,
|
|
35
|
+
run_conversation_loop,
|
|
36
|
+
generate_summaries,
|
|
37
|
+
is_markdown,
|
|
38
|
+
UsageTracker,
|
|
39
|
+
)
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def parse_args() -> argparse.Namespace:
    """Define and parse the asky command-line arguments."""
    parser = argparse.ArgumentParser(
        description="Tool-calling CLI with model selection.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    # Local alias keeps the long list of option definitions compact.
    add = parser.add_argument

    add(
        "-m",
        "--model",
        default=DEFAULT_MODEL,
        choices=MODELS.keys(),
        help="Select the model alias",
    )
    add(
        "-d",
        "--deep-research",
        nargs="?",
        type=int,
        const=5,
        default=0,
        help="Enable deep research mode (optional: specify min number of queries, default 5)",
    )
    add(
        "-dd",
        "--deep-dive",
        action="store_true",
        help="Enable deep dive mode (extracts links and encourages reading more pages from same domain)",
    )
    add(
        "-c",
        "--continue-chat",
        dest="continue_ids",
        help="Continue conversation with context from specific history IDs (comma-separated, e.g. '1,2').",
    )
    add(
        "-s",
        "--summarize",
        action="store_true",
        help="Enable summarize mode (summarizes URL content and uses summaries for chat context)",
    )
    add(
        "-fs",
        "--force-search",
        action="store_true",
        help="Force the model to use web search (default: False).\nHelpful for avoiding hallucinations with small models",
    )
    add(
        "--cleanup-db",
        nargs="?",
        const="interactive",
        help="Delete history records. usage: --cleanup-db [ID|ID-ID|ID,ID] or --cleanup-db --all",
    )
    add(
        "--all",
        action="store_true",
        help="Used with --cleanup-db to delete ALL history.",
    )
    add(
        "-H",
        "--history",
        nargs="?",
        type=int,
        const=10,
        help="Show last N queries and answer summaries (default 10).\nUse with --print-answer to print the full answer(s).",
    )
    add(
        "-pa",
        "--print-answer",
        dest="print_ids",
        help="Print the answer(s) for specific history IDs (comma-separated).",
    )
    add(
        "-p",
        "--prompts",
        action="store_true",
        help="List all configured user prompts.",
    )
    add(
        "-v",
        "--verbose",
        action="store_true",
        help="Enable verbose output (prints config and LLM inputs).",
    )
    add("query", nargs="*", help="The query string")
    return parser.parse_args()
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def show_history(history_arg: int) -> None:
    """Display recent query history as an ID | query | answer-summary table.

    Args:
        history_arg: Number of rows to show; non-positive values fall back to 10.
    """
    limit = history_arg if history_arg > 0 else 10
    rows = get_history(limit)
    print(f"\nLast {len(rows)} Queries:")
    print("-" * 60)
    for row in rows:
        rid, ts, query, q_sum, a_sum, mod = row
        # Prefer the stored summary; fall back to the raw query text.
        display_query = q_sum if q_sum else query
        # Summaries may be missing (NULL) in the DB; guard before len()/slicing.
        a_sum = a_sum or ""
        if len(display_query) > 50:
            display_query = display_query[:47] + "..."
        if len(a_sum) > 50:
            a_sum = a_sum[:47] + "..."

        print(f"{rid:<4} | {display_query:<50} | {a_sum:<50}")
    print("-" * 60)
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def load_context(continue_ids: str, summarize: bool) -> Optional[str]:
    """Resolve -c/--continue-chat IDs and return the combined context text.

    Accepts absolute history IDs and relative ones of the form ~N
    (~1 = most recent record). Returns None when any token fails to parse
    or a relative reference points past the available history.
    """
    try:
        tokens = [t.strip() for t in continue_ids.split(",")]
        ids = []
        rel_depths = []

        # Split tokens into absolute IDs and relative (~N) depths.
        for token in tokens:
            if token.startswith("~"):
                try:
                    depth = int(token[1:])
                except ValueError:
                    print(f"Error: Invalid relative ID format: {token}")
                    return None
                if depth < 1:
                    print(f"Error: Relative ID must be >= 1 (got {token})")
                    return None
                rel_depths.append(depth)
            else:
                ids.append(int(token))

        if rel_depths:
            # Fetch just enough recent rows (newest first) to cover the
            # deepest relative reference, then map each ~N to a real ID.
            recent = get_history(limit=max(rel_depths))
            for depth in rel_depths:
                idx = depth - 1
                if idx >= len(recent):
                    print(
                        f"Error: Relative ID {depth} is out of range (only {len(recent)} records available)."
                    )
                    return None
                # Row format: (id, timestamp, query, q_sum, a_sum, model).
                ids.append(recent[idx][0])

        # De-duplicate and load in ascending (chronological) ID order.
        ids = sorted(set(ids))
        context_str = get_interaction_context(ids, full=not summarize)
        if context_str:
            print(f"\n[Loaded context from IDs: {', '.join(map(str, ids))}]")
        return context_str
    except ValueError:
        # int() failure on an absolute token lands here.
        print(
            "Error: Invalid format for -c/--continue-chat. Use comma-separated integers or ~N for relative."
        )
        return None
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
def build_messages(args: argparse.Namespace, context_str: str) -> List[Dict[str, str]]:
    """Assemble the initial chat messages: system prompt, optional context, query."""
    system_prompt = construct_system_prompt(
        args.deep_research, args.deep_dive, args.force_search
    )
    messages: List[Dict[str, str]] = [{"role": "system", "content": system_prompt}]

    # Prior-interaction context goes in as a user turn ahead of the real query.
    if context_str:
        messages.append(
            {
                "role": "user",
                "content": f"Context from previous queries:\n{context_str}\n\nMy new query is below.",
            }
        )

    messages.append({"role": "user", "content": " ".join(args.query)})
    return messages
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
def print_answers(ids_str: str, summarize: bool) -> None:
    """Fetch and print stored answers for a comma-separated list of history IDs."""
    try:
        ids = [int(part.strip()) for part in ids_str.split(",")]
    except ValueError:
        print("Error: Invalid ID format. Use comma-separated integers.")
        return

    content = get_interaction_context(ids, full=not summarize)
    if not content:
        print("No records found for the given IDs.")
        return

    print(f"\n[Retrieving answers for IDs: {ids_str}]\n")
    print("-" * 60)
    # Markdown-looking answers get rich rendering; everything else is plain.
    if is_markdown(content):
        Console().print(Markdown(content))
    else:
        print(content)
    print("-" * 60)
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
def handle_cleanup(args: argparse.Namespace) -> bool:
    """Run the --cleanup-db command if requested.

    Returns True when a cleanup action (or its usage error) was handled,
    False when no cleanup was requested at all.
    """
    requested = bool(args.cleanup_db) or (args.cleanup_db is None and args.all)
    if not requested:
        return False

    if args.all:
        cleanup_db(None, delete_all=True)
    elif args.cleanup_db and args.cleanup_db != "interactive":
        cleanup_db(args.cleanup_db)
    else:
        # Bare --cleanup-db with no IDs and no --all is a usage error.
        print("Error: Please specify target IDs (e.g. 1, 1-5, 1,3) or use --all")
    return True
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
def expand_query_text(text: str, verbose: bool = False) -> str:
    """Recursively expand slash commands like /cp and predefined prompts.

    Expansion repeats until the text stops changing, capped at 5 passes so
    prompts that expand into other slash commands cannot loop forever.

    Args:
        text: Raw query text possibly containing /cp or /<prompt> commands.
        verbose: When True, print a note for each expansion performed.

    Returns:
        The expanded query text, stripped of surrounding whitespace.
    """
    expanded = text
    max_depth = 5
    depth = 0

    while depth < max_depth:
        original = expanded

        # 1. Expand /cp with the current clipboard contents.
        if "/cp" in expanded:
            try:
                clipboard_content = pyperclip.paste()
                if clipboard_content:
                    expanded = expanded.replace("/cp", clipboard_content)
                    if verbose:
                        print("[Expanded /cp from clipboard]")
                else:
                    if verbose:
                        print("[Warning: Clipboard is empty, /cp not expanded]")
            except Exception as e:
                # Clipboard access is best-effort; never abort the query over it.
                if verbose:
                    print(f"[Error reading clipboard: {e}]")

        # 2. Expand predefined prompts from USER_PROMPTS.
        # A command is /<key> followed by whitespace or end of string.
        for key, prompt_val in USER_PROMPTS.items():
            pattern = rf"/{re.escape(key)}(\s|$)"
            if re.search(pattern, expanded):
                # Use a callable replacement so backslashes or group
                # references (e.g. "\1") inside the configured prompt text
                # are inserted literally instead of being interpreted as
                # re.sub() template escapes (which would corrupt the prompt
                # or raise "invalid group reference").
                expanded = re.sub(
                    pattern, lambda m, p=prompt_val: p + m.group(1), expanded
                )
                if verbose:
                    print(f"[Expanded Prompt '/{key}']")

        if expanded == original:
            break
        depth += 1

    return expanded.strip()
|
|
312
|
+
|
|
313
|
+
|
|
314
|
+
def handle_print_answer_implicit(args: argparse.Namespace) -> bool:
    """Treat a query made up solely of integers as a print-answer request.

    Returns True when the query was interpreted as history IDs and handled.
    """
    if not args.query:
        return False

    joined = " ".join(args.query).strip()
    # Only digits separated by commas/whitespace qualifies as an implicit request.
    if not re.match(r"^(\d+\s*,?\s*)+$", joined):
        return False

    tokens = [t for t in re.split(r"[,\s]+", joined) if t]
    try:
        print_answers(",".join(tokens), args.summarize)
        return True
    except ValueError:
        return False
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
def main() -> None:
    """Main entry point for the CLI.

    Dispatch order: DB init, banner, optional verbose config dump, then the
    mutually exclusive commands (history / cleanup / print-answer / prompts)
    before falling through to running an actual query against the model.
    """
    args = parse_args()

    # Initialize DB before any DB operations (like get_db_record_count for the banner)
    init_db()

    # Show banner for real queries (not history, cleanup, prompts, or implicit print)
    if not (
        args.history is not None
        or args.cleanup_db
        or (args.cleanup_db is None and args.all)
        or args.print_ids
        or args.prompts
    ):
        # A query of bare integers is an implicit print-answer request
        # (same regex as handle_print_answer_implicit) — no banner for it.
        query_str = " ".join(args.query).strip()
        is_implicit_print = bool(re.match(r"^(\d+\s*,?\s*)+$", query_str))

        if not is_implicit_print and args.query:
            model_alias = args.model
            model_id = MODELS[model_alias]["id"]
            sum_alias = SUMMARIZATION_MODEL
            sum_id = MODELS[sum_alias]["id"]

            model_ctx = MODELS[model_alias].get("context_size", DEFAULT_CONTEXT_SIZE)
            sum_ctx = MODELS[sum_alias].get("context_size", DEFAULT_CONTEXT_SIZE)
            db_count = get_db_record_count()

            banner = get_banner(
                model_alias,
                model_id,
                sum_alias,
                sum_id,
                DEFAULT_MODEL,
                SEARCH_PROVIDER,
                model_ctx,
                sum_ctx,
                MAX_TURNS,
                db_count,
            )
            Console().print(banner)

    if args.verbose:
        # Dump runtime flags and the full model config, masking secrets.
        print("\n=== CONFIGURATION ===")
        print(f"Selected Model: {args.model}")
        print(f"Deep Research: {args.deep_research}")
        print(f"Deep Dive: {args.deep_dive}")
        print(f"Summarize: {args.summarize}")
        print(f"Force Search: {args.force_search}")
        print("-" * 20)

        print(f"DEFAULT_MODEL: {DEFAULT_MODEL}")
        print(f"MAX_TURNS: {MAX_TURNS}")
        print(f"QUERY_SUMMARY_MAX_CHARS: {QUERY_SUMMARY_MAX_CHARS}")
        print(f"ANSWER_SUMMARY_MAX_CHARS: {ANSWER_SUMMARY_MAX_CHARS}")
        print("-" * 20)
        print("MODELS Config:")
        for m_alias, m_conf in MODELS.items():
            print(f" [{m_alias}]: {m_conf['id']}")
            for k, v in m_conf.items():
                if k == "id":
                    continue

                # Special handling for api_key_env: report whether the named
                # environment variable is set, showing only a masked value.
                if k == "api_key_env":
                    print(f" {k}: {v}")
                    # Check if env var is set
                    env_val = os.environ.get(v)
                    if env_val:
                        masked = (
                            env_val[:5] + "..." + env_val[-4:]
                            if len(env_val) > 10
                            else "***"
                        )
                        print(f" [Status]: SET ({masked})")
                    else:
                        print(" [Status]: NOT SET")
                    continue

                # Any other config field whose name contains "key" is also masked.
                if "key" in k.lower() and v and k != "api_key_env":
                    # Mask key directly
                    masked = v[:5] + "..." + v[-4:] if len(v) > 10 else "***"
                    print(f" {k}: {masked}")
                else:
                    print(f" {k}: {v}")
        print("=====================\n")

    # init_db() was moved to the top of main()

    # Handle History Request
    if args.history is not None:
        show_history(args.history)
        return

    # Handle Cleanup
    if handle_cleanup(args):
        return

    # Handle Explicit Print Answer
    if args.print_ids:
        print_answers(args.print_ids, args.summarize)
        return

    # Handle Implicit Print Answer (query is list of ints)
    if handle_print_answer_implicit(args):
        return

    if args.prompts:
        print("\n=== USER PROMPTS ===")
        if USER_PROMPTS:
            for key, prompt in USER_PROMPTS.items():
                print(f" /{key:<10} : {prompt}")
        else:
            print(" (No prompts configured)")
        print("====================\n")
        return

    if not args.query:
        print(
            "Error: Query argument is required unless -H/--history or --prompts is used."
        )
        return

    # Join query tokens and expand slash commands
    query_text = " ".join(args.query)
    query_text = expand_query_text(query_text, verbose=args.verbose)

    # Update args.query to the expanded version for later use (saving interaction)
    # Note: args.query was a list, we'll keep it as a list with one element for compatibility
    args.query = [query_text]

    # Handle Context
    context_str = ""
    if args.continue_ids:
        context_str = load_context(args.continue_ids, args.summarize)
        if context_str is None:
            # load_context already printed the error; abort the run.
            return

    messages = build_messages(args, context_str)
    model_config = MODELS[args.model]

    usage_tracker = UsageTracker()
    final_answer = run_conversation_loop(
        model_config,
        messages,
        args.summarize,
        verbose=args.verbose,
        usage_tracker=usage_tracker,
    )

    # Save Interaction
    if final_answer:
        print("\n[Saving interaction...]")
        query_text = " ".join(args.query)
        query_summary, answer_summary = generate_summaries(
            query_text, final_answer, usage_tracker=usage_tracker
        )
        save_interaction(
            query_text, final_answer, args.model, query_summary, answer_summary
        )

    # Print Session Usage Report
    if usage_tracker.usage:
        print("\n=== SESSION TOKEN USAGE ===")
        total_session_tokens = 0
        for m_alias, tokens in usage_tracker.usage.items():
            print(f" {m_alias:<15}: {tokens:,} tokens")
            total_session_tokens += tokens
        print("-" * 30)
        print(f" {'TOTAL':<15}: {total_session_tokens:,} tokens")
        print("===========================\n")
|
|
503
|
+
|
|
504
|
+
|
|
505
|
+
if __name__ == "__main__":
    # Allow running this module directly (e.g. `python -m asky.cli`).
    main()
|