asky-cli 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- asky/__init__.py +7 -0
- asky/__main__.py +6 -0
- asky/banner.py +123 -0
- asky/cli.py +506 -0
- asky/config.py +270 -0
- asky/config.toml +226 -0
- asky/html.py +62 -0
- asky/llm.py +378 -0
- asky/storage.py +157 -0
- asky/tools.py +314 -0
- asky_cli-0.1.6.dist-info/METADATA +290 -0
- asky_cli-0.1.6.dist-info/RECORD +14 -0
- asky_cli-0.1.6.dist-info/WHEEL +4 -0
- asky_cli-0.1.6.dist-info/entry_points.txt +3 -0
asky/tools.py
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
1
|
+
"""Tool execution functions for web search and URL content retrieval."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
import requests
|
|
6
|
+
import subprocess
|
|
7
|
+
import time
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
from typing import Any, Dict, List
|
|
10
|
+
|
|
11
|
+
from asky.config import (
|
|
12
|
+
SEARXNG_URL,
|
|
13
|
+
MODELS,
|
|
14
|
+
SUMMARIZATION_MODEL,
|
|
15
|
+
SEARCH_PROVIDER,
|
|
16
|
+
SERPER_API_URL,
|
|
17
|
+
SERPER_API_KEY_ENV,
|
|
18
|
+
CUSTOM_TOOLS,
|
|
19
|
+
USER_AGENT,
|
|
20
|
+
)
|
|
21
|
+
from asky.html import HTMLStripper, strip_tags, strip_think_tags
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
# URLs fetched during the current session; consulted to avoid duplicate reads.
read_urls: List[str] = []


def reset_read_urls() -> None:
    """Forget every URL recorded for the current session.

    Rebinds the module-level ``read_urls`` list so a fresh session starts
    with no "already read" entries.
    """
    global read_urls
    read_urls = list()
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def _execute_searxng_search(q: str, count: int) -> Dict[str, Any]:
    """Query a SearXNG instance and return normalized search results.

    Args:
        q: The search query string.
        count: Maximum number of results to return.

    Returns:
        ``{"results": [...]}`` on success, ``{"error": "..."}`` on any
        HTTP or parsing failure (never raises).
    """
    # Tolerate a trailing slash in the configured SEARXNG_URL.
    endpoint = SEARXNG_URL.rstrip("/")
    try:
        resp = requests.get(
            f"{endpoint}/search",
            params={"q": q, "format": "json"},
            headers={
                "User-Agent": USER_AGENT,
                "Accept": "application/json",
            },
            timeout=20,
        )
        if resp.status_code != 200:
            return {"error": f"SearXNG error {resp.status_code}: {resp.text[:200]}"}

        hits = resp.json().get("results", [])[:count]
        return {
            "results": [
                {
                    "title": strip_tags(hit.get("title", "")),
                    "url": hit.get("url"),
                    "snippet": strip_tags(hit.get("content", ""))[:400],
                    "engine": hit.get("engine"),
                }
                for hit in hits
            ]
        }
    except Exception as e:
        return {"error": f"SearXNG search failed: {str(e)}"}
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def _execute_serper_search(q: str, count: int) -> Dict[str, Any]:
    """Query the Serper API and return normalized search results.

    Args:
        q: The search query string.
        count: Maximum number of results to return.

    Returns:
        ``{"results": [...]}`` on success, ``{"error": "..."}`` when the API
        key is missing or the request fails (never raises).
    """
    api_key = os.environ.get(SERPER_API_KEY_ENV)
    if not api_key:
        return {
            "error": f"Serper API key not found in environment variable {SERPER_API_KEY_ENV}"
        }

    try:
        resp = requests.post(
            SERPER_API_URL,
            headers={
                "X-API-KEY": api_key,
                "Content-Type": "application/json",
            },
            data=json.dumps({"q": q, "num": count}),
            timeout=20,
        )
        resp.raise_for_status()
        # Only the 'organic' section of the response is consumed; the
        # 'knowledgeGraph' section (if any) is currently ignored.
        organic = resp.json().get("organic", [])[:count]
        return {
            "results": [
                {
                    "title": strip_tags(entry.get("title", "")),
                    "url": entry.get("link"),
                    "snippet": strip_tags(entry.get("snippet", ""))[:400],
                    "engine": "serper",
                }
                for entry in organic
            ]
        }
    except Exception as e:
        return {"error": f"Serper search failed: {str(e)}"}
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def execute_web_search(args: Dict[str, Any]) -> Dict[str, Any]:
    """Run a web search via whichever provider is configured.

    Args:
        args: Tool-call arguments; "q" is the query (default ""), "count"
            the result limit (default 5).

    Returns:
        The provider's result dict ({"results": ...} or {"error": ...}).
    """
    query = args.get("q", "")
    limit = args.get("count", 5)
    if SEARCH_PROVIDER == "serper":
        return _execute_serper_search(query, limit)
    return _execute_searxng_search(query, limit)
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def summarize_text(text: str) -> str:
    """Condense *text* using the configured summarization model.

    Args:
        text: Raw text to summarize; truncated to 50,000 characters before
            being sent to the model.

    Returns:
        The summary with think-tags stripped, "" for empty input, or an
        "[Error in summarization: ...]" marker on failure (never raises).
    """
    # Imported lazily: asky.llm imports from this module at load time.
    from asky.llm import get_llm_msg

    if not text:
        return ""

    try:
        prompt = [
            {
                "role": "system",
                "content": "You are a helpful assistant. Summarize the following text concisely, focusing on key facts.",
            },
            {
                "role": "user",
                "content": f"Text to summarize:\n\n{text[:50000]}",
            },
        ]
        reply = get_llm_msg(MODELS[SUMMARIZATION_MODEL]["id"], prompt, tools=None)
        return strip_think_tags(reply.get("content", ""))
    except Exception as e:
        return f"[Error in summarization: {str(e)}]"
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def _sanitize_url(url: str) -> str:
|
|
145
|
+
"""Remove artifacts like shell-escaped backslashes from URLs."""
|
|
146
|
+
if not url:
|
|
147
|
+
return ""
|
|
148
|
+
# Remove backslashes which are often artifacts of shell-escaping parentheses or special chars
|
|
149
|
+
return url.replace("\\", "")
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def fetch_single_url(
    url: str, max_chars: int, summarize: bool = False
) -> Dict[str, str]:
    """Retrieve one URL's text content, optionally summarized.

    Args:
        url: Target URL (sanitized before use).
        max_chars: Truncation limit applied when not summarizing.
        summarize: When True, return an LLM summary instead of raw text.

    Returns:
        ``{url: content}`` on success or ``{url: "Error: ..."}`` on failure.
        URLs already fetched this session are rejected with an error entry.
    """
    url = _sanitize_url(url)
    global read_urls
    if url in read_urls:
        return {url: "Error: Already read this URL."}
    try:
        resp = requests.get(url, headers={"User-Agent": USER_AGENT}, timeout=20)
        resp.raise_for_status()
        text = strip_tags(resp.text)

        if summarize:
            body = f"Summary of {url}:\n" + summarize_text(text)
        else:
            body = text[:max_chars]

        # Record as read only after a successful fetch.
        read_urls.append(url)
        return {url: body}
    except Exception as e:
        return {url: f"Error: {str(e)}"}
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def execute_get_url_content(
    args: Dict[str, Any], max_chars: int, summarize: bool
) -> Dict[str, Any]:
    """Fetch content from one or more URLs.

    Args:
        args: Tool-call arguments; accepts a single "url", a list "urls",
            and an optional "summarize" override.
        max_chars: Truncation limit applied when not summarizing.
        summarize: Default summarize behavior (the LLM can override it per
            call via args["summarize"]).

    Returns:
        Mapping of URL -> fetched content (or error string), or
        ``{"error": "No URLs provided."}`` when no URLs were supplied.
    """
    # Copy so we never mutate the caller's args["urls"] list in place
    # (the original appended directly into the tool-call argument dict).
    urls = list(args.get("urls", []))
    header_url = args.get("url")

    # LLM can override the global summarize flag in its tool call.
    effective_summarize = args.get("summarize", summarize)

    # Support both single 'url' and list 'urls'.
    if header_url:
        urls.append(header_url)

    # Deduplicate preserving order (list(set(...)) would make the fetch
    # order nondeterministic) and drop empty entries.
    urls = list(dict.fromkeys(u for u in urls if u))

    if not urls:
        return {"error": "No URLs provided."}

    results: Dict[str, Any] = {}
    for i, url in enumerate(urls):
        if i > 0 and effective_summarize:
            # Small delay between summarizations to avoid hitting RPM limits.
            time.sleep(1)
        results.update(fetch_single_url(url, max_chars, effective_summarize))

    return results
|
|
205
|
+
|
|
206
|
+
|
|
207
|
+
def execute_get_url_details(args: Dict[str, Any], max_chars: int) -> Dict[str, Any]:
    """Fetch a page's text content plus the links it contains.

    Args:
        args: Tool-call arguments; "url" names the page to fetch.
        max_chars: Truncation limit for the extracted text.

    Returns:
        ``{"content", "links", "system_note"}`` on success or
        ``{"error": ...}`` on failure / repeat reads (never raises).
    """
    url = _sanitize_url(args.get("url", ""))
    global read_urls
    if url in read_urls:
        return {"error": "You have already read this URL."}
    try:
        resp = requests.get(url, headers={"User-Agent": USER_AGENT}, timeout=20)
        resp.raise_for_status()

        stripper = HTMLStripper()
        stripper.feed(resp.text)

        # Mark as read only after successful fetch.
        read_urls.append(url)

        return {
            "content": stripper.get_data()[:max_chars],
            # Cap links to avoid overflowing the model's context window.
            "links": stripper.get_links()[:50],
            "system_note": "IMPORTANT: Do NOT use get_url_details again. Use get_url_content to read links.",
        }
    except Exception as e:
        return {"error": f"Failed to fetch details: {str(e)}"}
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
def execute_get_date_time() -> Dict[str, Any]:
    """Report the current local date and time in ISO-8601 form."""
    now = datetime.now()
    return {"date_time": now.isoformat()}
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
def _execute_custom_tool(name: str, args: Dict[str, Any]) -> Dict[str, Any]:
    """Execute a custom tool defined in config.toml.

    Builds a shell command from the tool's configured command (template or
    plain), fills in defaults for missing parameters, runs it via the shell,
    and returns stdout/stderr/exit code.

    Args:
        name: Custom tool name (key in CUSTOM_TOOLS).
        args: LLM-provided arguments for the tool.

    Returns:
        ``{"stdout", "stderr", "exit_code"}`` on completion or
        ``{"error": ...}`` on configuration/execution failure.

    Security note: the command is executed through the system shell, so
    every argument value is shell-quoted with shlex.quote. The previous
    quoting (strip double quotes, wrap in double quotes) left $, backticks
    and backslashes live inside the double-quoted string.
    """
    import shlex  # local import: only needed when a custom tool runs

    tool_cfg = CUSTOM_TOOLS.get(name)
    if not tool_cfg:
        return {"error": f"Custom tool configuration for '{name}' not found."}

    cmd_base = tool_cfg.get("command", "")
    if not cmd_base:
        return {"error": f"No command defined for custom tool '{name}'."}

    # Merge config-declared defaults with the provided args, then quote each
    # value so shell metacharacters are inert.
    props = tool_cfg.get("parameters", {}).get("properties", {})
    processed_args: Dict[str, str] = {}
    for k, p in props.items():
        val = args.get(k)
        if val is None:
            val = p.get("default")
        if val is not None:
            processed_args[k] = shlex.quote(str(val))

    try:
        if "{" in cmd_base and "}" in cmd_base:
            # Template-style command: substitute named placeholders.
            try:
                cmd_str = cmd_base.format(**processed_args)
            except KeyError as e:
                return {"error": f"Missing parameter required by command template: {e}"}
        else:
            # No placeholders: append arguments in the order the properties
            # are declared in config.toml.
            arg_list = [processed_args[k] for k in props if k in processed_args]
            cmd_str = f"{cmd_base} {' '.join(arg_list)}".strip()

        print(f"Executing custom tool command: {cmd_str}")
        result = subprocess.run(
            cmd_str, shell=True, capture_output=True, text=True, timeout=30
        )

        return {
            "stdout": result.stdout.strip(),
            "stderr": result.stderr.strip(),
            "exit_code": result.returncode,
        }
    except Exception as e:
        return {"error": f"Failed to execute custom tool '{name}': {str(e)}"}
|
|
292
|
+
|
|
293
|
+
|
|
294
|
+
def dispatch_tool_call(
    call: Dict[str, Any], max_chars: int, summarize: bool
) -> Dict[str, Any]:
    """Route one LLM tool call to its executor and return the result.

    Args:
        call: The tool-call dict; call["function"] holds "name" and a JSON
            "arguments" string.
        max_chars: Truncation limit forwarded to URL-fetching tools.
        summarize: Default summarize flag forwarded to get_url_content.

    Returns:
        The executor's result dict, or ``{"error": ...}`` for unknown tools.
    """
    func = call["function"]
    name = func["name"]
    args = json.loads(func["arguments"]) if func.get("arguments") else {}
    print(f"Dispatching tool call: {name} with args {args}")

    # Built-in tools dispatch via a table; custom tools are checked last so
    # they cannot shadow a built-in name.
    builtin_handlers = {
        "web_search": lambda: execute_web_search(args),
        "get_url_content": lambda: execute_get_url_content(args, max_chars, summarize),
        "get_url_details": lambda: execute_get_url_details(args, max_chars),
        "get_date_time": lambda: execute_get_date_time(),
    }
    handler = builtin_handlers.get(name)
    if handler is not None:
        return handler()

    if name in CUSTOM_TOOLS:
        return _execute_custom_tool(name, args)

    return {"error": f"Unknown tool: {name}"}
|
|
@@ -0,0 +1,290 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: asky-cli
|
|
3
|
+
Version: 0.1.6
|
|
4
|
+
Summary: AI-powered web search CLI with LLM tool-calling capabilities
|
|
5
|
+
License-Expression: MIT
|
|
6
|
+
Requires-Python: >=3.10
|
|
7
|
+
Requires-Dist: pyperclip>=1.11.0
|
|
8
|
+
Requires-Dist: requests
|
|
9
|
+
Requires-Dist: rich
|
|
10
|
+
Description-Content-Type: text/markdown
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
<img src="assets/asearch_icon.png" alt="asky icon" width="200" align="right">
|
|
14
|
+
|
|
15
|
+
<font size="6">**asky**</font>
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
AI-powered web search CLI with LLM tool-calling capabilities.
|
|
19
|
+
|
|
20
|
+
asky (can be invoked as `asky` or `ask`) is a powerful command-line interface that brings AI-powered search and research capabilities directly to your terminal. It uses LLMs and tools to synthesize answers from the web (or from files and apps you have on your computer).
|
|
21
|
+
## Key Features
|
|
22
|
+
|
|
23
|
+
- **Multi-Model Support**: Easily define and switch between various LLMs and providers that support an OpenAI-compatible API.
|
|
24
|
+
- **Tool-Calling Integration**: Models can autonomously perform web searches (via SearXNG or Serper API), fetch URL content, and get current date/time to provide accurate, up-to-date answers.
|
|
25
|
+
- **Intelligent Content Fetching**: Automatically strips HTML noise (scripts, styles) to provide clean text context to the models.
|
|
26
|
+
- **Conversation History**: Maintains a local SQLite database of your queries and answers, allowing for context-aware follow-up questions.
|
|
27
|
+
- **Deep Research Mode**: Automatically performs multiple searches to provide comprehensive analysis of complex topics.
|
|
28
|
+
- **Deep Dive Mode**: Recursively explores links found on web pages for in-depth information gathering.
|
|
29
|
+
- **Predefined Prompts**: Save and quickly invoke common prompt patterns using simple slashes (e.g., `/gn` for get latest news from The Guardian).
|
|
30
|
+
- **Clipboard Integration**: Use `/cp` to expand the query with clipboard content.
|
|
31
|
+
- **Custom Tools**: Expose any CLI command as a tool for the LLM. Define your own commands and parameters in `config.toml`.
|
|
32
|
+
|
|
33
|
+
## How it Works
|
|
34
|
+
|
|
35
|
+
1. **User Query**: You provide a query to the `ask` command.
|
|
36
|
+
2. **Model Selection**: asky initializes the selected LLM based on your configuration.
|
|
37
|
+
3. **Tool Loop**: The LLM analyzes your query. If it needs real-world data, it calls integrated tools (like `web_search`).
|
|
38
|
+
4. **Context Synthesis**: asky fetches the data, cleans it, and feeds it back to the LLM. This process can repeat for up to 15 turns for complex research.
|
|
39
|
+
5. **Final Answer**: The LLM synthesizes all gathered information into a concise, formatted response.
|
|
40
|
+
6. **Persistence**: The interaction is saved to your local history for future reference.
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
## Installation
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
pip install asky-cli
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
Or install from source:
|
|
50
|
+
|
|
51
|
+
```bash
|
|
52
|
+
pip install -e .
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
## Usage
|
|
56
|
+
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
# Basic query
|
|
60
|
+
ask what is the weather in Berlin
|
|
61
|
+
|
|
62
|
+
# Continue from previous query (by ID)
|
|
63
|
+
ask -c 1 tell me more about that
|
|
64
|
+
|
|
65
|
+
# Continue from last query (relative ID)
|
|
66
|
+
ask -c~1 explain more
|
|
67
|
+
# OR
|
|
68
|
+
ask -c "~2" what about the one before that?
|
|
69
|
+
|
|
70
|
+
> [!NOTE]
|
|
71
|
+
> **Zsh Users**: When using `~` for relative IDs, you must either quote the value (e.g., `ask -c "~1"`) or place it immediately after the flag without a space (e.g., `ask -c~1`). If you use a space without quotes (e.g., `ask -c ~1`), zsh will attempt to expand it as a directory stack entry.
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
➜ ~ ask -p
|
|
75
|
+
|
|
76
|
+
=== USER PROMPTS ===
|
|
77
|
+
/gn : Give me latest news from The Guardian, use https://www.theguardian.com/europe
|
|
78
|
+
/wh : how is weather in
|
|
79
|
+
====================
|
|
80
|
+
|
|
81
|
+
➜ ~ ask /wh delft
|
|
82
|
+
Dispatching tool call: web_search with args {'q': 'weather in Delft'}
|
|
83
|
+
Dispatching tool call: get_url_content with args {'urls': ...}
|
|
84
|
+
|
|
85
|
+
The weather in **Delft, South Holland, Netherlands** is currently **45°F and Cloudy with Showers in the Vicinity** (as of 4:20 pm CET).
|
|
86
|
+
|
|
87
|
+
Here is the forecast for today and the next couple of days:
|
|
88
|
+
|
|
89
|
+
...
|
|
90
|
+
|
|
91
|
+
Query completed in 3.88 seconds
|
|
92
|
+
|
|
93
|
+
--------------------------------------------------------------------------------
|
|
94
|
+
➜ ~ ask --help
|
|
95
|
+
usage: ask [-h] [-m {gf,glmair,glmflash,q34t,q34,lfm,q8,q30,onano,omini}] [-d [DEEP_RESEARCH]] [-dd] [-c CONTINUE_IDS] [-s] [-fs] [--cleanup-db [CLEANUP_DB]] [--all]
|
|
96
|
+
[-H [HISTORY]] [-pa PRINT_IDS] [-p] [-v]
|
|
97
|
+
[query ...]
|
|
98
|
+
|
|
99
|
+
Tool-calling CLI with model selection.
|
|
100
|
+
|
|
101
|
+
positional arguments:
|
|
102
|
+
query The query string
|
|
103
|
+
|
|
104
|
+
options:
|
|
105
|
+
-h, --help show this help message and exit
|
|
106
|
+
-m, --model {gf,glmair,glmflash,q34t,q34,lfm,q8,q30,onano,omini}
|
|
107
|
+
Select the model alias
|
|
108
|
+
-d, --deep-research [DEEP_RESEARCH]
|
|
109
|
+
Enable deep research mode (optional: specify min number of queries, default 5)
|
|
110
|
+
-dd, --deep-dive Enable deep dive mode (extracts links and encourages reading more pages from same domain)
|
|
111
|
+
-c, --continue-chat CONTINUE_IDS
|
|
112
|
+
Continue conversation with context from specific history IDs (comma-separated, e.g. '1,2').
|
|
113
|
+
-s, --summarize Enable summarize mode (summarizes URL content and uses summaries for chat context)
|
|
114
|
+
-fs, --force-search Force the model to use web search (default: False).
|
|
115
|
+
Helpful for avoiding hallucinations with small models
|
|
116
|
+
--cleanup-db [CLEANUP_DB]
|
|
117
|
+
Delete history records. usage: --cleanup-db [ID|ID-ID|ID,ID] or --cleanup-db --all
|
|
118
|
+
--all Used with --cleanup-db to delete ALL history.
|
|
119
|
+
-H, --history [HISTORY]
|
|
120
|
+
Show last N queries and answer summaries (default 10).
|
|
121
|
+
Use with --print-answer to print the full answer(s).
|
|
122
|
+
-pa, --print-answer PRINT_IDS
|
|
123
|
+
Print the answer(s) for specific history IDs (comma-separated).
|
|
124
|
+
-p, --prompts List all configured user prompts.
|
|
125
|
+
-v, --verbose Enable verbose output (prints config and LLM inputs).
|
|
126
|
+
```
|
|
127
|
+
|
|
128
|
+
**Deep research mode** (encourages model to perform multiple searches)
|
|
129
|
+
|
|
130
|
+
ask -d 5 comprehensive analysis of topic
|
|
131
|
+
|
|
132
|
+
**Deep dive mode** (encourages model to read multiple pages from same domain)
|
|
133
|
+
|
|
134
|
+
ask -dd https://example.com
|
|
135
|
+
|
|
136
|
+
**Use a specific model**
|
|
137
|
+
|
|
138
|
+
ask -m gf what is quantum computing
|
|
139
|
+
|
|
140
|
+
**Force web search**
|
|
141
|
+
|
|
142
|
+
ask -fs latest news on topic
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
**Pre-configured model definitions**
|
|
146
|
+
|
|
147
|
+
The following model definitions ship with the default config.toml, but you can add any number of models that are served with an OpenAI-compatible API.
|
|
148
|
+
|
|
149
|
+
- `gf` - Google Gemini Flash (default)
|
|
150
|
+
- `lfm` - Liquid LFM 2.5
|
|
151
|
+
- `q8` - Qwen3 8B
|
|
152
|
+
- `q30` - Qwen3 30B
|
|
153
|
+
- `q34` - Qwen3 4B
|
|
154
|
+
- `q34t` - Qwen3 4B Thinking
|
|
155
|
+
|
|
156
|
+
## Custom Tools
|
|
157
|
+
|
|
158
|
+
You can define your own tools in `config.toml` that the LLM can use to interact with your local system. Each tool runs a CLI command and returns the output to the LLM.
|
|
159
|
+
|
|
160
|
+
Example configuration for a `list_dir` tool:
|
|
161
|
+
|
|
162
|
+
```toml
|
|
163
|
+
[tool.list_dir]
|
|
164
|
+
command = "ls"
|
|
165
|
+
description = "List the contents of a directory."
|
|
166
|
+
|
|
167
|
+
[tool.list_dir.parameters]
|
|
168
|
+
type = "object"
|
|
169
|
+
required = ["path"]
|
|
170
|
+
|
|
171
|
+
[tool.list_dir.parameters.properties.path]
|
|
172
|
+
type = "string"
|
|
173
|
+
default = "."
|
|
174
|
+
```
|
|
175
|
+
|
|
176
|
+
Example configuration for a `grep_search` tool:
|
|
177
|
+
|
|
178
|
+
```toml
|
|
179
|
+
[tool.grep_search]
|
|
180
|
+
command = "grep -r {pattern} {path}"
|
|
181
|
+
description = "Search for a pattern in files recursively."
|
|
182
|
+
|
|
183
|
+
[tool.grep_search.parameters]
|
|
184
|
+
type = "object"
|
|
185
|
+
required = ["pattern"]
|
|
186
|
+
|
|
187
|
+
[tool.grep_search.parameters.properties.pattern]
|
|
188
|
+
type = "string"
|
|
189
|
+
description = "The regex pattern to search for."
|
|
190
|
+
|
|
191
|
+
[tool.grep_search.parameters.properties.path]
|
|
192
|
+
type = "string"
|
|
193
|
+
description = "The directory path to search in."
|
|
194
|
+
default = "."
|
|
195
|
+
```
|
|
196
|
+
|
|
197
|
+
> [!CAUTION]
|
|
198
|
+
> **Security Risk**: Custom tools execute commands using your system shell. While asky attempts to quote arguments safely, exposing powerful CLI tools to an LLM can be risky. Use this feature with caution.
|
|
199
|
+
|
|
200
|
+
### How it works:
|
|
201
|
+
- **Placeholders**: Use `{param_name}` in the `command` string to inject arguments. If no placeholders are found, arguments are appended to the command.
|
|
202
|
+
- **Quoting**: All arguments are automatically cleaned (inner double-quotes removed) and wrapped in double-quotes for safety.
|
|
203
|
+
- **Execution**: Commands are executed via terminal shell, allowing for advanced piping and redirection.
|
|
204
|
+
|
|
205
|
+
> [!TIP]
|
|
206
|
+
> **Performance Tip**: When using recursive tools like `grep`, consider excluding large directories like `.venv` or `node_modules` to avoid timeouts:
|
|
207
|
+
> `command = "grep -r --exclude-dir={.venv,node_modules} {pattern} {path}"`
|
|
208
|
+
|
|
209
|
+
> [!NOTE]
|
|
210
|
+
> **Optional Parameters**: If you define a parameter with a `default` value in `config.toml`, it will be automatically injected into your `command` if the LLM omits it.
|
|
211
|
+
|
|
212
|
+
## Configuration options
|
|
213
|
+
[See default configuration](./src/asky/config.toml)
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
On first run, a default configuration file is created at `~/.config/asky/config.toml`. You can edit this file to configure models, API keys, and other settings.
|
|
217
|
+
|
|
218
|
+
### API Keys
|
|
219
|
+
You can set API keys in two ways:
|
|
220
|
+
1. **Environment Variables**: Set `GOOGLE_API_KEY` (or other configured env vars) in your shell.
|
|
221
|
+
2. **Config File**: Add keys directly to `[api.name]` sections in `config.toml`.
|
|
222
|
+
|
|
223
|
+
Example `config.toml`:
|
|
224
|
+
```toml
|
|
225
|
+
[general]
|
|
226
|
+
default_model = "gf"
|
|
227
|
+
|
|
228
|
+
[api.gemini]
|
|
229
|
+
api_key_env = "GOOGLE_API_KEY"
|
|
230
|
+
|
|
231
|
+
[api.lmstudio]
|
|
232
|
+
url = "http://localhost:1234/v1/chat/completions"
|
|
233
|
+
```
|
|
234
|
+
|
|
235
|
+
### Verification
|
|
236
|
+
Run with `-v` to see the loaded configuration:
|
|
237
|
+
```bash
|
|
238
|
+
ask -v
|
|
239
|
+
```
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
## Web Search
|
|
243
|
+
|
|
244
|
+
asky works best with a web search tool. You can use SearXNG or Serper API.
|
|
245
|
+
|
|
246
|
+
### Serper API
|
|
247
|
+
Serper is a paid service, but gives 2500 requests for free.
|
|
248
|
+
|
|
249
|
+
### Install & configure SearXNG
|
|
250
|
+
SearXNG is free and open source, it's easy to set up with a single docker command.
|
|
251
|
+
|
|
252
|
+
Following command taken from [SearXNG docs](https://docs.searxng.org/admin/installation-docker.html#instancing).
|
|
253
|
+
```bash
|
|
254
|
+
docker pull docker.io/searxng/searxng:latest
|
|
255
|
+
|
|
256
|
+
# Create directories for configuration and persistent data
|
|
257
|
+
$ mkdir -p ./searxng/config/ ./searxng/data/
|
|
258
|
+
$ cd ./searxng/
|
|
259
|
+
|
|
260
|
+
# Run the container
|
|
261
|
+
$ docker run --name searxng -d \
|
|
262
|
+
-p 8888:8080 \
|
|
263
|
+
-v "./config/:/etc/searxng/" \
|
|
264
|
+
-v "./data/:/var/cache/searxng/" \
|
|
265
|
+
docker.io/searxng/searxng:latest
|
|
266
|
+
```
|
|
267
|
+
You need to add `json` to the `formats` section of the default SearXNG config.yaml file.
|
|
268
|
+
```yaml
|
|
269
|
+
# remove format to deny access, use lower case.
|
|
270
|
+
# formats: [html, csv, json, rss]
|
|
271
|
+
formats:
|
|
272
|
+
- html
|
|
273
|
+
- json
|
|
274
|
+
```
|
|
275
|
+
Then restart the container.
|
|
276
|
+
```bash
|
|
277
|
+
docker restart searxng
|
|
278
|
+
```
|
|
279
|
+
|
|
280
|
+
|
|
281
|
+
|
|
282
|
+
## Requirements
|
|
283
|
+
|
|
284
|
+
- Python 3.10+
|
|
285
|
+
- Running SearXNG instance or Serper API key.
|
|
286
|
+
- LM Studio (for local models) or API keys for remote models
|
|
287
|
+
|
|
288
|
+
## License
|
|
289
|
+
|
|
290
|
+
MIT
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
asky/__init__.py,sha256=7VmWT0rB9Bxx9OkRdd0-J934J9DMwO8h7jKwva8OuGI,160
|
|
2
|
+
asky/__main__.py,sha256=QlI65PwFjeU7adUyTzMMaX6ArDVPuvqjY1DViZb3-k4,106
|
|
3
|
+
asky/banner.py,sha256=ucEVPj840J2wvECM8CaBRa6CqvEjMGotY1HLiyeIjqA,3655
|
|
4
|
+
asky/cli.py,sha256=txY4JfTECPG7fYbfkYOdeNDxYik0iYaIV2YNX9uBEGc,16620
|
|
5
|
+
asky/config.py,sha256=Ti7V5MZAneP48qqTj2K_RRSw6kNKIdB_J--XBLjmqig,9100
|
|
6
|
+
asky/config.toml,sha256=yvfM5IhH3qPMnVhxpy0Ak4IZ1l8Ic5crhoX_yuV4-X0,7827
|
|
7
|
+
asky/html.py,sha256=ZJJsLQlUFpPO8OYrhHV3o_G2mmEdF2SU0sUVs6-8k2I,1805
|
|
8
|
+
asky/llm.py,sha256=VKAmsGO_AQvLIuXuLE8NztBFKR4PtvK9OrR1yhTzRew,12758
|
|
9
|
+
asky/storage.py,sha256=91PkV6gnZkjayEGIgFHN7zeYCxqMOuquuPGq4aFKv4E,4944
|
|
10
|
+
asky/tools.py,sha256=_myR4ntshgJH6oRhDumxdrXkF-8j5Ry79Rt_obyNP_c,10034
|
|
11
|
+
asky_cli-0.1.6.dist-info/METADATA,sha256=pGPI9UKCDJ3r9t44sahuowPnLDFQSAPI7qr5_9Z-PAc,10321
|
|
12
|
+
asky_cli-0.1.6.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
13
|
+
asky_cli-0.1.6.dist-info/entry_points.txt,sha256=_Q-svN8lIJemHnlPjGmZPBi6cfWd9nuK6NGNLDT8c8w,59
|
|
14
|
+
asky_cli-0.1.6.dist-info/RECORD,,
|