yaicli 0.3.3__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyproject.toml +6 -3
- yaicli/chat.py +396 -0
- yaicli/cli.py +251 -251
- yaicli/client.py +385 -0
- yaicli/config.py +32 -20
- yaicli/console.py +2 -2
- yaicli/const.py +46 -21
- yaicli/entry.py +68 -39
- yaicli/exceptions.py +8 -36
- yaicli/functions/__init__.py +39 -0
- yaicli/functions/buildin/execute_shell_command.py +47 -0
- yaicli/printer.py +145 -225
- yaicli/render.py +1 -1
- yaicli/role.py +231 -0
- yaicli/schemas.py +31 -0
- yaicli/tools.py +103 -0
- yaicli/utils.py +5 -2
- {yaicli-0.3.3.dist-info → yaicli-0.5.0.dist-info}/METADATA +172 -132
- yaicli-0.5.0.dist-info/RECORD +24 -0
- {yaicli-0.3.3.dist-info → yaicli-0.5.0.dist-info}/entry_points.txt +1 -1
- yaicli/api.py +0 -316
- yaicli/chat_manager.py +0 -290
- yaicli/roles.py +0 -248
- yaicli-0.3.3.dist-info/RECORD +0 -20
- {yaicli-0.3.3.dist-info → yaicli-0.5.0.dist-info}/WHEEL +0 -0
- {yaicli-0.3.3.dist-info → yaicli-0.5.0.dist-info}/licenses/LICENSE +0 -0
yaicli/schemas.py
ADDED
@@ -0,0 +1,31 @@
+from dataclasses import dataclass
+from typing import Optional
+
+
+@dataclass
+class ChatMessage:
+    """Chat message class"""
+
+    role: str
+    content: str
+    name: Optional[str] = None
+    tool_call_id: Optional[str] = None
+
+
+@dataclass
+class ToolCall:
+    """Function call class"""
+
+    id: str
+    name: str
+    arguments: str
+
+
+@dataclass
+class LLMResponse:
+    """Data structure for llm response with reasoning and content"""
+
+    reasoning: Optional[str] = None
+    content: str = ""
+    finish_reason: Optional[str] = None
+    tool_call: Optional[ToolCall] = None
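For orientation, here is a rough usage sketch of how these dataclasses could represent one tool-call round trip. It is not code from the package: the mapping to provider payloads happens in yaicli/client.py, which this diff page does not expand, so the role names and the use of tool_call_id here follow common OpenAI conventions and are assumptions.

```python
# Hypothetical sketch (not from the package): one tool-call round trip
# expressed with the dataclasses added above.
from yaicli.schemas import ChatMessage, LLMResponse, ToolCall

# A response whose stream ended because the model asked for a tool.
resp = LLMResponse(
    reasoning="I should run a shell command to measure the directory size.",
    content="",
    finish_reason="tool_calls",
    tool_call=ToolCall(
        id="call_123",
        name="execute_shell_command",
        arguments='{"shell_command": "du -sh ."}',
    ),
)

history = [
    ChatMessage(role="user", content="check the current dir total size"),
    # Assistant turn that issued the tool call (content may be empty here).
    ChatMessage(role="assistant", content=resp.content),
    # Tool result is sent back with the matching tool_call_id, OpenAI-style.
    ChatMessage(
        role="tool",
        content="Exit code: 0, Output: 156M .",
        name=resp.tool_call.name,
        tool_call_id=resp.tool_call.id,
    ),
]
```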
yaicli/tools.py
ADDED
@@ -0,0 +1,103 @@
+import importlib.util
+import sys
+from typing import Any, Dict, List, NewType, Optional
+
+from instructor import OpenAISchema
+
+from .console import get_console
+from .const import FUNCTIONS_DIR
+
+console = get_console()
+
+
+class Function:
+    """Function description class"""
+
+    def __init__(self, function: type[OpenAISchema]):
+        self.name = function.openai_schema["name"]
+        self.description = function.openai_schema.get("description", "")
+        self.parameters = function.openai_schema.get("parameters", {})
+        self.execute = function.execute  # type: ignore
+
+
+FunctionName = NewType("FunctionName", str)
+
+_func_name_map: Optional[dict[FunctionName, Function]] = None
+
+
+def get_func_name_map() -> dict[FunctionName, Function]:
+    """Get function name map"""
+    global _func_name_map
+    if _func_name_map:
+        return _func_name_map
+    if not FUNCTIONS_DIR.exists():
+        FUNCTIONS_DIR.mkdir(parents=True, exist_ok=True)
+        return {}
+    functions = []
+    for file in FUNCTIONS_DIR.glob("*.py"):
+        if file.name.startswith("_"):
+            continue
+        module_name = str(file).replace("/", ".").rstrip(".py")
+        spec = importlib.util.spec_from_file_location(module_name, str(file))
+        module = importlib.util.module_from_spec(spec)  # type: ignore
+        sys.modules[module_name] = module
+        spec.loader.exec_module(module)  # type: ignore
+
+        if not issubclass(module.Function, OpenAISchema):
+            raise TypeError(f"Function {module_name} must be a subclass of instructor.OpenAISchema")
+        if not hasattr(module.Function, "execute"):
+            raise TypeError(f"Function {module_name} must have an 'execute' classmethod")
+
+        # Add to function list
+        functions.append(Function(function=module.Function))
+
+    # Cache the function list
+    _func_name_map = {FunctionName(func.name): func for func in functions}
+    return _func_name_map
+
+
+def list_functions() -> list[Function]:
+    """List all available buildin functions"""
+    global _func_name_map
+    if not _func_name_map:
+        _func_name_map = get_func_name_map()
+
+    return list(_func_name_map.values())
+
+
+def get_function(name: FunctionName) -> Function:
+    """Get a function by name
+
+    Args:
+        name: Function name
+
+    Returns:
+        Function execute method
+
+    Raises:
+        ValueError: If function not found
+    """
+    func_map = get_func_name_map()
+    if name in func_map:
+        return func_map[FunctionName(name)]
+    raise ValueError(f"Function {name!r} not found")
+
+
+def get_openai_schemas() -> List[Dict[str, Any]]:
+    """Get OpenAI-compatible function schemas
+
+    Returns:
+        List of function schemas in OpenAI format
+    """
+    transformed_schemas = []
+    for function in list_functions():
+        schema = {
+            "type": "function",
+            "function": {
+                "name": function.name,
+                "description": function.description,
+                "parameters": function.parameters,
+            },
+        }
+        transformed_schemas.append(schema)
+    return transformed_schemas
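Given the checks above (each plugin module must expose a class literally named `Function` that subclasses `instructor.OpenAISchema` and provides an `execute` classmethod), a user-defined function dropped into `FUNCTIONS_DIR` might look roughly like the sketch below. The file name, field, and return value are made up for illustration, and how client.py passes the parsed tool-call arguments into `execute` is not visible in this diff, so the keyword-argument signature is an assumption.

```python
# Hypothetical plugin file, e.g. <FUNCTIONS_DIR>/word_count.py
# (FUNCTIONS_DIR is defined in yaicli/const.py, which this diff does not show).
from instructor import OpenAISchema
from pydantic import Field


class Function(OpenAISchema):
    """Count the words in a piece of text."""

    text: str = Field(..., description="Text to count words in")

    @classmethod
    def execute(cls, text: str) -> str:
        # Whatever string is returned here is what yaicli can feed back
        # to the model as the tool result.
        return f"Word count: {len(text.split())}"
```

Note that the loader keys each plugin by `function.openai_schema["name"]` and skips files whose names start with an underscore, so one public plugin class per file is the expected layout.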
yaicli/utils.py
CHANGED
@@ -6,7 +6,7 @@ from typing import Any, Callable, Optional, TypeVar
 import typer
 from distro import name as distro_name

-from
+from .const import DEFAULT_OS_NAME, DEFAULT_SHELL_NAME

 T = TypeVar("T", int, float, str, bool)

@@ -110,7 +110,7 @@ def filter_command(command: str) -> Optional[str]:
     return command.strip().replace("```", "")


-def str2bool(value: str) -> bool:
+def str2bool(value: Any) -> bool:
     """Convert a string representation of truth to true (1) or false (0).
     True values are 'y', 'yes', 't', 'true', 'on', and '1';
     false values are 'n', 'no', 'f', 'false', 'off', and '0'.
@@ -119,6 +119,9 @@ def str2bool(value: str) -> bool:
     if value in {False, True}:
         return bool(value)

+    if not isinstance(value, str):
+        return value
+
     norm = value.strip().lower()

     if norm in {"1", "true", "t", "yes", "y", "on"}:
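A quick, assumed illustration of how the patched str2bool behaves after this change (based on the docstring and the new passthrough branch; these are not the package's own tests):

```python
from yaicli.utils import str2bool

assert str2bool(True) is True     # bools are returned unchanged
assert str2bool("Yes") is True    # truthy strings: y / yes / t / true / on / 1
assert str2bool("off") is False   # falsy strings: n / no / f / false / off / 0
assert str2bool(0.7) == 0.7       # new: non-string, non-bool values now pass through as-is
```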
{yaicli-0.3.3.dist-info → yaicli-0.5.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: yaicli
-Version: 0.3.3
+Version: 0.5.0
 Summary: A simple CLI tool to interact with LLM
 Project-URL: Homepage, https://github.com/belingud/yaicli
 Project-URL: Repository, https://github.com/belingud/yaicli
@@ -215,7 +215,10 @@ Classifier: Programming Language :: Python :: 3
 Requires-Python: >=3.9
 Requires-Dist: distro>=1.9.0
 Requires-Dist: httpx>=0.28.1
-Requires-Dist:
+Requires-Dist: instructor>=1.7.9
+Requires-Dist: json-repair>=0.44.1
+Requires-Dist: litellm>=1.67.5
+Requires-Dist: openai>=1.76.0
 Requires-Dist: prompt-toolkit>=3.0.50
 Requires-Dist: rich>=13.9.4
 Requires-Dist: socksio>=1.0.0
@@ -235,9 +238,7 @@ generate and execute shell commands, or get quick answers without leaving your w

 **Supports both standard and deep reasoning models across all major LLM providers.**

-<
-<img src="https://vhs.charm.sh/vhs-5U1BBjJkTUBReRswsSgIVx.gif" alt="YAICLI Chat Demo" width="85%">
-</p>
+<a href="https://asciinema.org/a/vyreM0n576GjGL2asjI3QzUIY" target="_blank"><img src="https://asciinema.org/a/vyreM0n576GjGL2asjI3QzUIY.svg" width="85%"/></a>

 > [!NOTE]
 > YAICLI is actively developed. While core functionality is stable, some features may evolve in future releases.
@@ -259,8 +260,7 @@ generate and execute shell commands, or get quick answers without leaving your w
 ### 🔌 Universal LLM Compatibility

 - **OpenAI-Compatible**: Works with any OpenAI-compatible API endpoint
-- **Multi-Provider Support**:
-- **Custom Response Parsing**: Extract exactly what you need with jmespath
+- **Multi-Provider Support**: Using litellm to support all major LLM providers

 ### 💻 Enhanced Terminal Experience

@@ -272,7 +272,11 @@ generate and execute shell commands, or get quick answers without leaving your w

 - **Layered Configuration**: Environment variables > Config file > Sensible defaults
 - **Debugging Tools**: Verbose mode with detailed API tracing
-
+
+### 📚 Function Calling
+
+- **Function Calling**: Enable function calling in API requests
+- **Function Output**: Show the output of functions

 

@@ -320,71 +324,79 @@ settings, just as below:

 ```ini
 [core]
-PROVIDER
-BASE_URL
-API_KEY
-MODEL
+PROVIDER=openai
+BASE_URL=https://api.openai.com/v1
+API_KEY=
+MODEL=gpt-4o

 # auto detect shell and os (or specify manually, e.g., bash, zsh, powershell.exe)
-SHELL_NAME
-OS_NAME
-
-# API paths (usually no need to change for OpenAI compatible APIs)
-COMPLETION_PATH = chat/completions
-ANSWER_PATH = choices[0].message.content
+SHELL_NAME=auto
+OS_NAME=auto

 # true: streaming response, false: non-streaming
-STREAM
+STREAM=true

 # LLM parameters
-TEMPERATURE
-TOP_P
-MAX_TOKENS
-TIMEOUT
+TEMPERATURE=0.5
+TOP_P=1.0
+MAX_TOKENS=1024
+TIMEOUT=60
+REASONING_EFFORT=

 # Interactive mode parameters
-INTERACTIVE_ROUND
+INTERACTIVE_ROUND=25

 # UI/UX
-CODE_THEME
+CODE_THEME=monokai
 # Max entries kept in history file
-MAX_HISTORY
-AUTO_SUGGEST
+MAX_HISTORY=500
+AUTO_SUGGEST=true
 # Print reasoning content or not
-SHOW_REASONING
+SHOW_REASONING=true
 # Text alignment (default, left, center, right, full)
-JUSTIFY
+JUSTIFY=default

 # Chat history settings
-CHAT_HISTORY_DIR
-MAX_SAVED_CHATS
+CHAT_HISTORY_DIR=<tmpdir>/yaicli/chats
+MAX_SAVED_CHATS=20
+
+# Role settings
+# Set to false to disable warnings about modified built-in roles
+ROLE_MODIFY_WARNING=true
+
+# Function settings
+# Set to false to disable sending functions in API requests
+ENABLE_FUNCTIONS=true
+# Set to false to disable showing function output in the response
+SHOW_FUNCTION_OUTPUT=true
 ```

 ### Configuration Options Reference

-| Option
-
-| `PROVIDER`
-| `BASE_URL`
-| `API_KEY`
-| `MODEL`
-| `SHELL_NAME`
-| `OS_NAME`
-| `
-| `
-| `
-| `
-| `
-| `
-| `
-| `
-| `
-| `
-| `
-| `
-| `
-| `
-| `
+| Option | Description | Default | Env Variable |
+| ---------------------- | ------------------------------------------- | --------------------------- | -------------------------- |
+| `PROVIDER` | LLM provider (openai, claude, cohere, etc.) | `openai` | `YAI_PROVIDER` |
+| `BASE_URL` | API endpoint URL | `https://api.openai.com/v1` | `YAI_BASE_URL` |
+| `API_KEY` | Your API key | - | `YAI_API_KEY` |
+| `MODEL` | LLM model to use | `gpt-4o` | `YAI_MODEL` |
+| `SHELL_NAME` | Shell type | `auto` | `YAI_SHELL_NAME` |
+| `OS_NAME` | Operating system | `auto` | `YAI_OS_NAME` |
+| `STREAM` | Enable streaming | `true` | `YAI_STREAM` |
+| `TIMEOUT` | API timeout (seconds) | `60` | `YAI_TIMEOUT` |
+| `INTERACTIVE_ROUND` | Interactive mode rounds | `25` | `YAI_INTERACTIVE_ROUND` |
+| `CODE_THEME` | Syntax highlighting theme | `monokai` | `YAI_CODE_THEME` |
+| `TEMPERATURE` | Response randomness | `0.7` | `YAI_TEMPERATURE` |
+| `TOP_P` | Top-p sampling | `1.0` | `YAI_TOP_P` |
+| `MAX_TOKENS` | Max response tokens | `1024` | `YAI_MAX_TOKENS` |
+| `MAX_HISTORY` | Max history entries | `500` | `YAI_MAX_HISTORY` |
+| `AUTO_SUGGEST` | Enable history suggestions | `true` | `YAI_AUTO_SUGGEST` |
+| `SHOW_REASONING` | Enable reasoning display | `true` | `YAI_SHOW_REASONING` |
+| `JUSTIFY` | Text alignment | `default` | `YAI_JUSTIFY` |
+| `CHAT_HISTORY_DIR` | Chat history directory | `<tempdir>/yaicli/chats` | `YAI_CHAT_HISTORY_DIR` |
+| `MAX_SAVED_CHATS` | Max saved chats | `20` | `YAI_MAX_SAVED_CHATS` |
+| `ROLE_MODIFY_WARNING` | Warn user when modifying role | `true` | `YAI_ROLE_MODIFY_WARNING` |
+| `ENABLE_FUNCTIONS` | Enable function calling | `true` | `YAI_ENABLE_FUNCTIONS` |
+| `SHOW_FUNCTION_OUTPUT` | Show function output in response | `true` | `YAI_SHOW_FUNCTION_OUTPUT` |

 ### LLM Provider Configuration

@@ -393,52 +405,29 @@ other providers.

 #### Pre-configured Provider Settings

-| Provider | BASE_URL |
-
-| **OpenAI** (default) | `https://api.openai.com/v1` |
-| **Claude** (native API) | `https://api.anthropic.com/v1` |
-| **Claude** (OpenAI-compatible) | `https://api.anthropic.com/v1/openai` |
-| **Cohere** | `https://api.cohere.com
-| **
+| Provider | BASE_URL |
+| ------------------------------ | --------------------------------------------------------- |
+| **OpenAI** (default) | `https://api.openai.com/v1` |
+| **Claude** (native API) | `https://api.anthropic.com/v1` |
+| **Claude** (OpenAI-compatible) | `https://api.anthropic.com/v1/openai` |
+| **Cohere** | `https://api.cohere.com` |
+| **Gemini** | `https://generativelanguage.googleapis.com/v1beta/openai` |

 > **Note**: Many providers offer OpenAI-compatible endpoints that work with the default settings.
 >
 > - Google Gemini: https://ai.google.dev/gemini-api/docs/openai
 > - Claude: https://docs.anthropic.com/en/api/openai-sdk

-
-
-To configure a custom provider:
-
-1. **Find the API Endpoint**:
-
-   - Check the provider's API documentation for their chat completion endpoint
-
-2. **Identify the Response Structure**:
-
-   - Look at the JSON response format to find where the text content is located
-
-3. **Set the Path Expression**:
-   - Use jmespath syntax to specify the path to the text content
+If you are not sure about the base_url, or you just use the provider's default base_url, simply leave it blank.

-
-
-
-
-
-
-   "text": "Hi! My name is Claude.",
-   "type": "text"
-  }
- ],
- "id": "msg_013Zva2CMHLNnXjNJJKqJ2EF",
- "model": "claude-3-7-sonnet-20250219",
- "role": "assistant"
-}
+```ini
+[core]
+PROVIDER=cohere
+BASE_URL=
+API_KEY=xxx
+MODEL=command-r-plus
 ```

-The path to extract the text is: `content.[0].text`
-
 ### Syntax Highlighting Themes

 YAICLI supports all Pygments syntax highlighting themes. You can set your preferred theme in the config file:
@@ -449,7 +438,7 @@ CODE_THEME = monokai

 Browse available themes at: https://pygments.org/styles/

-

 ## 🚀 Usage

@@ -483,43 +472,50 @@ ai --verbose "Explain quantum computing"
 YAICLI: Your AI assistant in the command line.
 Call with a PROMPT to get a direct answer, use --shell to execute as command, or use --chat for an interactive session.

-╭─ Arguments
-│ prompt [PROMPT] The prompt to send to the LLM. Reads from stdin if available. [default: None]
-
-╭─ Options
-│ --install-completion Install completion for the current shell.
-│ --show-completion Show completion for the current shell, to copy it or customize the installation.
-│ --help -h Show this message and exit.
-
-╭─ LLM Options
-│ --model -M
-│ --temperature -T
-│ --top-p -P
-│ --max-tokens -M
-
-
-
-│ --
-│ --
-│ --
-│ --
-
-
-
-│ --
-
-
-
-
-
-
-
-
-
-│ --
-│ --
-│ --
-
+╭─ Arguments ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ prompt [PROMPT] The prompt to send to the LLM. Reads from stdin if available. [default: None] │
+╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --install-completion Install completion for the current shell. │
+│ --show-completion Show completion for the current shell, to copy it or customize the installation. │
+│ --help -h Show this message and exit. │
+╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ LLM Options ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --model -M TEXT Specify the model to use. │
+│ --temperature -T FLOAT RANGE [0.0<=x<=2.0] Specify the temperature to use. [default: 0.5] │
+│ --top-p -P FLOAT RANGE [0.0<=x<=1.0] Specify the top-p to use. [default: 1.0] │
+│ --max-tokens -M INTEGER RANGE [x>=1] Specify the max tokens to use. [default: 1024] │
+│ --stream --no-stream Specify whether to stream the response. (default: stream) │
+╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Role Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --role -r TEXT Specify the assistant role to use. [default: DEFAULT] │
+│ --create-role TEXT Create a new role with the specified name. │
+│ --delete-role TEXT Delete a role with the specified name. │
+│ --list-roles List all available roles. │
+│ --show-role TEXT Show the role with the specified name. │
+╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Chat Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --chat -c Start in interactive chat mode. │
+│ --list-chats List saved chat sessions. │
+╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Shell Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --shell -s Generate and optionally execute a shell command (non-interactive). │
+╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Code Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --code Generate code in plaintext (non-interactive). │
+╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Other Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --verbose -V Show verbose output (e.g., loaded config). │
+│ --template Show the default config file template and exit. │
+│ --show-reasoning --hide-reasoning Show reasoning content from the LLM. (default: show) │
+│ --justify -j [default|left|center|right|full] Specify the justify to use. [default: default] │
+╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Function Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --install-functions Install default functions. │
+│ --list-functions List all available functions. │
+│ --enable-functions --disable-functions Enable/disable function calling in API requests (default: disabled) │
+│ --show-function-output --hide-function-output Show the output of functions (default: show) │
+╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
 ```

 ### Interactive Mode Features
@@ -530,6 +526,7 @@ ai --verbose "Explain quantum computing"

 **Commands**

+- `/help|?` - Show help message
 - `/clear` - Clear conversation history
 - `/his` - Show command history
 - `/list` - List saved chats
@@ -834,6 +831,51 @@ $ ai --code "write a fib generator" --model deepseek-r1

 

+### Function Call
+
+To use function calling, first install the default functions with `ai --install-functions`.
+After that, you can check the available functions with `ai --list-functions`.
+You can also define your own functions by adding them to the config folder in `~/.config/yaicli/functions/` (`C:\Users\<user>\.config\yaicli\functions` on Windows).
+
+The `--enable-functions` option corresponds to the configuration key `ENABLE_FUNCTIONS`.
+
+```shell
+ai 'check the current dir total size' --enable-functions
+Assistant:
+Thinking:
+
+▌ Okay, the user wants to check the current directory's total size. Hmm, how do I do that in macOS with zsh?
+▌ I remember that the command to get disk usage is usually 'du'. But wait, the default 'du' might not give the total size of the
+▌ current directory directly. Let me think. Oh right, if I use 'du -sh' with the current directory, that should give the total size
+▌ in human-readable format.
+▌ Wait, but sometimes the -s option summarizes the directory. So 'du -sh .' would calculate the total size of the current directory
+▌ and its subdirectories. That should work. Let me confirm the syntax. Yeah, 'du -sh .' is the right command here.
+▌ The user is using zsh, but the 'du' command is standard, so it should be available. I need to execute this shell command. The
+▌ function provided is execute_shell_command, so I'll call that with the shell command 'du -sh .' as the argument.
+▌ I should make sure the parameters are correctly formatted. The function requires a shell_command string. Alright, that's all.
+▌ Let's generate the tool call.
+
+{"index":0,"finish_reason":"tool_calls","delta":{"role":"assistant","content":null,"audio":null,"tool_calls":[{"id":"call_202505141526
+36cc3f776ae8f14b56_0","index":0,"type":"function","function":{"name":"execute_shell_command","arguments":"{"shell_command": "du -sh
+."}","outputs":null},"code_interpreter":null,"retrieval":null,"drawing_tool":null,"web_browser":null,"search_intent":null,"search_resu
+lt":null}],"tool_call_id":null,"attachments":null,"metadata":null}}
+@Function call: execute_shell_command({"shell_command": "du -sh ."})
+╭─ Function output ─────╮
+│ Exit code: 0, Output: │
+│ 156M . │
+│ │
+╰───────────────────────╯
+Thinking:
+
+▌ Okay, the user asked to check the current directory's total size. I used the 'du -sh .' command, which stands for disk usage,
+▌ summarize, and current directory. The output was "156M". So I need to present this in a concise way.
+▌ First, confirm the command was executed. Then, report the result clearly. Since the user didn't ask for extra details, keep it
+▌ simple. Just state the total size as 156MB. Maybe mention the command used for transparency. Alright, that should cover it without
+▌ overcomplicating.
+
+Current directory size: 156M (using du -sh .).
+```
+
 ## 💻 Technical Details

 ### Architecture
@@ -849,12 +891,10 @@ YAICLI is designed with a modular architecture that separates concerns and makes
 ### Dependencies

 | Library | Purpose |
-
+| --------------------------------------------------------------- | -------------------------------------------------- |
 | [Typer](https://typer.tiangolo.com/) | Command-line interface with type hints |
 | [Rich](https://rich.readthedocs.io/) | Terminal formatting and beautiful display |
 | [prompt_toolkit](https://python-prompt-toolkit.readthedocs.io/) | Interactive input with history and auto-completion |
-| [httpx](https://www.python-httpx.org/) | Modern HTTP client with async support |
-| [jmespath](https://jmespath.org/) | JSON data extraction |

 ## 👨💻 Contributing

yaicli-0.5.0.dist-info/RECORD
ADDED
@@ -0,0 +1,24 @@
+pyproject.toml,sha256=-w396zGDRn2FDWOQzsKsi5lCrGe-rpQqCWL06I6EzYU,1596
+yaicli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+yaicli/chat.py,sha256=DeTmOeBPU-oiOAIaDj2h-auJor0GyVVhrViLYF6zGIM,13638
+yaicli/cli.py,sha256=XY-xfB_kGHeBaNo17iI1OtEuuC2YIGW2WG4k87coX_Y,22502
+yaicli/client.py,sha256=fKUDmn9s7tF9Q2wIB8WhbsjFYIpV0E29t_Vw0qVmVbI,16229
+yaicli/config.py,sha256=_mp8P6zXyrdp4TzBfHraOCkjv5DMZMOwiEhQnFYWwZA,6321
+yaicli/console.py,sha256=vARPJd-3lafutsQWrGntQVjLrYqaJD3qisN82pmuhjU,1973
+yaicli/const.py,sha256=FYW8cNqFzZwnYbgr_HXZSzSS8OIU_UsFIn4SZ0zOJ8U,8129
+yaicli/entry.py,sha256=1cd6c_P4stmAtJLhpaSJB-rXTROhmS_kuzX76T93axk,8705
+yaicli/exceptions.py,sha256=WBYg8OTJJzaj7lt6HE7ZyBoe5T6A3yZRNCRfWd4iN0c,372
+yaicli/history.py,sha256=s-57X9FMsaQHF7XySq1gGH_jpd_cHHTYafYu2ECuG6M,2472
+yaicli/printer.py,sha256=c6Y5uN5yR7GKGT2OgZyrHCLYThAeu8fNjrhUGig873s,8644
+yaicli/render.py,sha256=k8o2P8fI44PJlyQbs7gmMiu2x2prwajdWn5JIt15BIA,505
+yaicli/role.py,sha256=PfwiVJIlzg7EzlvMM-kIy6vBK0d5d_J4M1I_fIZGnWk,7399
+yaicli/schemas.py,sha256=PiuSY7ORZaA4OL_tYm0inwqirHp5M-F3zcCipLwsH9E,571
+yaicli/tools.py,sha256=d-5LXbEB-1Uq5VKSgwlAiNDVOGrHkku2DpmZoorq1zw,3098
+yaicli/utils.py,sha256=bpo3Xhozpxsaci3FtEIKZ32l4ZdyWMsrHjYGX0tB4J4,4541
+yaicli/functions/__init__.py,sha256=_FJooQ9GkijG8xLwuU0cr5GBrGnC9Nc6bnCeUjrsT0k,1271
+yaicli/functions/buildin/execute_shell_command.py,sha256=unl1-F8p6QZajeHdA0u5UpURMJM0WhdWMUWCCCHVRcI,1320
+yaicli-0.5.0.dist-info/METADATA,sha256=4gqkgfCv4aJFzq6mBTrtgQ8IrNolnRA-ekbRyERarHQ,48657
+yaicli-0.5.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+yaicli-0.5.0.dist-info/entry_points.txt,sha256=iYVyQP0PJIm9tQnlQheqT435kK_xdGoi5j9aswGV9hA,66
+yaicli-0.5.0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+yaicli-0.5.0.dist-info/RECORD,,