yaicli 0.4.0__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyproject.toml +5 -3
- yaicli/chat.py +396 -0
- yaicli/cli.py +250 -251
- yaicli/client.py +385 -0
- yaicli/config.py +31 -24
- yaicli/console.py +2 -2
- yaicli/const.py +28 -2
- yaicli/entry.py +68 -39
- yaicli/exceptions.py +8 -36
- yaicli/functions/__init__.py +39 -0
- yaicli/functions/buildin/execute_shell_command.py +47 -0
- yaicli/printer.py +145 -225
- yaicli/render.py +1 -1
- yaicli/role.py +231 -0
- yaicli/schemas.py +31 -0
- yaicli/tools.py +103 -0
- yaicli/utils.py +5 -2
- {yaicli-0.4.0.dist-info → yaicli-0.5.0.dist-info}/METADATA +164 -87
- yaicli-0.5.0.dist-info/RECORD +24 -0
- {yaicli-0.4.0.dist-info → yaicli-0.5.0.dist-info}/entry_points.txt +1 -1
- yaicli/chat_manager.py +0 -290
- yaicli/providers/__init__.py +0 -34
- yaicli/providers/base.py +0 -51
- yaicli/providers/cohere.py +0 -136
- yaicli/providers/openai.py +0 -176
- yaicli/roles.py +0 -276
- yaicli-0.4.0.dist-info/RECORD +0 -23
- {yaicli-0.4.0.dist-info → yaicli-0.5.0.dist-info}/WHEEL +0 -0
- {yaicli-0.4.0.dist-info → yaicli-0.5.0.dist-info}/licenses/LICENSE +0 -0
yaicli/schemas.py
ADDED
@@ -0,0 +1,31 @@
|
|
1
|
+
from dataclasses import dataclass
|
2
|
+
from typing import Optional
|
3
|
+
|
4
|
+
|
5
|
+
@dataclass
class ChatMessage:
    """Chat message class"""

    # Message author role — presumably "system"/"user"/"assistant"/"tool"
    # per the OpenAI chat convention; TODO confirm against callers.
    role: str
    # Text body of the message.
    content: str
    # Optional name attached to the message (e.g. the tool/function name
    # for tool-result messages) — assumption, verify against client code.
    name: Optional[str] = None
    # Links a tool-result message back to the tool call that produced it.
    tool_call_id: Optional[str] = None
|
13
|
+
|
14
|
+
|
15
|
+
@dataclass
class ToolCall:
    """Function call class"""

    # Provider-assigned identifier for this tool call.
    id: str
    # Name of the function the model asked to invoke.
    name: str
    # Raw argument payload as a string — presumably JSON-encoded; the
    # package depends on json-repair, so it may be malformed JSON. Verify.
    arguments: str
|
22
|
+
|
23
|
+
|
24
|
+
@dataclass
class LLMResponse:
    """Data structure for llm response with reasoning and content"""

    # Model "thinking"/reasoning text, when the provider exposes it.
    reasoning: Optional[str] = None
    # Answer text; defaults to empty rather than None so it can be
    # concatenated safely while streaming.
    content: str = ""
    # Why generation stopped (e.g. "stop", "tool_calls") — provider-defined.
    finish_reason: Optional[str] = None
    # Present when the model requested a tool invocation.
    tool_call: Optional[ToolCall] = None
|
yaicli/tools.py
ADDED
@@ -0,0 +1,103 @@
|
|
1
|
+
import importlib.util
|
2
|
+
import sys
|
3
|
+
from typing import Any, Dict, List, NewType, Optional
|
4
|
+
|
5
|
+
from instructor import OpenAISchema
|
6
|
+
|
7
|
+
from .console import get_console
|
8
|
+
from .const import FUNCTIONS_DIR
|
9
|
+
|
10
|
+
console = get_console()
|
11
|
+
|
12
|
+
|
13
|
+
class Function:
    """Function description class

    Wraps an ``instructor.OpenAISchema`` subclass, exposing its schema
    metadata (name, description, parameters) and its ``execute`` callable.
    """

    def __init__(self, function: type[OpenAISchema]):
        # Read the (possibly computed) schema property once instead of
        # re-evaluating it for every field.
        schema = function.openai_schema
        self.name = schema["name"]
        self.description = schema.get("description", "")
        self.parameters = schema.get("parameters", {})
        self.execute = function.execute  # type: ignore
|
21
|
+
|
22
|
+
|
23
|
+
FunctionName = NewType("FunctionName", str)
|
24
|
+
|
25
|
+
_func_name_map: Optional[dict[FunctionName, Function]] = None
|
26
|
+
|
27
|
+
|
28
|
+
def get_func_name_map() -> dict[FunctionName, Function]:
    """Get function name map.

    Scans ``FUNCTIONS_DIR`` for ``*.py`` files (skipping ``_``-prefixed
    ones), imports each module, and wraps its ``Function`` class. The
    result is cached in the module-level ``_func_name_map``.

    Returns:
        Mapping of function name to ``Function`` wrapper. Empty if the
        directory did not exist (it is created for next time).

    Raises:
        ImportError: If a module spec/loader cannot be created for a file.
        TypeError: If a module's ``Function`` is missing, is not an
            ``instructor.OpenAISchema`` subclass, or lacks an ``execute``
            attribute.
    """
    global _func_name_map
    if _func_name_map:
        return _func_name_map
    if not FUNCTIONS_DIR.exists():
        FUNCTIONS_DIR.mkdir(parents=True, exist_ok=True)
        return {}
    functions = []
    for file in FUNCTIONS_DIR.glob("*.py"):
        if file.name.startswith("_"):
            continue
        # Bug fix: the previous `str(file).replace("/", ".").rstrip(".py")`
        # stripped ANY trailing '.', 'p' or 'y' characters (e.g. "copy.py"
        # became "co") and ignored Windows "\\" separators. Build the dotted
        # module name from the path parts with the suffix dropped instead.
        module_name = ".".join(file.with_suffix("").parts)
        spec = importlib.util.spec_from_file_location(module_name, str(file))
        if spec is None or spec.loader is None:
            raise ImportError(f"Cannot load function module from {file}")
        module = importlib.util.module_from_spec(spec)
        sys.modules[module_name] = module
        spec.loader.exec_module(module)

        # Validate the module's contract up front so a missing `Function`
        # attribute yields a clear TypeError instead of an AttributeError.
        func_cls = getattr(module, "Function", None)
        if func_cls is None or not (isinstance(func_cls, type) and issubclass(func_cls, OpenAISchema)):
            raise TypeError(f"Function {module_name} must be a subclass of instructor.OpenAISchema")
        if not hasattr(func_cls, "execute"):
            raise TypeError(f"Function {module_name} must have an 'execute' classmethod")

        # Add to function list
        functions.append(Function(function=func_cls))

    # Cache the function list
    _func_name_map = {FunctionName(func.name): func for func in functions}
    return _func_name_map
|
57
|
+
|
58
|
+
|
59
|
+
def list_functions() -> list[Function]:
    """List all available built-in functions."""
    global _func_name_map
    # Populate the cache on first use; get_func_name_map() performs the
    # actual directory scan.
    _func_name_map = _func_name_map or get_func_name_map()
    return list(_func_name_map.values())
|
66
|
+
|
67
|
+
|
68
|
+
def get_function(name: FunctionName) -> Function:
    """Get a function by name

    Args:
        name: Function name

    Returns:
        Function execute method

    Raises:
        ValueError: If function not found
    """
    # Single dict lookup instead of a membership test followed by indexing.
    found = get_func_name_map().get(FunctionName(name))
    if found is None:
        raise ValueError(f"Function {name!r} not found")
    return found
|
84
|
+
|
85
|
+
|
86
|
+
def get_openai_schemas() -> List[Dict[str, Any]]:
    """Get OpenAI-compatible function schemas

    Returns:
        List of function schemas in OpenAI format
    """
    # Build the OpenAI "tools" payload directly with a comprehension.
    return [
        {
            "type": "function",
            "function": {
                "name": func.name,
                "description": func.description,
                "parameters": func.parameters,
            },
        }
        for func in list_functions()
    ]
|
yaicli/utils.py
CHANGED
@@ -6,7 +6,7 @@ from typing import Any, Callable, Optional, TypeVar
|
|
6
6
|
import typer
|
7
7
|
from distro import name as distro_name
|
8
8
|
|
9
|
-
from
|
9
|
+
from .const import DEFAULT_OS_NAME, DEFAULT_SHELL_NAME
|
10
10
|
|
11
11
|
T = TypeVar("T", int, float, str, bool)
|
12
12
|
|
@@ -110,7 +110,7 @@ def filter_command(command: str) -> Optional[str]:
|
|
110
110
|
return command.strip().replace("```", "")
|
111
111
|
|
112
112
|
|
113
|
-
def str2bool(value:
|
113
|
+
def str2bool(value: Any) -> bool:
|
114
114
|
"""Convert a string representation of truth to true (1) or false (0).
|
115
115
|
True values are 'y', 'yes', 't', 'true', 'on', and '1';
|
116
116
|
false values are 'n', 'no', 'f', 'false', 'off', and '0'.
|
@@ -119,6 +119,9 @@ def str2bool(value: str) -> bool:
|
|
119
119
|
if value in {False, True}:
|
120
120
|
return bool(value)
|
121
121
|
|
122
|
+
if not isinstance(value, str):
|
123
|
+
return value
|
124
|
+
|
122
125
|
norm = value.strip().lower()
|
123
126
|
|
124
127
|
if norm in {"1", "true", "t", "yes", "y", "on"}:
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: yaicli
|
3
|
-
Version: 0.
|
3
|
+
Version: 0.5.0
|
4
4
|
Summary: A simple CLI tool to interact with LLM
|
5
5
|
Project-URL: Homepage, https://github.com/belingud/yaicli
|
6
6
|
Project-URL: Repository, https://github.com/belingud/yaicli
|
@@ -213,9 +213,11 @@ Classifier: License :: OSI Approved :: MIT License
|
|
213
213
|
Classifier: Operating System :: OS Independent
|
214
214
|
Classifier: Programming Language :: Python :: 3
|
215
215
|
Requires-Python: >=3.9
|
216
|
-
Requires-Dist: cohere>=5.15.0
|
217
216
|
Requires-Dist: distro>=1.9.0
|
218
217
|
Requires-Dist: httpx>=0.28.1
|
218
|
+
Requires-Dist: instructor>=1.7.9
|
219
|
+
Requires-Dist: json-repair>=0.44.1
|
220
|
+
Requires-Dist: litellm>=1.67.5
|
219
221
|
Requires-Dist: openai>=1.76.0
|
220
222
|
Requires-Dist: prompt-toolkit>=3.0.50
|
221
223
|
Requires-Dist: rich>=13.9.4
|
@@ -258,8 +260,7 @@ generate and execute shell commands, or get quick answers without leaving your w
|
|
258
260
|
### 🔌 Universal LLM Compatibility
|
259
261
|
|
260
262
|
- **OpenAI-Compatible**: Works with any OpenAI-compatible API endpoint
|
261
|
-
- **Multi-Provider Support**:
|
262
|
-
- **Custom Response Parsing**: Extract exactly what you need with jmespath
|
263
|
+
- **Multi-Provider Support**: Using litellm to support all major LLM providers
|
263
264
|
|
264
265
|
### 💻 Enhanced Terminal Experience
|
265
266
|
|
@@ -271,7 +272,11 @@ generate and execute shell commands, or get quick answers without leaving your w
|
|
271
272
|
|
272
273
|
- **Layered Configuration**: Environment variables > Config file > Sensible defaults
|
273
274
|
- **Debugging Tools**: Verbose mode with detailed API tracing
|
274
|
-
|
275
|
+
|
276
|
+
### 📚 Function Calling
|
277
|
+
|
278
|
+
- **Function Calling**: Enable function calling in API requests
|
279
|
+
- **Function Output**: Show the output of functions
|
275
280
|
|
276
281
|

|
277
282
|
|
@@ -319,66 +324,79 @@ settings, just as below:
|
|
319
324
|
|
320
325
|
```ini
|
321
326
|
[core]
|
322
|
-
PROVIDER
|
323
|
-
BASE_URL
|
324
|
-
API_KEY
|
325
|
-
MODEL
|
327
|
+
PROVIDER=openai
|
328
|
+
BASE_URL=https://api.openai.com/v1
|
329
|
+
API_KEY=
|
330
|
+
MODEL=gpt-4o
|
326
331
|
|
327
332
|
# auto detect shell and os (or specify manually, e.g., bash, zsh, powershell.exe)
|
328
|
-
SHELL_NAME
|
329
|
-
OS_NAME
|
333
|
+
SHELL_NAME=auto
|
334
|
+
OS_NAME=auto
|
330
335
|
|
331
336
|
# true: streaming response, false: non-streaming
|
332
|
-
STREAM
|
337
|
+
STREAM=true
|
333
338
|
|
334
339
|
# LLM parameters
|
335
|
-
TEMPERATURE
|
336
|
-
TOP_P
|
337
|
-
MAX_TOKENS
|
338
|
-
TIMEOUT
|
340
|
+
TEMPERATURE=0.5
|
341
|
+
TOP_P=1.0
|
342
|
+
MAX_TOKENS=1024
|
343
|
+
TIMEOUT=60
|
344
|
+
REASONING_EFFORT=
|
339
345
|
|
340
346
|
# Interactive mode parameters
|
341
|
-
INTERACTIVE_ROUND
|
347
|
+
INTERACTIVE_ROUND=25
|
342
348
|
|
343
349
|
# UI/UX
|
344
|
-
CODE_THEME
|
350
|
+
CODE_THEME=monokai
|
345
351
|
# Max entries kept in history file
|
346
|
-
MAX_HISTORY
|
347
|
-
AUTO_SUGGEST
|
352
|
+
MAX_HISTORY=500
|
353
|
+
AUTO_SUGGEST=true
|
348
354
|
# Print reasoning content or not
|
349
|
-
SHOW_REASONING
|
355
|
+
SHOW_REASONING=true
|
350
356
|
# Text alignment (default, left, center, right, full)
|
351
|
-
JUSTIFY
|
357
|
+
JUSTIFY=default
|
352
358
|
|
353
359
|
# Chat history settings
|
354
|
-
CHAT_HISTORY_DIR
|
355
|
-
MAX_SAVED_CHATS
|
360
|
+
CHAT_HISTORY_DIR=<tmpdir>/yaicli/chats
|
361
|
+
MAX_SAVED_CHATS=20
|
362
|
+
|
363
|
+
# Role settings
|
364
|
+
# Set to false to disable warnings about modified built-in roles
|
365
|
+
ROLE_MODIFY_WARNING=true
|
366
|
+
|
367
|
+
# Function settings
|
368
|
+
# Set to false to disable sending functions in API requests
|
369
|
+
ENABLE_FUNCTIONS=true
|
370
|
+
# Set to false to disable showing function output in the response
|
371
|
+
SHOW_FUNCTION_OUTPUT=true
|
356
372
|
```
|
357
373
|
|
358
374
|
### Configuration Options Reference
|
359
375
|
|
360
|
-
| Option
|
361
|
-
|
|
362
|
-
| `PROVIDER`
|
363
|
-
| `BASE_URL`
|
364
|
-
| `API_KEY`
|
365
|
-
| `MODEL`
|
366
|
-
| `SHELL_NAME`
|
367
|
-
| `OS_NAME`
|
368
|
-
| `STREAM`
|
369
|
-
| `TIMEOUT`
|
370
|
-
| `INTERACTIVE_ROUND`
|
371
|
-
| `CODE_THEME`
|
372
|
-
| `TEMPERATURE`
|
373
|
-
| `TOP_P`
|
374
|
-
| `MAX_TOKENS`
|
375
|
-
| `MAX_HISTORY`
|
376
|
-
| `AUTO_SUGGEST`
|
377
|
-
| `SHOW_REASONING`
|
378
|
-
| `JUSTIFY`
|
379
|
-
| `CHAT_HISTORY_DIR`
|
380
|
-
| `MAX_SAVED_CHATS`
|
381
|
-
| `ROLE_MODIFY_WARNING`
|
376
|
+
| Option | Description | Default | Env Variable |
|
377
|
+
| ---------------------- | ------------------------------------------- | --------------------------- | -------------------------- |
|
378
|
+
| `PROVIDER` | LLM provider (openai, claude, cohere, etc.) | `openai` | `YAI_PROVIDER` |
|
379
|
+
| `BASE_URL` | API endpoint URL | `https://api.openai.com/v1` | `YAI_BASE_URL` |
|
380
|
+
| `API_KEY` | Your API key | - | `YAI_API_KEY` |
|
381
|
+
| `MODEL` | LLM model to use | `gpt-4o` | `YAI_MODEL` |
|
382
|
+
| `SHELL_NAME` | Shell type | `auto` | `YAI_SHELL_NAME` |
|
383
|
+
| `OS_NAME` | Operating system | `auto` | `YAI_OS_NAME` |
|
384
|
+
| `STREAM` | Enable streaming | `true` | `YAI_STREAM` |
|
385
|
+
| `TIMEOUT` | API timeout (seconds) | `60` | `YAI_TIMEOUT` |
|
386
|
+
| `INTERACTIVE_ROUND` | Interactive mode rounds | `25` | `YAI_INTERACTIVE_ROUND` |
|
387
|
+
| `CODE_THEME` | Syntax highlighting theme | `monokai` | `YAI_CODE_THEME` |
|
388
|
+
| `TEMPERATURE` | Response randomness | `0.7` | `YAI_TEMPERATURE` |
|
389
|
+
| `TOP_P` | Top-p sampling | `1.0` | `YAI_TOP_P` |
|
390
|
+
| `MAX_TOKENS` | Max response tokens | `1024` | `YAI_MAX_TOKENS` |
|
391
|
+
| `MAX_HISTORY` | Max history entries | `500` | `YAI_MAX_HISTORY` |
|
392
|
+
| `AUTO_SUGGEST` | Enable history suggestions | `true` | `YAI_AUTO_SUGGEST` |
|
393
|
+
| `SHOW_REASONING` | Enable reasoning display | `true` | `YAI_SHOW_REASONING` |
|
394
|
+
| `JUSTIFY` | Text alignment | `default` | `YAI_JUSTIFY` |
|
395
|
+
| `CHAT_HISTORY_DIR` | Chat history directory | `<tempdir>/yaicli/chats` | `YAI_CHAT_HISTORY_DIR` |
|
396
|
+
| `MAX_SAVED_CHATS` | Max saved chats | `20` | `YAI_MAX_SAVED_CHATS` |
|
397
|
+
| `ROLE_MODIFY_WARNING` | Warn user when modifying role | `true` | `YAI_ROLE_MODIFY_WARNING` |
|
398
|
+
| `ENABLE_FUNCTIONS` | Enable function calling | `true` | `YAI_ENABLE_FUNCTIONS` |
|
399
|
+
| `SHOW_FUNCTION_OUTPUT` | Show function output in response | `true` | `YAI_SHOW_FUNCTION_OUTPUT` |
|
382
400
|
|
383
401
|
### LLM Provider Configuration
|
384
402
|
|
@@ -387,10 +405,6 @@ other providers.
|
|
387
405
|
|
388
406
|
#### Pre-configured Provider Settings
|
389
407
|
|
390
|
-
`provider` is not case sensitive.
|
391
|
-
|
392
|
-
Claude and gemini native api will support soon.
|
393
|
-
|
394
408
|
| Provider | BASE_URL |
|
395
409
|
| ------------------------------ | --------------------------------------------------------- |
|
396
410
|
| **OpenAI** (default) | `https://api.openai.com/v1` |
|
@@ -404,6 +418,16 @@ Claude and gemini native api will support soon.
|
|
404
418
|
> - Google Gemini: https://ai.google.dev/gemini-api/docs/openai
|
405
419
|
> - Claude: https://docs.anthropic.com/en/api/openai-sdk
|
406
420
|
|
421
|
+
If you are not sure about the base_url, or simply want to use the provider's default base_url, just leave it blank.
|
422
|
+
|
423
|
+
```ini
|
424
|
+
[core]
|
425
|
+
PROVIDER=cohere
|
426
|
+
BASE_URL=
|
427
|
+
API_KEY=xxx
|
428
|
+
MODEL=command-r-plus
|
429
|
+
```
|
430
|
+
|
407
431
|
### Syntax Highlighting Themes
|
408
432
|
|
409
433
|
YAICLI supports all Pygments syntax highlighting themes. You can set your preferred theme in the config file:
|
@@ -448,43 +472,50 @@ ai --verbose "Explain quantum computing"
|
|
448
472
|
YAICLI: Your AI assistant in the command line.
|
449
473
|
Call with a PROMPT to get a direct answer, use --shell to execute as command, or use --chat for an interactive session.
|
450
474
|
|
451
|
-
╭─ Arguments
|
452
|
-
│ prompt [PROMPT] The prompt to send to the LLM. Reads from stdin if available. [default: None]
|
453
|
-
|
454
|
-
╭─ Options
|
455
|
-
│ --install-completion Install completion for the current shell.
|
456
|
-
│ --show-completion Show completion for the current shell, to copy it or customize the installation.
|
457
|
-
│ --help -h Show this message and exit.
|
458
|
-
|
459
|
-
╭─ LLM Options
|
460
|
-
│ --model -M
|
461
|
-
│ --temperature -T
|
462
|
-
│ --top-p -P
|
463
|
-
│ --max-tokens -M
|
464
|
-
|
465
|
-
|
466
|
-
|
467
|
-
│ --
|
468
|
-
│ --
|
469
|
-
│ --
|
470
|
-
│ --
|
471
|
-
|
472
|
-
|
473
|
-
|
474
|
-
│ --
|
475
|
-
|
476
|
-
|
477
|
-
|
478
|
-
|
479
|
-
|
480
|
-
|
481
|
-
|
482
|
-
|
483
|
-
|
484
|
-
│ --
|
485
|
-
│ --
|
486
|
-
│ --
|
487
|
-
|
475
|
+
╭─ Arguments ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
|
476
|
+
│ prompt [PROMPT] The prompt to send to the LLM. Reads from stdin if available. [default: None] │
|
477
|
+
╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
|
478
|
+
╭─ Options ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
|
479
|
+
│ --install-completion Install completion for the current shell. │
|
480
|
+
│ --show-completion Show completion for the current shell, to copy it or customize the installation. │
|
481
|
+
│ --help -h Show this message and exit. │
|
482
|
+
╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
|
483
|
+
╭─ LLM Options ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
|
484
|
+
│ --model -M TEXT Specify the model to use. │
|
485
|
+
│ --temperature -T FLOAT RANGE [0.0<=x<=2.0] Specify the temperature to use. [default: 0.5] │
|
486
|
+
│ --top-p -P FLOAT RANGE [0.0<=x<=1.0] Specify the top-p to use. [default: 1.0] │
|
487
|
+
│ --max-tokens -M INTEGER RANGE [x>=1] Specify the max tokens to use. [default: 1024] │
|
488
|
+
│ --stream --no-stream Specify whether to stream the response. (default: stream) │
|
489
|
+
╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
|
490
|
+
╭─ Role Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
|
491
|
+
│ --role -r TEXT Specify the assistant role to use. [default: DEFAULT] │
|
492
|
+
│ --create-role TEXT Create a new role with the specified name. │
|
493
|
+
│ --delete-role TEXT Delete a role with the specified name. │
|
494
|
+
│ --list-roles List all available roles. │
|
495
|
+
│ --show-role TEXT Show the role with the specified name. │
|
496
|
+
╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
|
497
|
+
╭─ Chat Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
|
498
|
+
│ --chat -c Start in interactive chat mode. │
|
499
|
+
│ --list-chats List saved chat sessions. │
|
500
|
+
╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
|
501
|
+
╭─ Shell Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
|
502
|
+
│ --shell -s Generate and optionally execute a shell command (non-interactive). │
|
503
|
+
╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
|
504
|
+
╭─ Code Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
|
505
|
+
│ --code Generate code in plaintext (non-interactive). │
|
506
|
+
╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
|
507
|
+
╭─ Other Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
|
508
|
+
│ --verbose -V Show verbose output (e.g., loaded config). │
|
509
|
+
│ --template Show the default config file template and exit. │
|
510
|
+
│ --show-reasoning --hide-reasoning Show reasoning content from the LLM. (default: show) │
|
511
|
+
│ --justify -j [default|left|center|right|full] Specify the justify to use. [default: default] │
|
512
|
+
╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
|
513
|
+
╭─ Function Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
|
514
|
+
│ --install-functions Install default functions. │
|
515
|
+
│ --list-functions List all available functions. │
|
516
|
+
│ --enable-functions --disable-functions Enable/disable function calling in API requests (default: disabled) │
|
517
|
+
│ --show-function-output --hide-function-output Show the output of functions (default: show) │
|
518
|
+
╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
|
488
519
|
```
|
489
520
|
|
490
521
|
### Interactive Mode Features
|
@@ -495,6 +526,7 @@ ai --verbose "Explain quantum computing"
|
|
495
526
|
|
496
527
|
**Commands**
|
497
528
|
|
529
|
+
- `/help|?` - Show help message
|
498
530
|
- `/clear` - Clear conversation history
|
499
531
|
- `/his` - Show command history
|
500
532
|
- `/list` - List saved chats
|
@@ -799,6 +831,51 @@ $ ai --code "write a fib generator" --model deepseek-r1
|
|
799
831
|
|
800
832
|

|
801
833
|
|
834
|
+
### Function Call
|
835
|
+
|
836
|
+
To use function call, you need to install default functions by `ai --install-functions`.
|
837
|
+
After that, you can check the functions by `ai --list-functions`.
|
838
|
+
You can also define your own functions by adding them to the config folder in `~/.config/yaicli/functions/` (`C:\Users\<user>\.config\yaicli\functions` on Windows).
|
839
|
+
|
840
|
+
The `--enable-functions` option corresponds to the configuration key `ENABLE_FUNCTIONS`.
|
841
|
+
|
842
|
+
```shell
|
843
|
+
ai 'check the current dir total size' --enable-functions
|
844
|
+
Assistant:
|
845
|
+
Thinking:
|
846
|
+
|
847
|
+
▌ Okay, the user wants to check the current directory's total size. Hmm, how do I do that in macOS with zsh?
|
848
|
+
▌ I remember that the command to get disk usage is usually 'du'. But wait, the default 'du' might not give the total size of the
|
849
|
+
▌ current directory directly. Let me think. Oh right, if I use 'du -sh' with the current directory, that should give the total size
|
850
|
+
▌ in human-readable format.
|
851
|
+
▌ Wait, but sometimes the -s option summarizes the directory. So 'du -sh .' would calculate the total size of the current directory
|
852
|
+
▌ and its subdirectories. That should work. Let me confirm the syntax. Yeah, 'du -sh .' is the right command here.
|
853
|
+
▌ The user is using zsh, but the 'du' command is standard, so it should be available. I need to execute this shell command. The
|
854
|
+
▌ function provided is execute_shell_command, so I'll call that with the shell command 'du -sh .' as the argument.
|
855
|
+
▌ I should make sure the parameters are correctly formatted. The function requires a shell_command string. Alright, that's all.
|
856
|
+
▌ Let's generate the tool call.
|
857
|
+
|
858
|
+
{"index":0,"finish_reason":"tool_calls","delta":{"role":"assistant","content":null,"audio":null,"tool_calls":[{"id":"call_202505141526
|
859
|
+
36cc3f776ae8f14b56_0","index":0,"type":"function","function":{"name":"execute_shell_command","arguments":"{"shell_command": "du -sh
|
860
|
+
."}","outputs":null},"code_interpreter":null,"retrieval":null,"drawing_tool":null,"web_browser":null,"search_intent":null,"search_resu
|
861
|
+
lt":null}],"tool_call_id":null,"attachments":null,"metadata":null}}
|
862
|
+
@Function call: execute_shell_command({"shell_command": "du -sh ."})
|
863
|
+
╭─ Function output ─────╮
|
864
|
+
│ Exit code: 0, Output: │
|
865
|
+
│ 156M . │
|
866
|
+
│ │
|
867
|
+
╰───────────────────────╯
|
868
|
+
Thinking:
|
869
|
+
|
870
|
+
▌ Okay, the user asked to check the current directory's total size. I used the 'du -sh .' command, which stands for disk usage,
|
871
|
+
▌ summarize, and current directory. The output was "156M". So I need to present this in a concise way.
|
872
|
+
▌ First, confirm the command was executed. Then, report the result clearly. Since the user didn't ask for extra details, keep it
|
873
|
+
▌ simple. Just state the total size as 156MB. Maybe mention the command used for transparency. Alright, that should cover it without
|
874
|
+
▌ overcomplicating.
|
875
|
+
|
876
|
+
Current directory size: 156M (using du -sh .).
|
877
|
+
```
|
878
|
+
|
802
879
|
## 💻 Technical Details
|
803
880
|
|
804
881
|
### Architecture
|
@@ -0,0 +1,24 @@
|
|
1
|
+
pyproject.toml,sha256=-w396zGDRn2FDWOQzsKsi5lCrGe-rpQqCWL06I6EzYU,1596
|
2
|
+
yaicli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
3
|
+
yaicli/chat.py,sha256=DeTmOeBPU-oiOAIaDj2h-auJor0GyVVhrViLYF6zGIM,13638
|
4
|
+
yaicli/cli.py,sha256=XY-xfB_kGHeBaNo17iI1OtEuuC2YIGW2WG4k87coX_Y,22502
|
5
|
+
yaicli/client.py,sha256=fKUDmn9s7tF9Q2wIB8WhbsjFYIpV0E29t_Vw0qVmVbI,16229
|
6
|
+
yaicli/config.py,sha256=_mp8P6zXyrdp4TzBfHraOCkjv5DMZMOwiEhQnFYWwZA,6321
|
7
|
+
yaicli/console.py,sha256=vARPJd-3lafutsQWrGntQVjLrYqaJD3qisN82pmuhjU,1973
|
8
|
+
yaicli/const.py,sha256=FYW8cNqFzZwnYbgr_HXZSzSS8OIU_UsFIn4SZ0zOJ8U,8129
|
9
|
+
yaicli/entry.py,sha256=1cd6c_P4stmAtJLhpaSJB-rXTROhmS_kuzX76T93axk,8705
|
10
|
+
yaicli/exceptions.py,sha256=WBYg8OTJJzaj7lt6HE7ZyBoe5T6A3yZRNCRfWd4iN0c,372
|
11
|
+
yaicli/history.py,sha256=s-57X9FMsaQHF7XySq1gGH_jpd_cHHTYafYu2ECuG6M,2472
|
12
|
+
yaicli/printer.py,sha256=c6Y5uN5yR7GKGT2OgZyrHCLYThAeu8fNjrhUGig873s,8644
|
13
|
+
yaicli/render.py,sha256=k8o2P8fI44PJlyQbs7gmMiu2x2prwajdWn5JIt15BIA,505
|
14
|
+
yaicli/role.py,sha256=PfwiVJIlzg7EzlvMM-kIy6vBK0d5d_J4M1I_fIZGnWk,7399
|
15
|
+
yaicli/schemas.py,sha256=PiuSY7ORZaA4OL_tYm0inwqirHp5M-F3zcCipLwsH9E,571
|
16
|
+
yaicli/tools.py,sha256=d-5LXbEB-1Uq5VKSgwlAiNDVOGrHkku2DpmZoorq1zw,3098
|
17
|
+
yaicli/utils.py,sha256=bpo3Xhozpxsaci3FtEIKZ32l4ZdyWMsrHjYGX0tB4J4,4541
|
18
|
+
yaicli/functions/__init__.py,sha256=_FJooQ9GkijG8xLwuU0cr5GBrGnC9Nc6bnCeUjrsT0k,1271
|
19
|
+
yaicli/functions/buildin/execute_shell_command.py,sha256=unl1-F8p6QZajeHdA0u5UpURMJM0WhdWMUWCCCHVRcI,1320
|
20
|
+
yaicli-0.5.0.dist-info/METADATA,sha256=4gqkgfCv4aJFzq6mBTrtgQ8IrNolnRA-ekbRyERarHQ,48657
|
21
|
+
yaicli-0.5.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
22
|
+
yaicli-0.5.0.dist-info/entry_points.txt,sha256=iYVyQP0PJIm9tQnlQheqT435kK_xdGoi5j9aswGV9hA,66
|
23
|
+
yaicli-0.5.0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
24
|
+
yaicli-0.5.0.dist-info/RECORD,,
|