yaicli 0.4.0__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
yaicli/schemas.py ADDED
@@ -0,0 +1,31 @@
1
+ from dataclasses import dataclass
2
+ from typing import Optional
3
+
4
+
5
@dataclass
class ChatMessage:
    """Chat message class.

    One entry in a conversation history. The optional fields carry
    tool/function-call metadata when the message participates in a
    tool-call exchange.
    """

    # Message role, e.g. "system", "user", "assistant" or "tool" —
    # presumably the OpenAI-style role strings; confirm against callers.
    role: str
    # The message text.
    content: str
    # Optional name attached to the message (tool/function name when relevant).
    name: Optional[str] = None
    # Links a tool-result message back to the originating tool call.
    tool_call_id: Optional[str] = None
13
+
14
+
15
@dataclass
class ToolCall:
    """Function call class.

    Describes a single function/tool invocation requested by the model.
    """

    # Provider-assigned identifier for this call.
    id: str
    # Name of the function to invoke.
    name: str
    # Raw arguments payload as received — kept as a string (not parsed);
    # presumably JSON-encoded, confirm against the provider response.
    arguments: str
22
+
23
+
24
@dataclass
class LLMResponse:
    """Data structure for llm response with reasoning and content."""

    # Model "reasoning"/thinking text, when the provider emits it separately.
    reasoning: Optional[str] = None
    # Main answer text (or text delta when streaming).
    content: str = ""
    # Why generation stopped, e.g. "stop" or "tool_calls" — None mid-stream.
    finish_reason: Optional[str] = None
    # Populated when the model requested a function call.
    tool_call: Optional[ToolCall] = None
yaicli/tools.py ADDED
@@ -0,0 +1,103 @@
1
+ import importlib.util
2
+ import sys
3
+ from typing import Any, Dict, List, NewType, Optional
4
+
5
+ from instructor import OpenAISchema
6
+
7
+ from .console import get_console
8
+ from .const import FUNCTIONS_DIR
9
+
10
+ console = get_console()
11
+
12
+
13
class Function:
    """Function description class.

    Thin wrapper that flattens an ``instructor.OpenAISchema`` subclass
    into the pieces YAICLI needs: name, description, JSON-schema
    parameters, and the callable ``execute`` entry point.
    """

    def __init__(self, function: type[OpenAISchema]):
        # Read the generated schema once instead of re-deriving it per field.
        schema = function.openai_schema
        self.name = schema["name"]
        self.description = schema.get("description", "")
        self.parameters = schema.get("parameters", {})
        self.execute = function.execute  # type: ignore
21
+
22
+
23
+ FunctionName = NewType("FunctionName", str)
24
+
25
+ _func_name_map: Optional[dict[FunctionName, Function]] = None
26
+
27
+
28
def get_func_name_map() -> dict[FunctionName, Function]:
    """Build (and cache) the mapping of function name -> Function wrapper.

    Scans ``FUNCTIONS_DIR`` for ``*.py`` files (skipping ``_``-prefixed
    ones), imports each module, validates that it defines a ``Function``
    class subclassing ``instructor.OpenAISchema`` with an ``execute``
    attribute, and caches the result in the module-level ``_func_name_map``.

    Returns:
        Mapping from function name to its Function wrapper. Empty when the
        functions directory did not exist (it is created as a side effect).

    Raises:
        TypeError: If a module's ``Function`` is not an ``OpenAISchema``
            subclass or lacks an ``execute`` attribute.
        ImportError: If a module spec/loader cannot be created for a file.
    """
    global _func_name_map
    if _func_name_map:
        return _func_name_map
    if not FUNCTIONS_DIR.exists():
        FUNCTIONS_DIR.mkdir(parents=True, exist_ok=True)
        return {}
    functions = []
    for file in FUNCTIONS_DIR.glob("*.py"):
        if file.name.startswith("_"):
            continue
        # BUG FIX: rstrip(".py") strips any trailing run of '.', 'p', 'y'
        # characters (e.g. "copy.py" -> "co", "happy.py" -> "ha");
        # removesuffix() drops exactly the ".py" extension (Python 3.9+).
        # NOTE(review): replace("/", ".") assumes POSIX separators — on
        # Windows the path uses "\\"; confirm whether that matters here,
        # since the name is only used as a sys.modules key.
        module_name = str(file).replace("/", ".").removesuffix(".py")
        spec = importlib.util.spec_from_file_location(module_name, str(file))
        if spec is None or spec.loader is None:
            # Explicit failure instead of an AttributeError on None below.
            raise ImportError(f"Cannot create module spec for {file}")
        module = importlib.util.module_from_spec(spec)
        sys.modules[module_name] = module
        spec.loader.exec_module(module)

        if not issubclass(module.Function, OpenAISchema):
            raise TypeError(f"Function {module_name} must be a subclass of instructor.OpenAISchema")
        if not hasattr(module.Function, "execute"):
            raise TypeError(f"Function {module_name} must have an 'execute' classmethod")

        # Add to function list
        functions.append(Function(function=module.Function))

    # Cache the function list
    _func_name_map = {FunctionName(func.name): func for func in functions}
    return _func_name_map
57
+
58
+
59
def list_functions() -> list[Function]:
    """List all available builtin functions.

    Delegates to get_func_name_map(), which already handles building and
    caching the module-level name map, so no extra caching is needed here.
    """
    return list(get_func_name_map().values())
66
+
67
+
68
def get_function(name: FunctionName) -> Function:
    """Get a function by name.

    Args:
        name: Function name

    Returns:
        The matching Function wrapper (the original docstring claimed the
        execute method, but the Function object itself is returned).

    Raises:
        ValueError: If function not found
    """
    # Single lookup instead of a membership test followed by indexing.
    try:
        return get_func_name_map()[name]
    except KeyError:
        raise ValueError(f"Function {name!r} not found") from None
84
+
85
+
86
def get_openai_schemas() -> List[Dict[str, Any]]:
    """Get OpenAI-compatible function schemas.

    Returns:
        List of function schemas in OpenAI "tools" format, one entry per
        available function.
    """
    return [
        {
            "type": "function",
            "function": {
                "name": func.name,
                "description": func.description,
                "parameters": func.parameters,
            },
        }
        for func in list_functions()
    ]
yaicli/utils.py CHANGED
@@ -6,7 +6,7 @@ from typing import Any, Callable, Optional, TypeVar
6
6
  import typer
7
7
  from distro import name as distro_name
8
8
 
9
- from yaicli.const import DEFAULT_OS_NAME, DEFAULT_SHELL_NAME
9
+ from .const import DEFAULT_OS_NAME, DEFAULT_SHELL_NAME
10
10
 
11
11
  T = TypeVar("T", int, float, str, bool)
12
12
 
@@ -110,7 +110,7 @@ def filter_command(command: str) -> Optional[str]:
110
110
  return command.strip().replace("```", "")
111
111
 
112
112
 
113
- def str2bool(value: str) -> bool:
113
+ def str2bool(value: Any) -> bool:
114
114
  """Convert a string representation of truth to true (1) or false (0).
115
115
  True values are 'y', 'yes', 't', 'true', 'on', and '1';
116
116
  false values are 'n', 'no', 'f', 'false', 'off', and '0'.
@@ -119,6 +119,9 @@ def str2bool(value: str) -> bool:
119
119
  if value in {False, True}:
120
120
  return bool(value)
121
121
 
122
+ if not isinstance(value, str):
123
+ return value
124
+
122
125
  norm = value.strip().lower()
123
126
 
124
127
  if norm in {"1", "true", "t", "yes", "y", "on"}:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: yaicli
3
- Version: 0.4.0
3
+ Version: 0.5.1
4
4
  Summary: A simple CLI tool to interact with LLM
5
5
  Project-URL: Homepage, https://github.com/belingud/yaicli
6
6
  Project-URL: Repository, https://github.com/belingud/yaicli
@@ -213,9 +213,11 @@ Classifier: License :: OSI Approved :: MIT License
213
213
  Classifier: Operating System :: OS Independent
214
214
  Classifier: Programming Language :: Python :: 3
215
215
  Requires-Python: >=3.9
216
- Requires-Dist: cohere>=5.15.0
217
216
  Requires-Dist: distro>=1.9.0
218
217
  Requires-Dist: httpx>=0.28.1
218
+ Requires-Dist: instructor>=1.7.9
219
+ Requires-Dist: json-repair>=0.44.1
220
+ Requires-Dist: litellm>=1.67.5
219
221
  Requires-Dist: openai>=1.76.0
220
222
  Requires-Dist: prompt-toolkit>=3.0.50
221
223
  Requires-Dist: rich>=13.9.4
@@ -241,6 +243,8 @@ generate and execute shell commands, or get quick answers without leaving your w
241
243
  > [!NOTE]
242
244
  > YAICLI is actively developed. While core functionality is stable, some features may evolve in future releases.
243
245
 
246
+ > We support Function Call since v0.5.0!
247
+
244
248
  ## ✨ Key Features
245
249
 
246
250
  ### 🔄 Multiple Interaction Modes
@@ -258,8 +262,7 @@ generate and execute shell commands, or get quick answers without leaving your w
258
262
  ### 🔌 Universal LLM Compatibility
259
263
 
260
264
  - **OpenAI-Compatible**: Works with any OpenAI-compatible API endpoint
261
- - **Multi-Provider Support**: Easy configuration for Claude, Gemini, Cohere, etc.
262
- - **Custom Response Parsing**: Extract exactly what you need with jmespath
265
+ - **Multi-Provider Support**: Using litellm to support all major LLM providers
263
266
 
264
267
  ### 💻 Enhanced Terminal Experience
265
268
 
@@ -271,7 +274,11 @@ generate and execute shell commands, or get quick answers without leaving your w
271
274
 
272
275
  - **Layered Configuration**: Environment variables > Config file > Sensible defaults
273
276
  - **Debugging Tools**: Verbose mode with detailed API tracing
274
- - **Lightweight**: Minimal dependencies with focused functionality
277
+
278
+ ### 📚 Function Calling
279
+
280
+ - **Function Calling**: Enable function calling in API requests
281
+ - **Function Output**: Show the output of functions
275
282
 
276
283
  ![What is life](artwork/reasoning_example.png)
277
284
 
@@ -319,66 +326,79 @@ settings, just as below:
319
326
 
320
327
  ```ini
321
328
  [core]
322
- PROVIDER = openai
323
- BASE_URL = https://api.openai.com/v1
324
- API_KEY =
325
- MODEL = gpt-4o
329
+ PROVIDER=openai
330
+ BASE_URL=https://api.openai.com/v1
331
+ API_KEY=
332
+ MODEL=gpt-4o
326
333
 
327
334
  # auto detect shell and os (or specify manually, e.g., bash, zsh, powershell.exe)
328
- SHELL_NAME = auto
329
- OS_NAME = auto
335
+ SHELL_NAME=auto
336
+ OS_NAME=auto
330
337
 
331
338
  # true: streaming response, false: non-streaming
332
- STREAM = true
339
+ STREAM=true
333
340
 
334
341
  # LLM parameters
335
- TEMPERATURE = 0.7
336
- TOP_P = 1.0
337
- MAX_TOKENS = 1024
338
- TIMEOUT = 60
342
+ TEMPERATURE=0.5
343
+ TOP_P=1.0
344
+ MAX_TOKENS=1024
345
+ TIMEOUT=60
346
+ REASONING_EFFORT=
339
347
 
340
348
  # Interactive mode parameters
341
- INTERACTIVE_ROUND = 25
349
+ INTERACTIVE_ROUND=25
342
350
 
343
351
  # UI/UX
344
- CODE_THEME = monokai
352
+ CODE_THEME=monokai
345
353
  # Max entries kept in history file
346
- MAX_HISTORY = 500
347
- AUTO_SUGGEST = true
354
+ MAX_HISTORY=500
355
+ AUTO_SUGGEST=true
348
356
  # Print reasoning content or not
349
- SHOW_REASONING = true
357
+ SHOW_REASONING=true
350
358
  # Text alignment (default, left, center, right, full)
351
- JUSTIFY = default
359
+ JUSTIFY=default
352
360
 
353
361
  # Chat history settings
354
- CHAT_HISTORY_DIR = <tempdir>/yaicli/chats
355
- MAX_SAVED_CHATS = 20
362
+ CHAT_HISTORY_DIR=<tmpdir>/yaicli/chats
363
+ MAX_SAVED_CHATS=20
364
+
365
+ # Role settings
366
+ # Set to false to disable warnings about modified built-in roles
367
+ ROLE_MODIFY_WARNING=true
368
+
369
+ # Function settings
370
+ # Set to false to disable sending functions in API requests
371
+ ENABLE_FUNCTIONS=true
372
+ # Set to false to disable showing function output in the response
373
+ SHOW_FUNCTION_OUTPUT=true
356
374
  ```
357
375
 
358
376
  ### Configuration Options Reference
359
377
 
360
- | Option | Description | Default | Env Variable |
361
- | --------------------- | ------------------------------------------- | --------------------------- | ------------------------- |
362
- | `PROVIDER` | LLM provider (openai, claude, cohere, etc.) | `openai` | `YAI_PROVIDER` |
363
- | `BASE_URL` | API endpoint URL | `https://api.openai.com/v1` | `YAI_BASE_URL` |
364
- | `API_KEY` | Your API key | - | `YAI_API_KEY` |
365
- | `MODEL` | LLM model to use | `gpt-4o` | `YAI_MODEL` |
366
- | `SHELL_NAME` | Shell type | `auto` | `YAI_SHELL_NAME` |
367
- | `OS_NAME` | Operating system | `auto` | `YAI_OS_NAME` |
368
- | `STREAM` | Enable streaming | `true` | `YAI_STREAM` |
369
- | `TIMEOUT` | API timeout (seconds) | `60` | `YAI_TIMEOUT` |
370
- | `INTERACTIVE_ROUND` | Interactive mode rounds | `25` | `YAI_INTERACTIVE_ROUND` |
371
- | `CODE_THEME` | Syntax highlighting theme | `monokai` | `YAI_CODE_THEME` |
372
- | `TEMPERATURE` | Response randomness | `0.7` | `YAI_TEMPERATURE` |
373
- | `TOP_P` | Top-p sampling | `1.0` | `YAI_TOP_P` |
374
- | `MAX_TOKENS` | Max response tokens | `1024` | `YAI_MAX_TOKENS` |
375
- | `MAX_HISTORY` | Max history entries | `500` | `YAI_MAX_HISTORY` |
376
- | `AUTO_SUGGEST` | Enable history suggestions | `true` | `YAI_AUTO_SUGGEST` |
377
- | `SHOW_REASONING` | Enable reasoning display | `true` | `YAI_SHOW_REASONING` |
378
- | `JUSTIFY` | Text alignment | `default` | `YAI_JUSTIFY` |
379
- | `CHAT_HISTORY_DIR` | Chat history directory | `<tempdir>/yaicli/history` | `YAI_CHAT_HISTORY_DIR` |
380
- | `MAX_SAVED_CHATS` | Max saved chats | `20` | `YAI_MAX_SAVED_CHATS` |
381
- | `ROLE_MODIFY_WARNING` | Warn user when modifying role | `true` | `YAI_ROLE_MODIFY_WARNING` |
378
+ | Option | Description | Default | Env Variable |
379
+ | ---------------------- | ------------------------------------------- | --------------------------- | -------------------------- |
380
+ | `PROVIDER` | LLM provider (openai, claude, cohere, etc.) | `openai` | `YAI_PROVIDER` |
381
+ | `BASE_URL` | API endpoint URL | `https://api.openai.com/v1` | `YAI_BASE_URL` |
382
+ | `API_KEY` | Your API key | - | `YAI_API_KEY` |
383
+ | `MODEL` | LLM model to use | `gpt-4o` | `YAI_MODEL` |
384
+ | `SHELL_NAME` | Shell type | `auto` | `YAI_SHELL_NAME` |
385
+ | `OS_NAME` | Operating system | `auto` | `YAI_OS_NAME` |
386
+ | `STREAM` | Enable streaming | `true` | `YAI_STREAM` |
387
+ | `TIMEOUT` | API timeout (seconds) | `60` | `YAI_TIMEOUT` |
388
+ | `INTERACTIVE_ROUND` | Interactive mode rounds | `25` | `YAI_INTERACTIVE_ROUND` |
389
+ | `CODE_THEME` | Syntax highlighting theme | `monokai` | `YAI_CODE_THEME` |
390
+ | `TEMPERATURE` | Response randomness | `0.7` | `YAI_TEMPERATURE` |
391
+ | `TOP_P` | Top-p sampling | `1.0` | `YAI_TOP_P` |
392
+ | `MAX_TOKENS` | Max response tokens | `1024` | `YAI_MAX_TOKENS` |
393
+ | `MAX_HISTORY` | Max history entries | `500` | `YAI_MAX_HISTORY` |
394
+ | `AUTO_SUGGEST` | Enable history suggestions | `true` | `YAI_AUTO_SUGGEST` |
395
+ | `SHOW_REASONING` | Enable reasoning display | `true` | `YAI_SHOW_REASONING` |
396
+ | `JUSTIFY` | Text alignment | `default` | `YAI_JUSTIFY` |
397
+ | `CHAT_HISTORY_DIR` | Chat history directory | `<tempdir>/yaicli/chats` | `YAI_CHAT_HISTORY_DIR` |
398
+ | `MAX_SAVED_CHATS` | Max saved chats | `20` | `YAI_MAX_SAVED_CHATS` |
399
+ | `ROLE_MODIFY_WARNING` | Warn user when modifying role | `true` | `YAI_ROLE_MODIFY_WARNING` |
400
+ | `ENABLE_FUNCTIONS` | Enable function calling | `true` | `YAI_ENABLE_FUNCTIONS` |
401
+ | `SHOW_FUNCTION_OUTPUT` | Show function output in response | `true` | `YAI_SHOW_FUNCTION_OUTPUT` |
382
402
 
383
403
  ### LLM Provider Configuration
384
404
 
@@ -387,10 +407,6 @@ other providers.
387
407
 
388
408
  #### Pre-configured Provider Settings
389
409
 
390
- `provider` is not case sensitive.
391
-
392
- Claude and gemini native api will support soon.
393
-
394
410
  | Provider | BASE_URL |
395
411
  | ------------------------------ | --------------------------------------------------------- |
396
412
  | **OpenAI** (default) | `https://api.openai.com/v1` |
@@ -404,6 +420,16 @@ Claude and gemini native api will support soon.
404
420
  > - Google Gemini: https://ai.google.dev/gemini-api/docs/openai
405
421
  > - Claude: https://docs.anthropic.com/en/api/openai-sdk
406
422
 
423
+ If you are not sure about the base_url, or just want to use the provider's default base_url, leave it blank.
424
+
425
+ ```ini
426
+ [core]
427
+ PROVIDER=cohere
428
+ BASE_URL=
429
+ API_KEY=xxx
430
+ MODEL=command-r-plus
431
+ ```
432
+
407
433
  ### Syntax Highlighting Themes
408
434
 
409
435
  YAICLI supports all Pygments syntax highlighting themes. You can set your preferred theme in the config file:
@@ -448,43 +474,50 @@ ai --verbose "Explain quantum computing"
448
474
  YAICLI: Your AI assistant in the command line.
449
475
  Call with a PROMPT to get a direct answer, use --shell to execute as command, or use --chat for an interactive session.
450
476
 
451
- ╭─ Arguments ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
452
- │ prompt [PROMPT] The prompt to send to the LLM. Reads from stdin if available. [default: None]
453
- ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
454
- ╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
455
- │ --install-completion Install completion for the current shell.
456
- │ --show-completion Show completion for the current shell, to copy it or customize the installation.
457
- │ --help -h Show this message and exit.
458
- ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
459
- ╭─ LLM Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
460
- │ --model -M TEXT Specify the model to use.
461
- │ --temperature -T FLOAT RANGE [0.0<=x<=2.0] Specify the temperature to use. [default: 0.7]
462
- │ --top-p -P FLOAT RANGE [0.0<=x<=1.0] Specify the top-p to use. [default: 1.0]
463
- │ --max-tokens -M INTEGER RANGE [x>=1] Specify the max tokens to use. [default: 1024]
464
- ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
465
- ╭─ Role Options ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
466
- --role -r TEXT Specify the assistant role to use. [default: DEFAULT] │
467
- │ --create-role TEXT Create a new role with the specified name.
468
- │ --delete-role TEXT Delete a role with the specified name.
469
- │ --list-roles List all available roles.
470
- │ --show-role TEXT Show the role with the specified name.
471
- ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
472
- ╭─ Chat Options ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
473
- --chat -c Start in interactive chat mode. │
474
- │ --list-chats List saved chat sessions.
475
- ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
476
- ╭─ Shell Options ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
477
- --shell -s Generate and optionally execute a shell command (non-interactive). │
478
- ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
479
- ╭─ Code Options ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
480
- --code Generate code in plaintext (non-interactive). │
481
- ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
482
- ╭─ Other Options ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
483
- --verbose -V Show verbose output (e.g., loaded config). │
484
- │ --template Show the default config file template and exit.
485
- │ --show-reasoning --no-show-reasoning Show reasoning content from the LLM. (default: True)
486
- │ --justify -j [default|left|center|right|full] Specify the justify to use. [default: default]
487
- ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
477
+ ╭─ Arguments ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
478
+ │ prompt [PROMPT] The prompt to send to the LLM. Reads from stdin if available. [default: None]
479
+ ╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
480
+ ╭─ Options ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
481
+ │ --install-completion Install completion for the current shell.
482
+ │ --show-completion Show completion for the current shell, to copy it or customize the installation.
483
+ │ --help -h Show this message and exit.
484
+ ╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
485
+ ╭─ LLM Options ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
486
+ │ --model -M TEXT Specify the model to use.
487
+ │ --temperature -T FLOAT RANGE [0.0<=x<=2.0] Specify the temperature to use. [default: 0.5]
488
+ │ --top-p -P FLOAT RANGE [0.0<=x<=1.0] Specify the top-p to use. [default: 1.0]
489
+ │ --max-tokens -M INTEGER RANGE [x>=1] Specify the max tokens to use. [default: 1024]
490
+ │ --stream --no-stream Specify whether to stream the response. (default: stream) │
491
+ ╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
492
+ ╭─ Role Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
493
+ │ --role -r TEXT Specify the assistant role to use. [default: DEFAULT]
494
+ │ --create-role TEXT Create a new role with the specified name.
495
+ │ --delete-role TEXT Delete a role with the specified name.
496
+ │ --list-roles List all available roles.
497
+ │ --show-role TEXT Show the role with the specified name. │
498
+ ╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
499
+ ╭─ Chat Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
500
+ │ --chat -c Start in interactive chat mode.
501
+ │ --list-chats List saved chat sessions. │
502
+ ╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
503
+ ╭─ Shell Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
504
+ │ --shell -s Generate and optionally execute a shell command (non-interactive). │
505
+ ╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
506
+ ╭─ Code Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
507
+ │ --code Generate code in plaintext (non-interactive). │
508
+ ╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
509
+ ╭─ Other Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
510
+ │ --verbose -V Show verbose output (e.g., loaded config).
511
+ │ --template Show the default config file template and exit.
512
+ │ --show-reasoning --hide-reasoning Show reasoning content from the LLM. (default: show)
513
+ │ --justify -j [default|left|center|right|full] Specify the justify to use. [default: default] │
514
+ ╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
515
+ ╭─ Function Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
516
+ │ --install-functions Install default functions. │
517
+ │ --list-functions List all available functions. │
518
+ │ --enable-functions --disable-functions Enable/disable function calling in API requests (default: disabled) │
519
+ │ --show-function-output --hide-function-output Show the output of functions (default: show) │
520
+ ╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
488
521
  ```
489
522
 
490
523
  ### Interactive Mode Features
@@ -495,6 +528,7 @@ ai --verbose "Explain quantum computing"
495
528
 
496
529
  **Commands**
497
530
 
531
+ - `/help|?` - Show help message
498
532
  - `/clear` - Clear conversation history
499
533
  - `/his` - Show command history
500
534
  - `/list` - List saved chats
@@ -799,6 +833,51 @@ $ ai --code "write a fib generator" --model deepseek-r1
799
833
 
800
834
  ![fib code example](artwork/reasoning_code_example.png)
801
835
 
836
+ ### Function Call
837
+
838
+ To use function call, you need to install default functions by `ai --install-functions`.
839
+ After that, you can check the functions by `ai --list-functions`.
840
+ You can also define your own functions by adding them to the config folder in `~/.config/yaicli/functions/` (`C:\Users\<user>\.config\yaicli\functions` on Windows).
841
+
842
+ The `--enable-functions` option corresponds to the configuration key `ENABLE_FUNCTIONS`.
843
+
844
+ ```shell
845
+ ai 'check the current dir total size' --enable-functions
846
+ Assistant:
847
+ Thinking:
848
+
849
+ ▌ Okay, the user wants to check the current directory's total size. Hmm, how do I do that in macOS with zsh?
850
+ ▌ I remember that the command to get disk usage is usually 'du'. But wait, the default 'du' might not give the total size of the
851
+ ▌ current directory directly. Let me think. Oh right, if I use 'du -sh' with the current directory, that should give the total size
852
+ ▌ in human-readable format.
853
+ ▌ Wait, but sometimes the -s option summarizes the directory. So 'du -sh .' would calculate the total size of the current directory
854
+ ▌ and its subdirectories. That should work. Let me confirm the syntax. Yeah, 'du -sh .' is the right command here.
855
+ ▌ The user is using zsh, but the 'du' command is standard, so it should be available. I need to execute this shell command. The
856
+ ▌ function provided is execute_shell_command, so I'll call that with the shell command 'du -sh .' as the argument.
857
+ ▌ I should make sure the parameters are correctly formatted. The function requires a shell_command string. Alright, that's all.
858
+ ▌ Let's generate the tool call.
859
+
860
+ {"index":0,"finish_reason":"tool_calls","delta":{"role":"assistant","content":null,"audio":null,"tool_calls":[{"id":"call_202505141526
861
+ 36cc3f776ae8f14b56_0","index":0,"type":"function","function":{"name":"execute_shell_command","arguments":"{"shell_command": "du -sh
862
+ ."}","outputs":null},"code_interpreter":null,"retrieval":null,"drawing_tool":null,"web_browser":null,"search_intent":null,"search_resu
863
+ lt":null}],"tool_call_id":null,"attachments":null,"metadata":null}}
864
+ @Function call: execute_shell_command({"shell_command": "du -sh ."})
865
+ ╭─ Function output ─────╮
866
+ │ Exit code: 0, Output: │
867
+ │ 156M . │
868
+ │ │
869
+ ╰───────────────────────╯
870
+ Thinking:
871
+
872
+ ▌ Okay, the user asked to check the current directory's total size. I used the 'du -sh .' command, which stands for disk usage,
873
+ ▌ summarize, and current directory. The output was "156M". So I need to present this in a concise way.
874
+ ▌ First, confirm the command was executed. Then, report the result clearly. Since the user didn't ask for extra details, keep it
875
+ ▌ simple. Just state the total size as 156MB. Maybe mention the command used for transparency. Alright, that should cover it without
876
+ ▌ overcomplicating.
877
+
878
+ Current directory size: 156M (using du -sh .).
879
+ ```
880
+
802
881
  ## 💻 Technical Details
803
882
 
804
883
  ### Architecture
@@ -0,0 +1,24 @@
1
+ pyproject.toml,sha256=bgKGpm_kwSxf-lHJfJw6WfaiKzOOgQpC4SPLuSz7zLM,1596
2
+ yaicli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
+ yaicli/chat.py,sha256=DeTmOeBPU-oiOAIaDj2h-auJor0GyVVhrViLYF6zGIM,13638
4
+ yaicli/cli.py,sha256=XY-xfB_kGHeBaNo17iI1OtEuuC2YIGW2WG4k87coX_Y,22502
5
+ yaicli/client.py,sha256=fKUDmn9s7tF9Q2wIB8WhbsjFYIpV0E29t_Vw0qVmVbI,16229
6
+ yaicli/config.py,sha256=_mp8P6zXyrdp4TzBfHraOCkjv5DMZMOwiEhQnFYWwZA,6321
7
+ yaicli/console.py,sha256=vARPJd-3lafutsQWrGntQVjLrYqaJD3qisN82pmuhjU,1973
8
+ yaicli/const.py,sha256=FYW8cNqFzZwnYbgr_HXZSzSS8OIU_UsFIn4SZ0zOJ8U,8129
9
+ yaicli/entry.py,sha256=Dpipmzi-wIC3kaypJVOg-WKEjfCcwzI12cMJd0ATOro,8691
10
+ yaicli/exceptions.py,sha256=WBYg8OTJJzaj7lt6HE7ZyBoe5T6A3yZRNCRfWd4iN0c,372
11
+ yaicli/history.py,sha256=s-57X9FMsaQHF7XySq1gGH_jpd_cHHTYafYu2ECuG6M,2472
12
+ yaicli/printer.py,sha256=c6Y5uN5yR7GKGT2OgZyrHCLYThAeu8fNjrhUGig873s,8644
13
+ yaicli/render.py,sha256=k8o2P8fI44PJlyQbs7gmMiu2x2prwajdWn5JIt15BIA,505
14
+ yaicli/role.py,sha256=PfwiVJIlzg7EzlvMM-kIy6vBK0d5d_J4M1I_fIZGnWk,7399
15
+ yaicli/schemas.py,sha256=PiuSY7ORZaA4OL_tYm0inwqirHp5M-F3zcCipLwsH9E,571
16
+ yaicli/tools.py,sha256=d-5LXbEB-1Uq5VKSgwlAiNDVOGrHkku2DpmZoorq1zw,3098
17
+ yaicli/utils.py,sha256=bpo3Xhozpxsaci3FtEIKZ32l4ZdyWMsrHjYGX0tB4J4,4541
18
+ yaicli/functions/__init__.py,sha256=_FJooQ9GkijG8xLwuU0cr5GBrGnC9Nc6bnCeUjrsT0k,1271
19
+ yaicli/functions/buildin/execute_shell_command.py,sha256=unl1-F8p6QZajeHdA0u5UpURMJM0WhdWMUWCCCHVRcI,1320
20
+ yaicli-0.5.1.dist-info/METADATA,sha256=KI4OnsGFKKWC-Ksy86CDgdElcvYjHva8dfdYcQp5cT0,48699
21
+ yaicli-0.5.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
22
+ yaicli-0.5.1.dist-info/entry_points.txt,sha256=iYVyQP0PJIm9tQnlQheqT435kK_xdGoi5j9aswGV9hA,66
23
+ yaicli-0.5.1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
24
+ yaicli-0.5.1.dist-info/RECORD,,
@@ -1,3 +1,3 @@
1
1
  [console_scripts]
2
2
  ai = yaicli.entry:app
3
- yai = yaicli.entry:app
3
+ yaicli = yaicli.entry:app