yaicli 0.0.8__py3-none-any.whl → 0.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyproject.toml CHANGED
@@ -1,6 +1,6 @@
  [project]
  name = "yaicli"
- version = "0.0.8"
+ version = "0.0.11"
  description = "A simple CLI tool to interact with LLM"
  authors = [{ name = "belingud", email = "im.victor@qq.com" }]
  readme = "README.md"
@@ -31,10 +31,11 @@ keywords = [
  ]
  dependencies = [
      "distro>=1.9.0",
+     "httpx>=0.28.1",
      "jmespath>=1.0.1",
      "prompt-toolkit>=3.0.50",
-     "requests>=2.32.3",
      "rich>=13.9.4",
+     "socksio>=1.0.0",
      "typer>=0.15.2",
  ]
  [project.urls]
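The swap from requests to httpx, together with the new socksio pin, adds SOCKS proxy support to the HTTP layer. A minimal sketch of what socksio enables (the proxy URL is illustrative; httpx will refuse a socks5:// proxy URL unless socksio is importable):

```python
import httpx

# httpx delegates SOCKS support to the socksio package, which is why it
# appears as a direct dependency here. The local proxy is hypothetical.
with httpx.Client(proxy="socks5://127.0.0.1:1080", timeout=120.0) as client:
    r = client.get("https://api.openai.com/v1/models")  # 401 without an API key
    print(r.status_code)
```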
@@ -51,11 +52,12 @@ resolution = "highest"
  [dependency-groups]
  dev = ["bump2version>=1.0.1", "pytest>=8.3.5", "ruff>=0.11.2"]
 
+ [tool.isort]
+ profile = "black"
+
  [tool.ruff]
  line-length = 120
- select = ["E", "F", "W", "I", "B", "C90"]
- ignore = ["E501"]
-
+ fix = true
 
  [build-system]
  requires = ["hatchling>=1.18.0"]
{yaicli-0.0.8.dist-info → yaicli-0.0.11.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: yaicli
- Version: 0.0.8
+ Version: 0.0.11
  Summary: A simple CLI tool to interact with LLM
  Project-URL: Homepage, https://github.com/belingud/yaicli
  Project-URL: Repository, https://github.com/belingud/yaicli
@@ -214,21 +214,24 @@ Classifier: Operating System :: OS Independent
  Classifier: Programming Language :: Python :: 3
  Requires-Python: >=3.9
  Requires-Dist: distro>=1.9.0
+ Requires-Dist: httpx>=0.28.1
  Requires-Dist: jmespath>=1.0.1
  Requires-Dist: prompt-toolkit>=3.0.50
- Requires-Dist: requests>=2.32.3
  Requires-Dist: rich>=13.9.4
+ Requires-Dist: socksio>=1.0.0
  Requires-Dist: typer>=0.15.2
  Description-Content-Type: text/markdown
 
- # YAICLI - Your AI Command Line Interface
+ # YAICLI - Your AI Interface in Command Line
 
  [![PyPI version](https://img.shields.io/pypi/v/yaicli?style=for-the-badge)](https://pypi.org/project/yaicli/)
  ![GitHub License](https://img.shields.io/github/license/belingud/yaicli?style=for-the-badge)
  ![PyPI - Downloads](https://img.shields.io/pypi/dm/yaicli?logo=pypi&style=for-the-badge)
  ![Pepy Total Downloads](https://img.shields.io/pepy/dt/yaicli?style=for-the-badge&logo=python)
 
- YAICLI is a powerful command-line AI assistant tool that enables you to interact with Large Language Models (LLMs) like ChatGPT's gpt-4o through your terminal. It offers multiple operation modes for everyday conversations, generating and executing shell commands, and one-shot quick queries.
+ YAICLI is a compact yet potent command-line AI assistant, allowing you to engage with Large Language Models (LLMs) such as ChatGPT's gpt-4o directly via your terminal. It offers multiple operation modes for everyday conversations, generating and executing shell commands, and one-shot quick queries.
+
+ Supports regular and deep-thinking models.
 
  > [!WARNING]
  > This is a work in progress, some features could change or be removed in the future.
@@ -257,6 +260,9 @@ YAICLI is a powerful command-line AI assistant tool that enables you to interact
  - **Keyboard Shortcuts**:
    - Tab to switch between Chat and Execute modes
 
+ - **History**:
+   - Save and recall previous queries
+
  ## Installation
 
  ### Prerequisites
@@ -296,6 +302,7 @@ The default configuration file is located at `~/.config/yaicli/config.ini`. Look
 
  ```ini
  [core]
+ PROVIDER=OPENAI
  BASE_URL=https://api.openai.com/v1
  API_KEY=your_api_key_here
  MODEL=gpt-4o
@@ -312,6 +319,10 @@ ANSWER_PATH=choices[0].message.content
  # true: streaming response
  # false: non-streaming response
  STREAM=true
+
+ TEMPERATURE=0.7
+ TOP_P=1.0
+ MAX_TOKENS=1024
  ```
 
  ### Configuration Options
@@ -327,6 +338,58 @@ Below are the available configuration options and override environment variables
  - **ANSWER_PATH**: JSON path expression to extract the answer from the response, default: choices[0].message.content, env: AI_ANSWER_PATH
  - **STREAM**: Enable/disable streaming responses, default: true, env: AI_STREAM
 
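Environment variables take precedence over the config file, which in turn overrides the built-in defaults (the file is loaded as the "middle priority" in `load_config`). A minimal sketch of that lookup order, using the AI_STREAM key listed above (`file_config` stands in for the parsed ini):

```python
from os import getenv

# Stand-in for the values parsed from ~/.config/yaicli/config.ini.
file_config = {"STREAM": "false"}

# Highest priority: environment variable; then config file; then default.
stream = getenv("AI_STREAM") or file_config.get("STREAM") or "true"
print(stream)  # "false" here, unless AI_STREAM is set in the environment
```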
+ The default `COMPLETION_PATH` and `ANSWER_PATH` are OpenAI-compatible. If you are using OpenAI or another OpenAI-compatible LLM provider, you can keep the default config.
+
+ If you wish to use a provider that is not compatible with the OpenAI interface, you can use the following config:
+
+ - claude:
+   - BASE_URL: https://api.anthropic.com/v1
+   - COMPLETION_PATH: /messages
+   - ANSWER_PATH: content[0].text
+ - cohere:
+   - BASE_URL: https://api.cohere.com/v2
+   - COMPLETION_PATH: /chat
+   - ANSWER_PATH: message.content[0].text
+ - google:
+   - BASE_URL: https://generativelanguage.googleapis.com/v1beta/openai
+   - COMPLETION_PATH: /chat/completions
+   - ANSWER_PATH: choices[0].message.content
+
+ You can use Google's OpenAI-compatible endpoint and leave `COMPLETION_PATH` and `ANSWER_PATH` at their defaults, with BASE_URL: https://generativelanguage.googleapis.com/v1beta/openai. See https://ai.google.dev/gemini-api/docs/openai
+
+ Claude also has a testable OpenAI-compatible interface, so you can use the Claude endpoint and leave `COMPLETION_PATH` and `ANSWER_PATH` at their defaults. See: https://docs.anthropic.com/en/api/openai-sdk
+
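As a sanity check for an OpenAI-compatible endpoint, you can POST the same request body yaicli builds in `post()`. A minimal sketch against the Gemini endpoint above (the GEMINI_API_KEY variable and the model name are illustrative, not yaicli settings):

```python
import os

import httpx

# BASE_URL + COMPLETION_PATH, joined the same way yaicli joins them.
url = "https://generativelanguage.googleapis.com/v1beta/openai" + "/chat/completions"
body = {
    "messages": [{"role": "user", "content": "Say hi"}],
    "model": "gemini-2.0-flash",  # illustrative model name
    "stream": False,
}
resp = httpx.post(url, json=body, headers={"Authorization": f"Bearer {os.environ['GEMINI_API_KEY']}"})
resp.raise_for_status()
# The default ANSWER_PATH (choices[0].message.content) applies to this shape.
print(resp.json()["choices"][0]["message"]["content"])
```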
+ If you are not sure how to configure `COMPLETION_PATH` and `ANSWER_PATH`, here is a guide:
+ 1. **Find the API Endpoint**:
+    - Visit the documentation of the LLM provider you want to use.
+    - Find the API endpoint for the completion task. This is usually under the "API Reference" or "Developer Documentation" section.
+ 2. **Identify the Response Structure**:
+    - Look for the structure of the response. This typically includes fields like `choices`, `completion`, etc.
+ 3. **Identify the Path Expression**:
+    For example, Claude's response structure looks like this:
+ ```json
+ {
+   "content": [
+     {
+       "text": "Hi! My name is Claude.",
+       "type": "text"
+     }
+   ],
+   "id": "msg_013Zva2CMHLNnXjNJJKqJ2EF",
+   "model": "claude-3-7-sonnet-20250219",
+   "role": "assistant",
+   "stop_reason": "end_turn",
+   "stop_sequence": null,
+   "type": "message",
+   "usage": {
+     "input_tokens": 2095,
+     "output_tokens": 503
+   }
+ }
+ ```
+ We are looking for the `text` field, so the path is: 1. key `content`, 2. first item `[0]`, 3. key `text`, giving `content[0].text`.
+
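To verify a path before putting it in the config, you can run it through jmespath directly (already a yaicli dependency). A minimal sketch with a trimmed version of the Claude response above:

```python
import jmespath

# Trimmed Claude-style response from the example above.
response = {
    "content": [{"text": "Hi! My name is Claude.", "type": "text"}],
    "role": "assistant",
    "stop_reason": "end_turn",
}

# The same call yaicli makes for non-streaming answers.
print(jmespath.search("content[0].text", response))  # Hi! My name is Claude.
```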
  ## Usage
 
  ### Basic Usage
@@ -456,4 +519,4 @@ Contributions of code, issue reports, or feature suggestions are welcome.
 
  ---
 
- *YAICLI - Making your terminal smarter*
+ *YAICLI - Making your terminal smarter*
yaicli-0.0.11.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+ pyproject.toml,sha256=X26aCrit45QBISeLMZi0zzjd4M3YOCK_fYA9L-gKmS8,1452
+ yaicli.py,sha256=CEQlDJrQn7UEFLpAHbNFyo-chVoFi55P1fblealDhd8,20559
+ yaicli-0.0.11.dist-info/METADATA,sha256=0ty4x5vHDyyVJsY5xk4yoII7I3d7d-PhoMWFSS_h-Yg,25934
+ yaicli-0.0.11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ yaicli-0.0.11.dist-info/entry_points.txt,sha256=gdduQwAuu_LeDqnDU81Fv3NPmD2tRQ1FffvolIP3S1Q,34
+ yaicli-0.0.11.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ yaicli-0.0.11.dist-info/RECORD,,
yaicli.py CHANGED
@@ -2,14 +2,13 @@ import configparser
  import json
  import platform
  import subprocess
- import time
  from os import getenv
  from os.path import basename, pathsep
  from pathlib import Path
- from typing import Annotated, Optional
+ from typing import Annotated, Optional, Union
 
+ import httpx
  import jmespath
- import requests
  import typer
  from distro import name as distro_name
  from prompt_toolkit import PromptSession
@@ -30,7 +29,7 @@ Rules:
  5. Return NOTHING except the ready-to-run command"""
 
  DEFAULT_PROMPT = (
-     "You are yaili, a system management and programing assistant, "
+     "You are YAICLI, a system management and programing assistant, "
      "You are managing {_os} operating system with {_shell} shell. "
      "Your responses should be concise and use Markdown format, "
      "unless the user explicitly requests more details."
@@ -54,23 +53,7 @@ DEFAULT_CONFIG_MAP = {
      "STREAM": {"value": "true", "env_key": "AI_STREAM"},
  }
 
- app = typer.Typer(
-     name="yaicli",
-     context_settings={"help_option_names": ["-h", "--help"]},
-     pretty_exceptions_enable=False,
- )
-
-
- class CasePreservingConfigParser(configparser.RawConfigParser):
-     """Case preserving config parser"""
-
-     def optionxform(self, optionstr):
-         return optionstr
-
-
- class CLI:
-     CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()
-     DEFAULT_CONFIG_INI = """[core]
+ DEFAULT_CONFIG_INI = """[core]
  PROVIDER=openai
  BASE_URL=https://api.openai.com/v1
  API_KEY=
@@ -87,7 +70,28 @@ ANSWER_PATH=choices[0].message.content
 
  # true: streaming response
  # false: non-streaming response
- STREAM=true"""
+ STREAM=true
+
+ TEMPERATURE=0.7
+ TOP_P=1.0
+ MAX_TOKENS=1024"""
+
+ app = typer.Typer(
+     name="yaicli",
+     context_settings={"help_option_names": ["-h", "--help"]},
+     pretty_exceptions_enable=False,
+ )
+
+
+ class CasePreservingConfigParser(configparser.RawConfigParser):
+     """Case preserving config parser"""
+
+     def optionxform(self, optionstr):
+         return optionstr
+
+
+ class CLI:
+     CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()
 
      def __init__(self, verbose: bool = False) -> None:
          self.verbose = verbose
@@ -123,7 +127,7 @@ STREAM=true"""
              self.console.print("[bold yellow]Creating default configuration file.[/bold yellow]")
              self.CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
              with open(self.CONFIG_PATH, "w") as f:
-                 f.write(self.DEFAULT_CONFIG_INI)
+                 f.write(DEFAULT_CONFIG_INI)
          else:
              # Load from configuration file (middle priority)
              config_parser = CasePreservingConfigParser()
@@ -180,10 +184,11 @@ STREAM=true"""
          example:
          ```bash\nls -la\n``` ==> ls -al
          ```zsh\nls -la\n``` ==> ls -al
-         ```ls -al``` ==> ls -al
-         ls -al ==> ls -al
+         ```ls -la``` ==> ls -la
+         ls -la ==> ls -la
          ```\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
          ```bash\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
+         ```plaintext\nls -la\n``` ==> ls -la
          """
          if not command or not command.strip():
              return ""
@@ -211,65 +216,130 @@ STREAM=true"""
          # Join the remaining lines and strip any extra whitespace
          return "\n".join(line.strip() for line in content_lines if line.strip())
 
-     def post(self, message: list[dict[str, str]]) -> requests.Response:
+     def _get_type_number(self, key, _type: type, default=None):
+         """Get number with type from config"""
+         try:
+             return _type(self.config.get(key, default))
+         except ValueError:
+             raise ValueError(f"[red]{key} should be {_type} type.[/red]")
+
+     def post(self, message: list[dict[str, str]]) -> httpx.Response:
          """Post message to LLM API and return response"""
          url = self.config.get("BASE_URL", "").rstrip("/") + "/" + self.config.get("COMPLETION_PATH", "").lstrip("/")
          body = {
              "messages": message,
              "model": self.config.get("MODEL", "gpt-4o"),
              "stream": self.config.get("STREAM", "true") == "true",
-             "temperature": 0.7,
-             "top_p": 1,
+             "temperature": self._get_type_number(key="TEMPERATURE", _type=float, default="0.7"),
+             "top_p": self._get_type_number(key="TOP_P", _type=float, default="1.0"),
+             "max_tokens": self._get_type_number(key="MAX_TOKENS", _type=int, default="1024"),
          }
-         response = requests.post(url, json=body, headers={"Authorization": f"Bearer {self.config.get('API_KEY', '')}"})
+         with httpx.Client(timeout=120.0) as client:
+             response = client.post(
+                 url, json=body, headers={"Authorization": f"Bearer {self.config.get('API_KEY', '')}"}
+             )
          try:
              response.raise_for_status()
-         except requests.exceptions.HTTPError as e:
+         except httpx.HTTPStatusError as e:
              self.console.print(f"[red]Error calling API: {e}[/red]")
              if self.verbose:
-                 self.console.print(f"Reason: {e.response.reason}")
+                 self.console.print(f"Reason: {e}")
              self.console.print(f"Response: {response.text}")
-             raise typer.Exit(code=1) from None
+             raise e
          return response
 
-     def _print(self, response: requests.Response, stream: bool = True) -> str:
-         """Print response from LLM and return full completion"""
+     def get_reasoning_content(self, delta: dict) -> Optional[str]:
+         # reasoning: openrouter
+         # reasoning_content: infi-ai/deepseek
+         for k in ("reasoning_content", "reasoning"):
+             if k in delta:
+                 return delta[k]
+         return None
+
+     def _parse_stream_line(self, line: Union[bytes, str]) -> Optional[dict]:
+         """Parse a single line from the stream response"""
+         if not line:
+             return None
+
+         if isinstance(line, bytes):
+             line = line.decode("utf-8")
+         if not line.startswith("data: "):
+             return None
+
+         line = line[6:]
+         if line == "[DONE]":
+             return None
+
+         try:
+             json_data = json.loads(line)
+             if not json_data.get("choices"):
+                 return None
+
+             return json_data
+         except json.JSONDecodeError:
+             self.console.print("[red]Error decoding response JSON[/red]")
+             if self.verbose:
+                 self.console.print(f"[red]Error JSON data: {line}[/red]")
+             return None
+
+     def _process_reasoning_content(self, reason: str, full_completion: str, in_reasoning: bool) -> tuple[str, bool]:
+         """Process reasoning content in the response"""
+         if not in_reasoning:
+             in_reasoning = True
+             full_completion = "> Reasoning:\n> "
+         full_completion += reason.replace("\n", "\n> ")
+         return full_completion, in_reasoning
+
+     def _process_regular_content(self, content: str, full_completion: str, in_reasoning: bool) -> tuple[str, bool]:
+         """Process regular content in the response"""
+         if in_reasoning:
+             in_reasoning = False
+             full_completion += "\n\n"
+         full_completion += content
+         return full_completion, in_reasoning
+
+     def _print_stream(self, response: httpx.Response) -> str:
+         """Print response from LLM in streaming mode"""
          full_completion = ""
+         in_reasoning = False
+
+         with Live() as live:
+             for line in response.iter_lines():
+                 json_data = self._parse_stream_line(line)
+                 if not json_data:
+                     continue
+
+                 delta = json_data["choices"][0]["delta"]
+                 reason = self.get_reasoning_content(delta)
+
+                 if reason is not None:
+                     full_completion, in_reasoning = self._process_reasoning_content(
+                         reason, full_completion, in_reasoning
+                     )
+                 else:
+                     content = delta.get("content", "") or ""
+                     full_completion, in_reasoning = self._process_regular_content(
+                         content, full_completion, in_reasoning
+                     )
+
+                 live.update(Markdown(markup=full_completion), refresh=True)
+
+         return full_completion
+
+     def _print_non_stream(self, response: httpx.Response) -> str:
+         """Print response from LLM in non-streaming mode"""
+         full_completion = jmespath.search(self.config.get("ANSWER_PATH", "choices[0].message.content"), response.json())
+         self.console.print(Markdown(full_completion))
+         return full_completion
+
+     def _print(self, response: httpx.Response, stream: bool = True) -> str:
+         """Print response from LLM and return full completion"""
          if stream:
-             with Live() as live:
-                 for line in response.iter_lines():
-                     # Skip empty lines
-                     if not line:
-                         continue
-
-                     # Process server-sent events
-                     data = line.decode("utf-8")
-                     if not data.startswith("data: "):
-                         continue
-
-                     # Extract data portion
-                     data = data[6:]
-                     if data == "[DONE]":
-                         break
-
-                     # Parse JSON and update display
-                     try:
-                         json_data = json.loads(data)
-                         content = json_data["choices"][0]["delta"].get("content", "")
-                         full_completion += content
-                         live.update(Markdown(markup=full_completion), refresh=True)
-                     except json.JSONDecodeError:
-                         self.console.print("[red]Error decoding response JSON[/red]")
-                         if self.verbose:
-                             self.console.print(f"[red]Error: {data}[/red]")
-
-                     time.sleep(0.01)
+             # Streaming response
+             full_completion = self._print_stream(response)
          else:
              # Non-streaming response
-             full_completion = jmespath.search(
-                 self.config.get("ANSWER_PATH", "choices[0].message.content"), response.json()
-             )
-             self.console.print(Markdown(full_completion))
+             full_completion = self._print_non_stream(response)
          self.console.print()  # Add a newline after the response to separate from the next input
          return full_completion
 
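To make the streaming changes above concrete, here is a standalone sketch of the SSE parsing and reasoning-to-blockquote logic that `_parse_stream_line`, `_process_reasoning_content`, and `_process_regular_content` implement (the sample lines are invented for illustration):

```python
import json

# Typical server-sent-event lines from an OpenAI-compatible streaming API.
sample_lines = [
    'data: {"choices": [{"delta": {"reasoning_content": "thinking..."}}]}',
    'data: {"choices": [{"delta": {"content": "Hello!"}}]}',
    "data: [DONE]",
]

full, in_reasoning = "", False
for line in sample_lines:
    if not line.startswith("data: ") or line[6:] == "[DONE]":
        continue
    delta = json.loads(line[6:])["choices"][0]["delta"]
    reason = delta.get("reasoning_content") or delta.get("reasoning")
    if reason is not None:  # mirrors _process_reasoning_content
        if not in_reasoning:
            in_reasoning, full = True, "> Reasoning:\n> "
        full += reason.replace("\n", "\n> ")
    else:  # mirrors _process_regular_content
        if in_reasoning:
            in_reasoning, full = False, full + "\n\n"
        full += delta.get("content", "") or ""

print(full)  # "> Reasoning:\n> thinking...\n\nHello!"
```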
@@ -292,7 +362,13 @@ STREAM=true"""
          """Run REPL loop, handling user input and generating responses, saving history, and executing commands"""
          # Show REPL instructions
          self._setup_key_bindings()
-         self.console.print("[bold]Starting REPL loop[/bold]")
+         self.console.print("""
+  ██    ██  █████  ██  ██████ ██      ██
+   ██  ██  ██   ██ ██ ██      ██      ██
+    ████   ███████ ██ ██      ██      ██
+     ██    ██   ██ ██ ██      ██      ██
+     ██    ██   ██ ██  ██████ ███████ ██
+ """)
          self.console.print("[bold]Press TAB to change in chat and exec mode[/bold]")
          self.console.print("[bold]Type /clear to clear chat history[/bold]")
          self.console.print("[bold]Type /his to see chat history[/bold]")
@@ -328,12 +404,21 @@ STREAM=true"""
              message.append({"role": "user", "content": user_input})
 
              # Get response from LLM
-             response = self.post(message)
+             try:
+                 response = self.post(message)
+             except ValueError as e:
+                 self.console.print(f"[red]Error: {e}[/red]")
+                 return
+             except httpx.ConnectError as e:
+                 self.console.print(f"[red]Error: {e}[/red]")
+                 continue
+             except httpx.HTTPStatusError:
+                 continue
              self.console.print("\n[bold green]Assistant:[/bold green]")
              try:
                  content = self._print(response, stream=self.config["STREAM"] == "true")
              except Exception as e:
-                 self.console.print(f"[red]Error: {e}[/red]")
+                 self.console.print(f"[red]Unknown Error: {e}[/red]")
                  continue
 
              # Add user input and assistant response to history
@@ -355,24 +440,10 @@ STREAM=true"""
 
          self.console.print("[bold green]Exiting...[/bold green]")
 
-     def run(self, chat: bool, shell: bool, prompt: str) -> None:
-         """Run the CLI"""
-         self.load_config()
-         if not self.config.get("API_KEY"):
-             self.console.print("[bold red]API key not set[/bold red]")
-             self.console.print(
-                 "[bold red]Please set API key in ~/.config/yaicli/config.ini or environment variable[/bold red]"
-             )
-             raise typer.Exit(code=1)
+     def _run_once(self, prompt: str, shell: bool = False) -> None:
+         """Run once with given prompt"""
          _os = self.detect_os()
          _shell = self.detect_shell()
-
-         # Handle chat mode
-         if chat:
-             self.current_mode = CHAT_MODE
-             self._run_repl()
-             return
-
          # Create appropriate system prompt based on mode
          system_prompt = SHELL_PROMPT if shell else DEFAULT_PROMPT
          system_content = system_prompt.format(_os=_os, _shell=_shell)
@@ -384,7 +455,14 @@ STREAM=true"""
          ]
 
          # Get response from LLM
-         response = self.post(message)
+         try:
+             response = self.post(message)
+         except (ValueError, httpx.ConnectError, httpx.HTTPStatusError) as e:
+             self.console.print(f"[red]Error: {e}[/red]")
+             return
+         except Exception as e:
+             self.console.print(f"[red]Unknown Error: {e}[/red]")
+             return
          self.console.print("\n[bold green]Assistant:[/bold green]")
          content = self._print(response, stream=self.config["STREAM"] == "true")
 
@@ -400,6 +478,23 @@ STREAM=true"""
              if returncode != 0:
                  self.console.print(f"[bold red]Command failed with return code {returncode}[/bold red]")
 
+     def run(self, chat: bool, shell: bool, prompt: str) -> None:
+         """Run the CLI"""
+         self.load_config()
+         if not self.config.get("API_KEY"):
+             self.console.print("[bold red]API key not set[/bold red]")
+             self.console.print(
+                 "[bold red]Please set API key in ~/.config/yaicli/config.ini or environment variable[/bold red]"
+             )
+             raise typer.Exit(code=1)
+
+         # Handle chat mode
+         if chat:
+             self.current_mode = CHAT_MODE
+             self._run_repl()
+         else:
+             self._run_once(prompt, shell)
+
 
  @app.command()
  def main(
yaicli-0.0.8.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
- pyproject.toml,sha256=0ri15toylKhtFhwAemgdMReD72wetPZIISWhJtpsqe8,1450
- yaicli.py,sha256=E6QRi9KgMBoDU-KQ1V7N1SVTAg5PIVIfCMWXLn3taE4,16877
- yaicli-0.0.8.dist-info/METADATA,sha256=OIArk-08C1L-hW8Hu9RfGby7PVm0G_C16_DA_peaoxM,23489
- yaicli-0.0.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- yaicli-0.0.8.dist-info/entry_points.txt,sha256=gdduQwAuu_LeDqnDU81Fv3NPmD2tRQ1FffvolIP3S1Q,34
- yaicli-0.0.8.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- yaicli-0.0.8.dist-info/RECORD,,