yaicli 0.0.12__py3-none-any.whl → 0.0.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyproject.toml CHANGED
@@ -1,6 +1,6 @@
  [project]
  name = "yaicli"
- version = "0.0.12"
+ version = "0.0.13"
  description = "A simple CLI tool to interact with LLM"
  authors = [{ name = "belingud", email = "im.victor@qq.com" }]
  readme = "README.md"
@@ -46,10 +46,6 @@ Documentation = "https://github.com/belingud/yaicli"
  [project.scripts]
  ai = "yaicli:app"

- [tool.pdm.scripts]
- bump = "bump2version {args}"
- changelog = "just changelog"
-
  [tool.uv]
  resolution = "highest"

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: yaicli
- Version: 0.0.12
+ Version: 0.0.13
  Summary: A simple CLI tool to interact with LLM
  Project-URL: Homepage, https://github.com/belingud/yaicli
  Project-URL: Repository, https://github.com/belingud/yaicli
@@ -329,14 +329,17 @@ MAX_TOKENS=1024

  Below are the available configuration options and override environment variables:

- - **BASE_URL**: API endpoint URL (default: OpenAI API), env: AI_BASE_URL
- - **API_KEY**: Your API key for the LLM provider, env: AI_API_KEY
- - **MODEL**: The model to use (e.g., gpt-4o, gpt-3.5-turbo), default: gpt-4o, env: AI_MODEL
- - **SHELL_NAME**: Shell to use (auto for automatic detection), default: auto, env: AI_SHELL_NAME
- - **OS_NAME**: OS to use (auto for automatic detection), default: auto, env: AI_OS_NAME
- - **COMPLETION_PATH**: Path for completions endpoint, default: /chat/completions, env: AI_COMPLETION_PATH
- - **ANSWER_PATH**: Json path expression to extract answer from response, default: choices[0].message.content, env: AI_ANSWER_PATH
- - **STREAM**: Enable/disable streaming responses, default: true, env: AI_STREAM
+ - **BASE_URL**: API endpoint URL (default: OpenAI API), env: YAI_BASE_URL
+ - **API_KEY**: Your API key for the LLM provider, env: YAI_API_KEY
+ - **MODEL**: The model to use (e.g., gpt-4o, gpt-3.5-turbo), default: gpt-4o, env: YAI_MODEL
+ - **SHELL_NAME**: Shell to use (auto for automatic detection), default: auto, env: YAI_SHELL_NAME
+ - **OS_NAME**: OS to use (auto for automatic detection), default: auto, env: YAI_OS_NAME
+ - **COMPLETION_PATH**: Path for completions endpoint, default: /chat/completions, env: YAI_COMPLETION_PATH
+ - **ANSWER_PATH**: Json path expression to extract answer from response, default: choices[0].message.content, env: YAI_ANSWER_PATH
+ - **STREAM**: Enable/disable streaming responses, default: true, env: YAI_STREAM
+ - **TEMPERATURE**: Temperature for response generation (default: 0.7), env: YAI_TEMPERATURE
+ - **TOP_P**: Top-p sampling for response generation (default: 1.0), env: YAI_TOP_P
+ - **MAX_TOKENS**: Maximum number of tokens for response generation (default: 1024), env: YAI_MAX_TOKENS

  Default config of `COMPLETION_PATH` and `ANSWER_PATH` is OpenAI compatible. If you are using OpenAI or other OpenAI compatible LLM provider, you can use the default config.

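Note on the renamed overrides above: the environment prefix moves from AI_ to YAI_ in 0.0.13. Below is a minimal standalone sketch (not taken from the package) of the precedence the README describes, assuming an environment variable, when set, wins over the config.ini value, which in turn wins over the built-in default:

```python
import os

# Hypothetical resolver for illustration only; option names mirror the list above.
DEFAULTS = {
    "MODEL": ("gpt-4o", "YAI_MODEL"),
    "STREAM": ("true", "YAI_STREAM"),
    "TEMPERATURE": ("0.7", "YAI_TEMPERATURE"),
}

def resolve_config(file_values: dict[str, str]) -> dict[str, str]:
    """Env var (if set) > config.ini value > default."""
    return {
        key: os.environ.get(env_key) or file_values.get(key, default)
        for key, (default, env_key) in DEFAULTS.items()
    }

# Example: exporting YAI_MODEL=gpt-4o-mini would shadow the config.ini entry below.
print(resolve_config({"MODEL": "gpt-3.5-turbo"}))
```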
@@ -467,10 +470,45 @@ In Execute mode:

  ## Examples

+ ### Have a Chat
+
+ ```bash
+ $ ai "What is the capital of France?"
+ Assistant:
+ The capital of France is Paris.
+ ```
+
+ ### Command Gen and Run
+
+ ```bash
+ $ ai -s 'Check the current directory size'
+ Assistant:
+ du -sh .
+
+ Generated command: du -sh .
+ Execute this command? [y/n/e] (n): e
+ Edit command, press enter to execute:
+ du -sh ./
+ Output:
+ 109M ./
+ ```
+
  ### Chat Mode Example

  ```bash
  $ ai --chat
+
+ ██ ██ █████ ██ ██████ ██ ██
+ ██ ██ ██ ██ ██ ██ ██ ██
+ ████ ███████ ██ ██ ██ ██
+ ██ ██ ██ ██ ██ ██ ██
+ ██ ██ ██ ██ ██████ ███████ ██
+
+ Press TAB to change in chat and exec mode
+ Type /clear to clear chat history
+ Type /his to see chat history
+ Press Ctrl+C or type /exit to exit
+
  💬 > Tell me about the solar system

  Assistant:
@@ -489,7 +527,17 @@ Certainly! Here’s a brief overview of the solar system:
  • Dwarf Planets:
  • Pluto: Once considered the ninth planet, now classified as

- 💬 >
+ 🚀 > Check the current directory size
+ Assistant:
+ du -sh .
+
+ Generated command: du -sh .
+ Execute this command? [y/n/e] (n): e
+ Edit command, press enter to execute:
+ du -sh ./
+ Output:
+ 109M ./
+ 🚀 >
  ```

  ### Execute Mode Example
@@ -0,0 +1,7 @@
+ pyproject.toml,sha256=15od1R0Bb-b7YKSSlz1SmzGoaNNbfHgv8y5Zr0gXfBU,1452
+ yaicli.py,sha256=Cby2e0HHoh7sAOIvAxEKoZA0TRS3A3ikkfZ6o3bem0o,20955
+ yaicli-0.0.13.dist-info/METADATA,sha256=5Yc9O8k_N66OpBTqKG9kVGUvXzj2-L3UrXaiZziWfVU,29445
+ yaicli-0.0.13.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ yaicli-0.0.13.dist-info/entry_points.txt,sha256=gdduQwAuu_LeDqnDU81Fv3NPmD2tRQ1FffvolIP3S1Q,34
+ yaicli-0.0.13.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ yaicli-0.0.13.dist-info/RECORD,,
yaicli.py CHANGED
@@ -19,7 +19,7 @@ from prompt_toolkit.keys import Keys
  from rich.console import Console
  from rich.live import Live
  from rich.markdown import Markdown
- from rich.prompt import Confirm
+ from rich.prompt import Prompt

  SHELL_PROMPT = """Your are a Shell Command Generator.
  Generate a command EXCLUSIVELY for {_os} OS with {_shell} shell.
@@ -46,14 +46,17 @@ CHAT_MODE = "chat"
  TEMP_MODE = "temp"

  DEFAULT_CONFIG_MAP = {
- "BASE_URL": {"value": "https://api.openai.com/v1", "env_key": "AI_BASE_URL"},
- "API_KEY": {"value": "", "env_key": "AI_API_KEY"},
- "MODEL": {"value": "gpt-4o", "env_key": "AI_MODEL"},
- "SHELL_NAME": {"value": "auto", "env_key": "AI_SHELL_NAME"},
- "OS_NAME": {"value": "auto", "env_key": "AI_OS_NAME"},
- "COMPLETION_PATH": {"value": "chat/completions", "env_key": "AI_COMPLETION_PATH"},
- "ANSWER_PATH": {"value": "choices[0].message.content", "env_key": "AI_ANSWER_PATH"},
- "STREAM": {"value": "true", "env_key": "AI_STREAM"},
+ "BASE_URL": {"value": "https://api.openai.com/v1", "env_key": "YAI_BASE_URL"},
+ "API_KEY": {"value": "", "env_key": "YAI_API_KEY"},
+ "MODEL": {"value": "gpt-4o", "env_key": "YAI_MODEL"},
+ "SHELL_NAME": {"value": "auto", "env_key": "YAI_SHELL_NAME"},
+ "OS_NAME": {"value": "auto", "env_key": "YAI_OS_NAME"},
+ "COMPLETION_PATH": {"value": "chat/completions", "env_key": "YAI_COMPLETION_PATH"},
+ "ANSWER_PATH": {"value": "choices[0].message.content", "env_key": "YAI_ANSWER_PATH"},
+ "STREAM": {"value": "true", "env_key": "YAI_STREAM"},
+ "TEMPERATURE": {"value": "0.7", "env_key": "YAI_TEMPERATURE"},
+ "TOP_P": {"value": "1.0", "env_key": "YAI_TOP_P"},
+ "MAX_TOKENS": {"value": "1024", "env_key": "YAI_MAX_TOKENS"},
  }

  DEFAULT_CONFIG_INI = """[core]
@@ -102,10 +105,14 @@ class CLI:
  self.bindings = KeyBindings()
  self.session = PromptSession(key_bindings=self.bindings)
  self.config = {}
- self.history = []
+ self.history: list[dict[str, str]] = []
  self.max_history_length = 25
  self.current_mode = TEMP_MODE

+ def is_stream(self) -> bool:
+ """Check if streaming is enabled"""
+ return self.config["STREAM"] == "true"
+
  def prepare_chat_loop(self) -> None:
  """Setup key bindings and history for chat mode"""
  self._setup_key_bindings()
@@ -232,7 +239,7 @@ class CLI:
  # Join the remaining lines and strip any extra whitespace
  return "\n".join(line.strip() for line in content_lines if line.strip())

- def _get_type_number(self, key, _type: type, default=None):
+ def _get_number_with_type(self, key, _type: type, default=None):
  """Get number with type from config"""
  try:
  return _type(self.config.get(key, default))
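The helper renamed above coerces string config values to numbers. A minimal standalone sketch of the same pattern follows; the fallback branch is an assumption for illustration, since this hunk only shows the try body:

```python
def get_number_with_type(config: dict, key: str, _type: type, default=None):
    """Coerce a string config value (e.g. "0.7") to the requested numeric type."""
    try:
        return _type(config.get(key, default))
    except (TypeError, ValueError):
        # Assumed fallback: use the documented default when the value is malformed.
        return _type(default)

cfg = {"TEMPERATURE": "0.7", "MAX_TOKENS": "not-a-number"}
print(get_number_with_type(cfg, "TEMPERATURE", float, "0.7"))  # 0.7
print(get_number_with_type(cfg, "MAX_TOKENS", int, "1024"))    # 1024 (fallback)
print(get_number_with_type(cfg, "TOP_P", float, "1.0"))        # 1.0 (missing key -> default)
```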
@@ -245,10 +252,10 @@ class CLI:
  body = {
  "messages": message,
  "model": self.config.get("MODEL", "gpt-4o"),
- "stream": self.config.get("STREAM", "true") == "true",
- "temperature": self._get_type_number(key="TEMPERATURE", _type=float, default="0.7"),
- "top_p": self._get_type_number(key="TOP_P", _type=float, default="1.0"),
- "max_tokens": self._get_type_number(key="MAX_TOKENS", _type=int, default="1024"),
+ "stream": self.is_stream(),
+ "temperature": self._get_number_with_type(key="TEMPERATURE", _type=float, default="0.7"),
+ "top_p": self._get_number_with_type(key="TOP_P", _type=float, default="1.0"),
+ "max_tokens": self._get_number_with_type(key="MAX_TOKENS", _type=int, default="1024"),
  }
  with httpx.Client(timeout=120.0) as client:
  response = client.post(
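The hunk above assembles an OpenAI-compatible request body and posts it with httpx. A minimal standalone sketch of that call is below; the endpoint URL and Authorization header are illustrative placeholders, since the real values are derived from BASE_URL, COMPLETION_PATH, and API_KEY at runtime:

```python
import httpx

body = {
    "messages": [{"role": "user", "content": "What is the capital of France?"}],
    "model": "gpt-4o",
    "stream": False,
    "temperature": 0.7,
    "top_p": 1.0,
    "max_tokens": 1024,
}

with httpx.Client(timeout=120.0) as client:
    # Assumed URL and auth header; yaicli builds these from its config at runtime.
    response = client.post(
        "https://api.openai.com/v1/chat/completions",
        headers={"Authorization": "Bearer <YAI_API_KEY>"},
        json=body,
    )
response.raise_for_status()
# Default ANSWER_PATH in the package is choices[0].message.content.
print(response.json()["choices"][0]["message"]["content"])
```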
@@ -315,6 +322,7 @@ class CLI:

  def _print_stream(self, response: httpx.Response) -> str:
  """Print response from LLM in streaming mode"""
+ self.console.print("Assistant:", style="bold green")
  full_completion = ""
  in_reasoning = False

@@ -332,30 +340,19 @@ class CLI:
  reason, full_completion, in_reasoning
  )
  else:
- content = delta.get("content", "") or ""
  full_completion, in_reasoning = self._process_regular_content(
- content, full_completion, in_reasoning
+ delta.get("content", "") or "", full_completion, in_reasoning
  )

  live.update(Markdown(markup=full_completion), refresh=True)
-
+ # self.console.print()
  return full_completion

- def _print_non_stream(self, response: httpx.Response) -> str:
+ def _print_normal(self, response: httpx.Response) -> str:
  """Print response from LLM in non-streaming mode"""
+ self.console.print("Assistant:", style="bold green")
  full_completion = jmespath.search(self.config.get("ANSWER_PATH", "choices[0].message.content"), response.json())
- self.console.print(Markdown(full_completion))
- return full_completion
-
- def _print(self, response: httpx.Response, stream: bool = True) -> str:
- """Print response from LLM and return full completion"""
- if stream:
- # Streaming response
- full_completion = self._print_stream(response)
- else:
- # Non-streaming response
- full_completion = self._print_non_stream(response)
- self.console.print() # Add a newline after the response to separate from the next input
+ self.console.print(Markdown(full_completion + '\n'))
  return full_completion

  def get_prompt_tokens(self) -> list[tuple[str, str]]:
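The streaming printer above re-renders the accumulated Markdown through rich's Live display on every delta. A minimal standalone sketch of that rendering pattern, with hard-coded chunks standing in for the streamed response:

```python
import time

from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown

console = Console()
chunks = ["The capital ", "of France ", "is **Paris**."]  # stand-in for streamed deltas
full_completion = ""

console.print("Assistant:", style="bold green")
with Live(console=console) as live:
    for chunk in chunks:
        full_completion += chunk
        # Re-render the whole accumulated Markdown on each chunk, as _print_stream does.
        live.update(Markdown(full_completion), refresh=True)
        time.sleep(0.2)
```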
@@ -368,9 +365,68 @@ class CLI:
  if len(self.history) > self.max_history_length:
  self.history = self.history[-self.max_history_length :]

+ def _handle_special_commands(self, user_input: str) -> Optional[bool]:
+ """Handle special command return: True-continue loop, False-exit loop, None-non-special command"""
+ if user_input.lower() == CMD_EXIT:
+ return False
+ if user_input.lower() == CMD_CLEAR and self.current_mode == CHAT_MODE:
+ self.history.clear()
+ self.console.print("Chat history cleared\n", style="bold yellow")
+ return True
+ if user_input.lower() == CMD_HISTORY:
+ self.console.print(self.history)
+ return True
+ return None
+
+ def _confirm_and_execute(self, content: str) -> None:
+ """Review, edit and execute the command"""
+ cmd = self._filter_command(content)
+ if not cmd:
+ self.console.print("No command generated", style="bold red")
+ return
+ self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {cmd}")
+ _input = Prompt.ask("Execute this command?", choices=['y', 'n', 'e'], default="n", case_sensitive=False)
+ if _input == 'y': # execute cmd
+ self.console.print("Output:", style="bold green")
+ subprocess.call(cmd, shell=True)
+ elif _input == 'e': # edit cmd
+ cmd = self.session.prompt("Edit command, press enter to execute:\n", key_bindings=None, default=cmd)
+ self.console.print("Output:", style="bold green")
+ subprocess.call(cmd, shell=True)
+
+ def _build_messages(self, user_input: str) -> list[dict[str, str]]:
+ return [
+ {"role": "system", "content": self.get_system_prompt()},
+ *self.history,
+ {"role": "user", "content": user_input}
+ ]
+
+ def _handle_llm_response(self, response: httpx.Response, user_input: str) -> str:
+ """Print LLM response and update history"""
+ content = self._print_stream(response) if self.is_stream() else self._print_normal(response)
+ self.history.extend([{"role": "user", "content": user_input}, {"role": "assistant", "content": content}])
+ self._check_history_len()
+ return content
+
+ def _process_user_input(self, user_input: str) -> bool:
+ """Process user input and generate response"""
+ try:
+ response = self.post(self._build_messages(user_input))
+ content = self._handle_llm_response(response, user_input)
+ if self.current_mode == EXEC_MODE:
+ self._confirm_and_execute(content)
+ return True
+ except Exception as e:
+ self.console.print(f"Error: {e}", style="red")
+ return False
+
+ def get_system_prompt(self) -> str:
+ """Return system prompt for current mode"""
+ prompt = SHELL_PROMPT if self.current_mode == EXEC_MODE else DEFAULT_PROMPT
+ return prompt.format(_os=self.detect_os(), _shell=self.detect_shell())
+
  def _run_repl(self) -> None:
  """Run REPL loop, handling user input and generating responses, saving history, and executing commands"""
- # Show REPL instructions
  self.prepare_chat_loop()
  self.console.print("""
  ██ ██ █████ ██ ██████ ██ ██
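The _confirm_and_execute method added above replaces the old yes/no Confirm with a three-way y/n/e prompt. A minimal standalone sketch of that confirm-edit-run flow, using plain input() in place of the prompt_toolkit session for the edit step:

```python
import subprocess

from rich.prompt import Prompt

cmd = "du -sh ."
print(f"Generated command: {cmd}")
choice = Prompt.ask("Execute this command?", choices=["y", "n", "e"], default="n")
if choice == "e":
    # Simplified edit step; yaicli pre-fills the command via its prompt_toolkit session.
    cmd = input("Edit command, press enter to execute:\n") or cmd
if choice in ("y", "e"):
    print("Output:")
    subprocess.call(cmd, shell=True)
```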
@@ -379,13 +435,13 @@ class CLI:
  ██ ██ ██ ██ ██ ██ ██
  ██ ██ ██ ██ ██████ ███████ ██
  """)
- self.console.print("[bold]Press TAB to change in chat and exec mode[/bold]")
- self.console.print("[bold]Type /clear to clear chat history[/bold]")
- self.console.print("[bold]Type /his to see chat history[/bold]")
- self.console.print("[bold]Press Ctrl+C or type /exit to exit[/bold]\n")
+ self.console.print("Press TAB to change in chat and exec mode", style="bold")
+ self.console.print("Type /clear to clear chat history", style="bold")
+ self.console.print("Type /his to see chat history", style="bold")
+ self.console.print("Press Ctrl+C or type /exit to exit\n", style="bold")

  while True:
- # Get user input
+ self.console.print(Markdown("---"))
  user_input = self.session.prompt(self.get_prompt_tokens).strip()
  if not user_input:
  continue
@@ -402,88 +458,21 @@ class CLI:
  elif user_input.lower() == CMD_HISTORY:
  self.console.print(self.history)
  continue
- # Create appropriate system prompt based on mode
- system_prompt = SHELL_PROMPT if self.current_mode == EXEC_MODE else DEFAULT_PROMPT
- system_content = system_prompt.format(_os=self.detect_os(), _shell=self.detect_shell())
-
- # Create message with system prompt and history
- message = [{"role": "system", "content": system_content}]
- message.extend(self.history)
-
- # Add current user message
- message.append({"role": "user", "content": user_input})
-
- # Get response from LLM
- try:
- response = self.post(message)
- except ValueError as e:
- self.console.print(f"[red]Error: {e}[/red]")
- return
- except (httpx.ConnectError, httpx.HTTPStatusError) as e:
- self.console.print(f"[red]Error: {e}[/red]")
+ if not self._process_user_input(user_input):
  continue
- self.console.print("\n[bold green]Assistant:[/bold green]")
- try:
- content = self._print(response, stream=self.config["STREAM"] == "true")
- except Exception as e:
- self.console.print(f"[red]Unknown Error: {e}[/red]")
- continue
-
- # Add user input and assistant response to history
- self.history.append({"role": "user", "content": user_input})
- self.history.append({"role": "assistant", "content": content})
-
- self._check_history_len()
-
- # Handle command execution in exec mode
- if self.current_mode == EXEC_MODE:
- content = self._filter_command(content)
- if not content:
- self.console.print("[bold red]No command generated[/bold red]")
- continue
- self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {content}")
- if Confirm.ask("Execute this command?", default=False):
- subprocess.call(content, shell=True)

  self.console.print("[bold green]Exiting...[/bold green]")

  def _run_once(self, prompt: str, shell: bool = False) -> None:
  """Run once with given prompt"""
- _os = self.detect_os()
- _shell = self.detect_shell()
- # Create appropriate system prompt based on mode
- system_prompt = SHELL_PROMPT if shell else DEFAULT_PROMPT
- system_content = system_prompt.format(_os=_os, _shell=_shell)
-
- # Create message with system prompt and user input
- message = [
- {"role": "system", "content": system_content},
- {"role": "user", "content": prompt},
- ]

- # Get response from LLM
  try:
- response = self.post(message)
- except (ValueError, httpx.ConnectError, httpx.HTTPStatusError) as e:
- self.console.print(f"[red]Error: {e}[/red]")
- return
+ response = self.post(self._build_messages(prompt))
+ content = self._handle_llm_response(response, prompt)
+ if shell:
+ self._confirm_and_execute(content)
  except Exception as e:
- self.console.print(f"[red]Unknown Error: {e}[/red]")
- return
- self.console.print("\n[bold green]Assistant:[/bold green]")
- content = self._print(response, stream=self.config["STREAM"] == "true")
-
- # Handle shell mode execution
- if shell:
- content = self._filter_command(content)
- if not content:
- self.console.print("[bold red]No command generated[/bold red]")
- return
- self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {content}")
- if Confirm.ask("Execute this command?", default=False):
- returncode = subprocess.call(content, shell=True)
- if returncode != 0:
- self.console.print(f"[bold red]Command failed with return code {returncode}[/bold red]")
+ self.console.print(f"[red]Error: {e}[/red]")

  def run(self, chat: bool, shell: bool, prompt: str) -> None:
  """Run the CLI"""
@@ -493,12 +482,11 @@ class CLI:
  "[yellow]API key not set. Please set in ~/.config/yaicli/config.ini or AI_API_KEY env[/]"
  )
  raise typer.Exit(code=1)
-
- # Handle chat mode
  if chat:
  self.current_mode = CHAT_MODE
  self._run_repl()
  else:
+ self.current_mode = EXEC_MODE if shell else TEMP_MODE
  self._run_once(prompt, shell)

@@ -507,13 +495,13 @@ def main(
  ctx: typer.Context,
  prompt: Annotated[Optional[str], typer.Argument(show_default=False, help="The prompt send to the LLM")] = None,
  chat: Annotated[
- bool, typer.Option("--chat", "-c", help="Start in chat mode", rich_help_panel="Run Option")
+ bool, typer.Option("--chat", "-c", help="Start in chat mode", rich_help_panel="Run Options")
  ] = False,
  shell: Annotated[
- bool, typer.Option("--shell", "-s", help="Generate and execute shell command", rich_help_panel="Run Option")
+ bool, typer.Option("--shell", "-s", help="Generate and execute shell command", rich_help_panel="Run Options")
  ] = False,
  verbose: Annotated[
- bool, typer.Option("--verbose", "-V", help="Show verbose information", rich_help_panel="Run Option")
+ bool, typer.Option("--verbose", "-V", help="Show verbose information", rich_help_panel="Run Options")
  ] = False,
  template: Annotated[bool, typer.Option("--template", help="Show the config template.")] = False,
  ):
@@ -1,7 +0,0 @@
- pyproject.toml,sha256=NZNzmBt0C_yyfCeVmJeLv05N-WyhNhI2oZCWP6A_nZo,1530
- yaicli.py,sha256=aYb-EIxPpT6hfQJ9gulTb1lBWaB7IanoNW4U3ZcFqi0,21263
- yaicli-0.0.12.dist-info/METADATA,sha256=KXtToNAbzOpGOtq7Wt-ofFj0_LM7kwvSUlrETWR8uwY,28095
- yaicli-0.0.12.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- yaicli-0.0.12.dist-info/entry_points.txt,sha256=gdduQwAuu_LeDqnDU81Fv3NPmD2tRQ1FffvolIP3S1Q,34
- yaicli-0.0.12.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- yaicli-0.0.12.dist-info/RECORD,,