yaicli 0.6.3__py3-none-any.whl → 0.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyproject.toml CHANGED
@@ -1,6 +1,6 @@
 [project]
 name = "yaicli"
-version = "0.6.3"
+version = "0.6.4"
 description = "A simple CLI tool to interact with LLM"
 authors = [{ name = "belingud", email = "im.victor@qq.com" }]
 readme = "README.md"
@@ -42,6 +42,15 @@ keywords = [
     "anthropic",
     "groq",
     "cohere",
+    "huggingface",
+    "chatglm",
+    "sambanova",
+    "siliconflow",
+    "xai",
+    "vertexai",
+    "deepseek",
+    "modelscope",
+    "ollama",
 ]
 dependencies = [
     "click>=8.1.8",
@@ -70,11 +79,15 @@ all = [
     "ollama>=0.5.1",
     "cohere>=5.15.0",
     "google-genai>=1.20.0",
+    "huggingface-hub>=0.33.0",
 ]
 doubao = ["volcengine-python-sdk>=3.0.15"]
 ollama = ["ollama>=0.5.1"]
 cohere = ["cohere>=5.15.0"]
 gemini = ["google-genai>=1.20.0"]
+huggingface = [
+    "huggingface-hub>=0.33.0",
+]
 
 [tool.pytest.ini_options]
 testpaths = ["tests"]
yaicli/cli.py CHANGED
@@ -267,7 +267,7 @@ class CLI:
             assistant_msg = self.chat.history[i + 1] if (i + 1) < len(self.chat.history) else None
             self.console.print(f"[dim]{i // 2 + 1}[/dim] [bold blue]User:[/bold blue] {user_msg.content}")
             if assistant_msg:
-                md = Markdown(assistant_msg.content, code_theme=cfg["CODE_THEME"])
+                md = Markdown(assistant_msg.content or "", code_theme=cfg["CODE_THEME"])
                 padded_md = Padding(md, (0, 0, 0, 4))
                 self.console.print(" Assistant:", style="bold green")
                 self.console.print(padded_md)
@@ -384,7 +384,7 @@ class CLI:
         self._check_history_len()
 
         if self.current_mode == EXEC_MODE:
-            self._confirm_and_execute(content)
+            self._confirm_and_execute(content or "")
         return True
 
     def _confirm_and_execute(self, raw_content: str) -> None:
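Both fixes coalesce possibly-None message content into an empty string before handing it to a consumer that expects `str`. A minimal sketch of the failure mode, using rich directly rather than yaicli's actual classes:

```python
from rich.console import Console
from rich.markdown import Markdown

console = Console()
content = None  # e.g. an assistant turn that produced only a tool call

# Markdown(None) fails inside the parser, which expects a str;
# coalescing with `or ""` renders an empty body instead of crashing:
console.print(Markdown(content or "", code_theme="monokai"))
```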
yaicli/config.py CHANGED
@@ -142,7 +142,7 @@ class Config(dict):
             if target_type is bool:
                 converted_value = str2bool(raw_value)
             elif target_type in (int, float, str):
-                converted_value = target_type(raw_value)
+                converted_value = target_type(raw_value) if raw_value else raw_value
             elif target_type is dict and raw_value:
                 converted_value = json.loads(raw_value)
         except (ValueError, TypeError, json.JSONDecodeError) as e:
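The guard matters because `int("")` and `float("")` raise `ValueError`, so an unset config value previously fell into the exception handler instead of converting. A standalone sketch of the new behavior (not the actual `Config` class):

```python
def convert(raw_value: str, target_type: type):
    # Empty strings cannot be cast to int/float; keep them as-is
    # instead of raising ValueError, mirroring the guard above.
    if target_type in (int, float, str):
        return target_type(raw_value) if raw_value else raw_value
    return raw_value

print(convert("8192", int))  # -> 8192
print(convert("", int))      # -> "" (previously: ValueError)
```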
yaicli/const.py CHANGED
@@ -1,5 +1,5 @@
1
1
  try:
2
- from enum import StrEnum
2
+ from enum import StrEnum # type: ignore
3
3
  except ImportError:
4
4
  from enum import Enum
5
5
 
@@ -68,7 +68,7 @@ DEFAULT_JUSTIFY: JustifyMethod = "default"
68
68
  DEFAULT_ROLE_MODIFY_WARNING: BOOL_STR = "true"
69
69
  DEFAULT_ENABLE_FUNCTIONS: BOOL_STR = "true"
70
70
  DEFAULT_SHOW_FUNCTION_OUTPUT: BOOL_STR = "true"
71
- DEFAULT_REASONING_EFFORT: Optional[Literal["low", "high", "medium"]] = ""
71
+ DEFAULT_REASONING_EFFORT: Optional[Literal["low", "high", "medium"]] = None
72
72
 
73
73
 
74
74
  SHELL_PROMPT = """You are YAICLI, a shell command generator.
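`enum.StrEnum` only exists on Python 3.11+, hence the try/except; the new `# type: ignore` silences checkers whose target stdlib lacks the symbol. The except branch is truncated in the diff, so the fallback class below is an assumption about its usual shape:

```python
try:
    from enum import StrEnum  # type: ignore  # Python >= 3.11 only
except ImportError:
    from enum import Enum

    class StrEnum(str, Enum):  # assumed fallback: str-valued enum members
        """Backport-style stand-in for enum.StrEnum on Python < 3.11."""
```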
yaicli/llms/provider.py CHANGED
@@ -43,10 +43,13 @@ class ProviderFactory:
         "chatglm": (".providers.chatglm_provider", "ChatglmProvider"),
         "chutes": (".providers.chutes_provider", "ChutesProvider"),
         "cohere": (".providers.cohere_provider", "CohereProvider"),
+        "cohere-bedrock": (".providers.cohere_provider", "CohereBadrockProvider"),
+        "cohere-sagemaker": (".providers.cohere_provider", "CohereSagemakerProvider"),
         "deepseek": (".providers.deepseek_provider", "DeepSeekProvider"),
         "doubao": (".providers.doubao_provider", "DoubaoProvider"),
         "gemini": (".providers.gemini_provider", "GeminiProvider"),
         "groq": (".providers.groq_provider", "GroqProvider"),
+        "huggingface": (".providers.huggingface_provider", "HuggingFaceProvider"),
         "infini-ai": (".providers.infiniai_provider", "InfiniAIProvider"),
         "minimax": (".providers.minimax_provider", "MinimaxProvider"),
         "modelscope": (".providers.modelscope_provider", "ModelScopeProvider"),
yaicli/llms/providers/chatglm_provider.py CHANGED
@@ -1,5 +1,5 @@
 import json
-from typing import Any, Dict, Generator, Optional
+from typing import Generator, Optional
 
 from openai._streaming import Stream
 from openai.types.chat.chat_completion import ChatCompletion, Choice
@@ -14,10 +14,14 @@ class ChatglmProvider(OpenAIProvider):
 
     DEFAULT_BASE_URL = "https://open.bigmodel.cn/api/paas/v4/"
 
-    def get_completion_params(self) -> Dict[str, Any]:
-        params = super().get_completion_params()
-        params["max_tokens"] = params.pop("max_completion_tokens")
-        return params
+    COMPLETION_PARAMS_KEYS = {
+        "model": "MODEL",
+        "temperature": "TEMPERATURE",
+        "top_p": "TOP_P",
+        "max_tokens": "MAX_TOKENS",
+        "do_sample": "DO_SAMPLE",
+        "extra_body": "EXTRA_BODY",
+    }
 
     def _handle_normal_response(self, response: ChatCompletion) -> Generator[LLMResponse, None, None]:
         """Handle normal (non-streaming) response
yaicli/llms/providers/cohere_provider.py CHANGED
@@ -10,7 +10,8 @@ This module implements Cohere provider classes for different deployment options:
 from typing import Any, Dict, Generator, List, Optional
 
 from cohere import BedrockClientV2, ClientV2, SagemakerClientV2
-from cohere.types.tool_call_v2 import ToolCallV2, ToolCallV2Function
+from cohere.types.tool_call_v2 import ToolCallV2
+from cohere.types.tool_call_v2function import ToolCallV2Function
 
 from ...config import cfg
 from ...console import get_console
@@ -179,7 +180,9 @@ class CohereProvider(Provider):
                 continue
             elif chunk.type == "tool-call-delta":
                 # Tool call arguments being generated: cohere.types.chat_tool_call_delta_event_delta_message.ChatToolCallDeltaEventDeltaMessage
-                tool_call.arguments += chunk.delta.message.tool_calls.function.arguments
+                if not tool_call:
+                    continue
+                tool_call.arguments += chunk.delta.message.tool_calls.function.arguments or ""
                 # Waiting for tool-call-end event
                 continue
 
@@ -292,7 +295,7 @@ class CohereBadrockProvider(CohereProvider):
         return self.CLIENT_CLS(**self.client_params)
 
 
-class CohereSagemaker(CohereBadrockProvider):
+class CohereSagemakerProvider(CohereBadrockProvider):
     """Cohere provider for AWS Sagemaker integration"""
 
     CLIENT_CLS = SagemakerClientV2
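The hardened branch skips argument deltas that arrive before any `tool-call-start` opened a call, and coalesces `None` argument fragments. A self-contained sketch of the accumulation logic, with tuples standing in for Cohere's streamed events:

```python
tool_call_args = None  # becomes "" once a "tool-call-start" event arrives

# Stand-ins for streamed events: (event_type, argument_fragment)
events = [
    ("tool-call-delta", '{"x": 1}'),  # delta before any start: must be skipped
    ("tool-call-start", None),
    ("tool-call-delta", '{"city": "'),
    ("tool-call-delta", None),        # None fragment: adds nothing
    ("tool-call-delta", 'Paris"}'),
]

for event_type, fragment in events:
    if event_type == "tool-call-start":
        tool_call_args = ""
    elif event_type == "tool-call-delta":
        if tool_call_args is None:
            continue  # delta without an open call, as in the guard above
        tool_call_args += fragment or ""

print(tool_call_args)  # {"city": "Paris"}
```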
yaicli/llms/providers/deepseek_provider.py CHANGED
@@ -10,5 +10,6 @@ class DeepSeekProvider(OpenAIProvider):
 
     def get_completion_params(self) -> Dict[str, Any]:
         params = super().get_completion_params()
-        params["max_tokens"] = params.pop("max_completion_tokens")
+        if "max_completion_tokens" in params:
+            params["max_tokens"] = params.pop("max_completion_tokens")
         return params
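With parameter keys now declared per class, the parent's `get_completion_params()` may no longer emit `max_completion_tokens`, so the unconditional `dict.pop()` (which raises `KeyError` for a missing key when no default is given) is wrapped in a membership check. The same guard is applied to the InfiniAI, ModelScope, and SiliconFlow providers below. In isolation:

```python
params = {"model": "deepseek-chat", "max_completion_tokens": 1024}

# Rename only if the parent actually produced the OpenAI-style key;
# an unguarded pop would raise KeyError when the key is absent.
if "max_completion_tokens" in params:
    params["max_tokens"] = params.pop("max_completion_tokens")

print(params)  # {'model': 'deepseek-chat', 'max_tokens': 1024}
```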
yaicli/llms/providers/gemini_provider.py CHANGED
@@ -88,7 +88,9 @@ class GeminiProvider(Provider):
             content = types.Content(role=self._map_role(msg.role), parts=[types.Part(text=msg.content)])
             if msg.role == "tool":
                 content.role = "user"
-                content.parts = [types.Part.from_function_response(name=msg.name, response={"result": msg.content})]
+                content.parts = [
+                    types.Part.from_function_response(name=msg.name or "", response={"result": msg.content})
+                ]
             converted_messages.append(content)
         return converted_messages
 
@@ -137,14 +139,14 @@ class GeminiProvider(Provider):
         self.console.print(gemini_messages)
         chat_config = self.get_chat_config()
         chat_config.system_instruction = messages[0].content
-        chat = self.client.chats.create(model=self.config["MODEL"], history=gemini_messages, config=chat_config)
+        chat = self.client.chats.create(model=self.config["MODEL"], history=gemini_messages, config=chat_config)  # type: ignore
         message = messages[-1].content
 
         if stream:
-            response = chat.send_message_stream(message=message)
+            response = chat.send_message_stream(message=message)  # type: ignore
             yield from self._handle_stream_response(response)
         else:
-            response = chat.send_message(message=message)
+            response = chat.send_message(message=message)  # type: ignore
             yield from self._handle_normal_response(response)
 
     def _handle_normal_response(self, response) -> Generator[LLMResponse, None, None]:
@@ -158,7 +160,7 @@ class GeminiProvider(Provider):
             return
         for part in response.candidates[0].content.parts:
             if part.thought:
-                yield LLMResponse(reasoning=part.text, content=None, finish_reason="stop")
+                yield LLMResponse(reasoning=part.text, finish_reason="stop")
             else:
                 yield LLMResponse(reasoning=None, content=part.text, finish_reason="stop")
 
@@ -181,7 +183,7 @@ class GeminiProvider(Provider):
                 reasoning = None
             yield LLMResponse(
                 reasoning=reasoning,
-                content=content,
+                content=content or "",
                 tool_call=tool_call if finish_reason == "tool_calls" else None,
                 finish_reason=finish_reason or None,
            )
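Besides the `# type: ignore` annotations, the substantive fix coalesces `msg.name` before calling `types.Part.from_function_response`, which expects a string name. A minimal sketch against google-genai; assumes the package is installed, and the helper name is illustrative:

```python
from google.genai import types

def tool_result_part(name, result):
    # msg.name can be None for non-tool messages; the API wants a str.
    return types.Part.from_function_response(name=name or "", response={"result": result})
```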
yaicli/llms/providers/huggingface_provider.py ADDED
@@ -0,0 +1,40 @@
+from typing import Any, Dict
+
+from huggingface_hub import InferenceClient
+
+from .chatglm_provider import ChatglmProvider
+
+
+class HuggingFaceProvider(ChatglmProvider):
+    """
+    HuggingFaceProvider is a provider for the HuggingFace API.
+    """
+
+    CLIENT_CLS = InferenceClient
+    DEFAULT_PROVIDER = "hf-inference"
+
+    COMPLETION_PARAMS_KEYS = {
+        "model": "MODEL",
+        "temperature": "TEMPERATURE",
+        "top_p": "TOP_P",
+        "max_tokens": "MAX_TOKENS",
+        "extra_body": "EXTRA_BODY",
+    }
+
+    def get_client_params(self) -> Dict[str, Any]:
+        client_params = {
+            "api_key": self.config["API_KEY"],
+            "timeout": self.config["TIMEOUT"],
+            "provider": self.config.get("HF_PROVIDER") or self.DEFAULT_PROVIDER,
+        }
+        if self.config["BASE_URL"]:
+            client_params["base_url"] = self.config["BASE_URL"]
+        if self.config["EXTRA_HEADERS"]:
+            client_params["headers"] = {
+                **self.config["EXTRA_HEADERS"],
+                "X-Title": self.APP_NAME,
+                "HTTP-Referer": self.APP_REFERER,
+            }
+        if self.config.get("BILL_TO"):
+            client_params["bill_to"] = self.config["BILL_TO"]
+        return client_params
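A hedged usage sketch of the client this provider wraps: `huggingface_hub.InferenceClient` accepts the `provider`, `timeout`, `headers`, and `bill_to` arguments that `get_client_params()` assembles, and exposes an OpenAI-style chat completion call. Model and provider names below are examples only:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(
    api_key="hf_...",         # your HF token
    provider="sambanova",     # routing target; defaults to "hf-inference"
    timeout=60,
)
resp = client.chat_completion(
    model="deepseek-ai/DeepSeek-R1-0528",
    messages=[{"role": "user", "content": "Hello"}],
    max_tokens=128,
)
print(resp.choices[0].message.content)
```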
yaicli/llms/providers/infiniai_provider.py CHANGED
@@ -1,5 +1,6 @@
 from typing import Any, Dict
 
+from ...config import cfg
 from .openai_provider import OpenAIProvider
 
 
@@ -8,7 +9,7 @@ class InfiniAIProvider(OpenAIProvider):
 
     DEFAULT_BASE_URL = "https://cloud.infini-ai.com/maas/v1"
 
-    def __init__(self, config: dict = ..., **kwargs):
+    def __init__(self, config: dict = cfg, **kwargs):
         super().__init__(config, **kwargs)
         if self.enable_function:
             self.console.print("InfiniAI does not support functions, disabled", style="yellow")
@@ -16,5 +17,6 @@ class InfiniAIProvider(OpenAIProvider):
 
     def get_completion_params(self) -> Dict[str, Any]:
         params = super().get_completion_params()
-        params["max_tokens"] = params.pop("max_completion_tokens")
+        if "max_completion_tokens" in params:
+            params["max_tokens"] = params.pop("max_completion_tokens")
         return params
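The `...` (Ellipsis) default was a placeholder that broke any no-argument construction, since Ellipsis is not a dict; defaulting to the shared `cfg` object restores the intended fallback. In isolation:

```python
def old_init(config: dict = ...):
    return config["MODEL"]  # TypeError: 'ellipsis' object is not subscriptable

cfg = {"MODEL": "some-model"}  # stand-in for yaicli's global cfg

def new_init(config: dict = cfg):
    return config["MODEL"]  # falls back to the shared config

print(new_init())  # some-model
```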
yaicli/llms/providers/modelscope_provider.py CHANGED
@@ -10,5 +10,6 @@ class ModelScopeProvider(OpenAIProvider):
 
     def get_completion_params(self) -> Dict[str, Any]:
         params = super().get_completion_params()
-        params["max_tokens"] = params.pop("max_completion_tokens")
+        if "max_completion_tokens" in params:
+            params["max_tokens"] = params.pop("max_completion_tokens")
         return params
yaicli/llms/providers/openai_provider.py CHANGED
@@ -19,7 +19,7 @@ class OpenAIProvider(Provider):
     DEFAULT_BASE_URL = "https://api.openai.com/v1"
     CLIENT_CLS = openai.OpenAI
     # Base mapping between config keys and API parameter names
-    _BASE_COMPLETION_PARAMS_KEYS = {
+    COMPLETION_PARAMS_KEYS = {
         "model": "MODEL",
         "temperature": "TEMPERATURE",
         "top_p": "TOP_P",
@@ -69,7 +69,7 @@ class OpenAIProvider(Provider):
         Returns:
             Dict[str, str]: Mapping from API parameter names to config keys
         """
-        return self._BASE_COMPLETION_PARAMS_KEYS.copy()
+        return self.COMPLETION_PARAMS_KEYS.copy()
 
     def get_completion_params(self) -> Dict[str, Any]:
         """
@@ -89,7 +89,7 @@ class OpenAIProvider(Provider):
         """Convert a list of ChatMessage objects to a list of OpenAI message dicts."""
         converted_messages = []
         for msg in messages:
-            message = {"role": msg.role, "content": msg.content or ""}
+            message: Dict[str, Any] = {"role": msg.role, "content": msg.content or ""}
 
             if msg.name:
                 message["name"] = msg.name
yaicli/llms/providers/siliconflow_provider.py CHANGED
@@ -10,5 +10,6 @@ class SiliconFlowProvider(OpenAIProvider):
 
     def get_completion_params(self) -> Dict[str, Any]:
         params = super().get_completion_params()
-        params["max_tokens"] = params.pop("max_completion_tokens")
+        if "max_completion_tokens" in params:
+            params["max_tokens"] = params.pop("max_completion_tokens")
         return params
{yaicli-0.6.3.dist-info → yaicli-0.6.4.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: yaicli
-Version: 0.6.3
+Version: 0.6.4
 Summary: A simple CLI tool to interact with LLM
 Project-URL: Homepage, https://github.com/belingud/yaicli
 Project-URL: Repository, https://github.com/belingud/yaicli
@@ -208,7 +208,7 @@ License: Apache License
        See the License for the specific language governing permissions and
        limitations under the License.
 License-File: LICENSE
-Keywords: ai,ai-assistant,ai-chat,ai-interaction,anthropic,chatgpt,claude,cli,cohere,command-line,completion,console-application,conversation,gemini,gpt,groq,inference,interactive,language-model,llm,llms,mistral,nlp,openai,prompt,python-tool,shell-integration,terminal,terminal-interface,text-generation
+Keywords: ai,ai-assistant,ai-chat,ai-interaction,anthropic,chatglm,chatgpt,claude,cli,cohere,command-line,completion,console-application,conversation,deepseek,gemini,gpt,groq,huggingface,inference,interactive,language-model,llm,llms,mistral,modelscope,nlp,ollama,openai,prompt,python-tool,sambanova,shell-integration,siliconflow,terminal,terminal-interface,text-generation,vertexai,xai
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
@@ -226,6 +226,7 @@ Requires-Dist: typer>=0.16.0
 Provides-Extra: all
 Requires-Dist: cohere>=5.15.0; extra == 'all'
 Requires-Dist: google-genai>=1.20.0; extra == 'all'
+Requires-Dist: huggingface-hub>=0.33.0; extra == 'all'
 Requires-Dist: ollama>=0.5.1; extra == 'all'
 Requires-Dist: volcengine-python-sdk>=3.0.15; extra == 'all'
 Provides-Extra: cohere
@@ -234,6 +235,8 @@ Provides-Extra: doubao
 Requires-Dist: volcengine-python-sdk>=3.0.15; extra == 'doubao'
 Provides-Extra: gemini
 Requires-Dist: google-genai>=1.20.0; extra == 'gemini'
+Provides-Extra: huggingface
+Requires-Dist: huggingface-hub>=0.33.0; extra == 'huggingface'
 Provides-Extra: ollama
 Requires-Dist: ollama>=0.5.1; extra == 'ollama'
 Description-Content-Type: text/markdown
@@ -326,14 +329,8 @@ Yaicli has several optional dependencies group, you can copy below commands to i
 # install all dependencies
 pip install 'yaicli[all]'
 
-# install with ollama support
-pip instsall 'yaicli[ollama]'
-
-# install with cohere support
-pip install 'yaicli[cohere]'
-
-# install with doubao support
-pip install 'yaicli[doubao]'
+# install with specific provider support
+pip install 'yaicli[ollama,cohere,doubao,huggingface,gemini]'
 ```
 
 Install by `uv`.
@@ -342,14 +339,8 @@ Install by `uv`.
 # install all dependencies
 uv tool install 'yaicli[all]'
 
-# install with ollama support
-uv tool instsall 'yaicli[ollama]'
-
-# install with cohere support
-uv tool install 'yaicli[cohere]'
-
-# install with doubao support
-uv tool install 'yaicli[doubao]'
+# install with specific provider support
+uv tool install 'yaicli[ollama,cohere,doubao,huggingface,gemini]'
 ```
 
 ### Install from Source
@@ -360,6 +351,31 @@ cd yaicli
 pip install .
 ```
 
+## Built-in Supported Providers
+
+- AI21
+- Chatglm
+- Chutes
+- Cohere
+- Cohere Bedrock
+- Cohere Sagemaker
+- Deepseek
+- Doubao
+- Gemini
+- Vertex AI
+- Groq
+- Huggingface
+- Minimax
+- ModelScope
+- Ollama
+- OpenAI
+- Sambanova
+- Siliconflow
+- Targon
+- X AI
+- Yi
+- Unlimited OpenAI-compatible providers
+
 ## ⚙️ Configuration
 
 YAICLI uses a simple configuration file to store your preferences and API keys.
@@ -497,6 +513,15 @@ API_KEY=
 MODEL=gpt-4o
 ```
 
+Extra params:
+
+```ini
+# REASONING_EFFORT: [high, medium, low]
+REASONING_EFFORT=
+```
+
+See the official docs for more details: https://platform.openai.com/docs/guides/reasoning?api-mode=chat
+
 #### Deepseek
 
 ```ini
@@ -513,6 +538,48 @@ API_KEY=
 MODEL=deepseek/deepseek-chat-v3-0324
 ```
 
+#### Gemini
+
+Basic config:
+
+```ini
+PROVIDER=gemini
+API_KEY=
+MODEL=gemini-2.5-flash
+```
+
+Extra params:
+
+```ini
+TOP_K=
+PRESENCE_PENALTY=
+FREQUENCY_PENALTY=
+SEED=
+THINKING_BUDGET=
+API_VERSION=
+BASE_URL=
+```
+
+#### Vertex AI
+
+```ini
+PROVIDER=vertexai
+MODEL=gemini-2.5-flash
+PROJECT=
+LOCATION=
+```
+
+#### Huggingface
+
+```ini
+HF_PROVIDER=sambanova
+PROVIDER=huggingface
+API_KEY=
+MODEL=deepseek-ai/DeepSeek-R1-0528
+```
+
+See the official docs for `HF_PROVIDER`: https://huggingface.co/docs/inference-providers/index
+
 #### Groq
 
 ```ini
@@ -537,6 +604,15 @@ API_KEY=
 MODEL=glm-4-plus
 ```
 
+Extra params:
+
+Check the official docs: https://bigmodel.cn/dev/api/normal-model/glm-4
+
+```ini
+# true or false
+DO_SAMPLE=
+```
+
 #### Chutes
 
 ```ini
@@ -561,6 +637,16 @@ API_KEY=
 MODEL=DeepSeek-V3-0324
 ```
 
+Only the models below support tool calls:
+
+- Meta-Llama-3.1-8B-Instruct
+- Meta-Llama-3.1-405B-Instruct
+- Meta-Llama-3.3-70B-Instruct
+- Llama-4-Scout-17B-16E-Instruct
+- DeepSeek-V3-0324
+
+See the official docs for more details: https://docs.sambanova.ai/cloud/docs/capabilities/function-calling
+
 #### ModelScope
 
 ```ini
@@ -604,6 +690,45 @@ API_KEY=
 MODEL=command-a-03-2025
 ```
 
+Check the official docs: https://docs.cohere.com/docs/text-gen-quickstart
+
+Supported keys:
+
+```ini
+ENVIRONMENT=
+```
+
+For private deployments and the Azure API, you need to set BASE_URL.
+
+```ini
+PROVIDER=cohere
+API_KEY=
+MODEL=command-a-03-2025
+BASE_URL=<YOUR_ENDPOINT>
+```
+
+For the Bedrock and Sagemaker Cohere APIs, you have to set the keys below:
+
+See https://docs.cohere.com/docs/text-gen-quickstart.
+
+```ini
+PROVIDER=cohere-bedrock
+; PROVIDER=cohere-sagemaker
+API_KEY=
+MODEL=command-a-03-2025
+
+AWS_REGION=xx
+AWS_ACCESS_KEY_ID=xx
+AWS_SECRET_ACCESS_KEY=xx
+AWS_SESSION_TOKEN=xx
+```
+
+Note: `MODEL` for Sagemaker should be the endpoint name.
+
+```ini
+MODEL=<YOUR_ENDPOINT_NAME>
+```
+
 #### Doubao
 
 You have to install the doubao dependencies: `pip install 'yaicli[doubao]'`
{yaicli-0.6.3.dist-info → yaicli-0.6.4.dist-info}/RECORD RENAMED
@@ -1,10 +1,10 @@
-pyproject.toml,sha256=BfvXPlqqvIqhYBItAj3HclRLukitrn0kdwIYxdUJBgU,2531
+pyproject.toml,sha256=LQv7NHuPZjn7h03OWDzftK8V0G_OG0626EkpVEUh4IA,2756
 yaicli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 yaicli/chat.py,sha256=_emvZEdgMBth2nQGaNWPf0P45oW2k3bpuIwqsxFcM5A,13676
-yaicli/cli.py,sha256=Eu1CL9ZB8ElvXqpHqmoWoasC0Brc7-j_zd3RmPhdSEE,23310
-yaicli/config.py,sha256=HrWYcelLXE61XX719eVcuuo3292xxf1BNQznWdvjQFQ,6535
+yaicli/cli.py,sha256=s8Bj4MSQmLblh4fHBPKS-DvJoAdMmp64KC7e7BwzmOs,23322
+yaicli/config.py,sha256=_xLHgyW5dDg76bL1SyTcrQCnVs4dFpXafIS3sClshK0,6563
 yaicli/console.py,sha256=vARPJd-3lafutsQWrGntQVjLrYqaJD3qisN82pmuhjU,1973
-yaicli/const.py,sha256=Uvdm1rc5zhjE2r9ioCYiSzhk8cT4mfgO2Mm4mNs71Nk,8176
+yaicli/const.py,sha256=G-EhMsSfOjKQLBhpOAc3pRtjvKmmWrwyyuyuGKHt7wk,8194
 yaicli/entry.py,sha256=Q1eqLE7tcHide7ooyPO7OCJpKE2YVuxR-NNFA2Pt2Hw,8693
 yaicli/exceptions.py,sha256=WBYg8OTJJzaj7lt6HE7ZyBoe5T6A3yZRNCRfWd4iN0c,372
 yaicli/history.py,sha256=s-57X9FMsaQHF7XySq1gGH_jpd_cHHTYafYu2ECuG6M,2472
@@ -18,29 +18,30 @@ yaicli/functions/__init__.py,sha256=_FJooQ9GkijG8xLwuU0cr5GBrGnC9Nc6bnCeUjrsT0k,
 yaicli/functions/buildin/execute_shell_command.py,sha256=unl1-F8p6QZajeHdA0u5UpURMJM0WhdWMUWCCCHVRcI,1320
 yaicli/llms/__init__.py,sha256=x78cJujrJkelXPnzHS6pzHkITZdgLYZqJMnrMHbptoc,134
 yaicli/llms/client.py,sha256=mkE9KHSuPcJfpNQXbzF2YXGkel3jrOW8KfQ3YYpaK4M,4453
-yaicli/llms/provider.py,sha256=YhX6RcMQqhac4EoQTY_AXDm-jtHYfH_K7Jikqvpc3H8,3159
+yaicli/llms/provider.py,sha256=jF15kmY_tZVOjlw0fbHQkEvlmOX57-HBhILzG0KvXyo,3412
 yaicli/llms/providers/ai21_provider.py,sha256=SvgGj9_87KEqmxCMLbtsSkT8J3rUD7Mb21UF7pMWsks,3035
-yaicli/llms/providers/chatglm_provider.py,sha256=1xP4KVAi6SDKZ-lMi2wdzywtDydsTf6jDzh3jBBGMfA,6437
+yaicli/llms/providers/chatglm_provider.py,sha256=QEzALvY5FBhuDCx6rHjLi7GSRTwTHNUwo8gg4FEdrxs,6466
 yaicli/llms/providers/chutes_provider.py,sha256=mtvWvRRfHPH3JFfzym87wXtPNiMpLnur3805N9acx7E,882
-yaicli/llms/providers/cohere_provider.py,sha256=hc6vQxbCHz9kM2tNKK-kGkuOf4-gkskXW9ctr9V4Cxk,10837
-yaicli/llms/providers/deepseek_provider.py,sha256=VjGes_jFin5WGYNFxYKMoHwgAQX_eYbYhQKfjeh-9eI,438
+yaicli/llms/providers/cohere_provider.py,sha256=1UPzNqNOwM4_dsP4kvUaL9O6_bKjxm1lO6A0lM7hgS4,10959
+yaicli/llms/providers/deepseek_provider.py,sha256=iIV97x2ZCcwhGkshc8wpRi-YAnAnmo0n-YRegPlaOwQ,488
 yaicli/llms/providers/doubao_provider.py,sha256=4eOdE91ITUn3uo3mvYAzdrHsuFIIBwZWib21mtZn8OY,1938
-yaicli/llms/providers/gemini_provider.py,sha256=iCRDqHRBFeTD_2NQwlsAlxFU7cKK4iyjimaPSp4VySM,7923
+yaicli/llms/providers/gemini_provider.py,sha256=k_6JFmqiYPz5K8IioFic5tp8KAHgeeakjkPyqJVz8BI,8007
 yaicli/llms/providers/groq_provider.py,sha256=EiS1Yxw5jbAUBFCRYsJ57KYgZPk6oH-_gD72OfW8Oik,1358
-yaicli/llms/providers/infiniai_provider.py,sha256=1dseUIZiXsxYRATRtk_obFclyXMwi4glsP7l_tVtnv8,710
+yaicli/llms/providers/huggingface_provider.py,sha256=vDJyyK_aOlvktNvs-cji6pDtmKEp61vuVJ783BZw4pc,1247
+yaicli/llms/providers/infiniai_provider.py,sha256=8-nU6QE58PRoZL9b_HzbPp4yi6OGm7rXtfi9z7bJMOg,786
 yaicli/llms/providers/minimax_provider.py,sha256=W-j3dzrYMEv14bYt2pCPvPUxvxsUs-iMAcGB9yXakFs,744
-yaicli/llms/providers/modelscope_provider.py,sha256=BzBhYixiDEWB7gujQ0rcG__7nsv0psJRxdtYCYXBhdM,454
+yaicli/llms/providers/modelscope_provider.py,sha256=qWM0T7r0Zf8k3pLzjj7_IFdnmnx7S3rJO0f9rRm8-_A,504
 yaicli/llms/providers/ollama_provider.py,sha256=pjpYjfnHWnExweZi1KGbT07JGkcxzKPhqICo8dD82D0,6967
-yaicli/llms/providers/openai_provider.py,sha256=yl1vVKt8QzbN_dbsW_9rY8S_xkXI3Bo3Of4Cf7W3mJc,10075
+yaicli/llms/providers/openai_provider.py,sha256=ENn21QacP2iTcmbxuW7dgiw3_fUr8EGWhNSFR2yxjis,10079
 yaicli/llms/providers/openrouter_provider.py,sha256=R-7FrUrCAKPZ3gbnuo0M6rPlVw1mvSBjbLGs_FtZWM0,732
 yaicli/llms/providers/sambanova_provider.py,sha256=FFLrsvARt1UPAFWWgiuB6zvGzGKdtehKL58HdE1fo_M,2254
-yaicli/llms/providers/siliconflow_provider.py,sha256=7Ir73me9jGMO5TAZDjrAbX7tbb_QBmLjTGywY0yliqc,446
+yaicli/llms/providers/siliconflow_provider.py,sha256=CW2VSt6evUyFy21vN84Nvmw1P0JpmHBLznsgiXMnHM0,496
 yaicli/llms/providers/targon_provider.py,sha256=RQ808eS9lvsyvlzyKaQYcN0NimbpoNWgjHUzY1gLNs4,717
 yaicli/llms/providers/vertexai_provider.py,sha256=_ddrse1LfXRChTgkvxUlexyfJlfr0sVJH-Rmno3djSI,636
 yaicli/llms/providers/xai_provider.py,sha256=Q6iOvJZOXIAwRiiHMKEBgq8-W6SGVZ9QD1_532bNYfo,199
 yaicli/llms/providers/yi_provider.py,sha256=EnTm9qTxHPnzERsKqgGnzRIVhXFcAEdYqtOra65pGmY,719
-yaicli-0.6.3.dist-info/METADATA,sha256=EfU2thy5G2Ge-BaCM3RT0quP2YUPz0bBWLJjKJugf_w,53677
-yaicli-0.6.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-yaicli-0.6.3.dist-info/entry_points.txt,sha256=iYVyQP0PJIm9tQnlQheqT435kK_xdGoi5j9aswGV9hA,66
-yaicli-0.6.3.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-yaicli-0.6.3.dist-info/RECORD,,
+yaicli-0.6.4.dist-info/METADATA,sha256=q1r4B-AADLIC9gAYEDd4BDHnAMnbjqDXrrOG3QNxbGc,55786
+yaicli-0.6.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+yaicli-0.6.4.dist-info/entry_points.txt,sha256=iYVyQP0PJIm9tQnlQheqT435kK_xdGoi5j9aswGV9hA,66
+yaicli-0.6.4.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+yaicli-0.6.4.dist-info/RECORD,,