yaicli 0.7.0__py3-none-any.whl → 0.7.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyproject.toml +7 -1
- yaicli/const.py +2 -1
- yaicli/exceptions.py +4 -0
- yaicli/llms/client.py +8 -13
- yaicli/llms/provider.py +31 -1
- yaicli/llms/providers/ai21_provider.py +9 -14
- yaicli/llms/providers/chutes_provider.py +8 -17
- yaicli/llms/providers/deepseek_provider.py +8 -8
- yaicli/llms/providers/groq_provider.py +21 -13
- yaicli/llms/providers/infiniai_provider.py +8 -8
- yaicli/llms/providers/minimax_provider.py +8 -14
- yaicli/llms/providers/mistral_provider.py +217 -0
- yaicli/llms/providers/modelscope_provider.py +8 -8
- yaicli/llms/providers/ollama_provider.py +3 -2
- yaicli/llms/providers/openai_provider.py +12 -27
- yaicli/llms/providers/openrouter_provider.py +8 -15
- yaicli/llms/providers/sambanova_provider.py +8 -17
- yaicli/llms/providers/siliconflow_provider.py +8 -8
- yaicli/llms/providers/targon_provider.py +8 -15
- yaicli/llms/providers/yi_provider.py +8 -15
- yaicli/tools/__init__.py +7 -23
- yaicli/tools/mcp.py +8 -0
- yaicli/utils.py +6 -0
- {yaicli-0.7.0.dist-info → yaicli-0.7.2.dist-info}/METADATA +14 -1
- yaicli-0.7.2.dist-info/RECORD +50 -0
- yaicli-0.7.0.dist-info/RECORD +0 -49
- {yaicli-0.7.0.dist-info → yaicli-0.7.2.dist-info}/WHEEL +0 -0
- {yaicli-0.7.0.dist-info → yaicli-0.7.2.dist-info}/entry_points.txt +0 -0
- {yaicli-0.7.0.dist-info → yaicli-0.7.2.dist-info}/licenses/LICENSE +0 -0
pyproject.toml
CHANGED

```diff
@@ -1,6 +1,6 @@
 [project]
 name = "yaicli"
-version = "0.7.0"
+version = "0.7.2"
 description = "A simple CLI tool to interact with LLM"
 authors = [{ name = "belingud", email = "im.victor@qq.com" }]
 readme = "README.md"
@@ -55,6 +55,7 @@ keywords = [
 dependencies = [
     "click>=8.1.8",
     "distro>=1.9.0",
+    "fastmcp>=2.9.2",
     "httpx>=0.28.1",
     "instructor>=1.7.9",
     "json-repair>=0.44.1",
@@ -80,6 +81,7 @@ all = [
     "cohere>=5.15.0",
     "google-genai>=1.20.0",
     "huggingface-hub>=0.33.0",
+    "mistralai>=1.8.2",
 ]
 doubao = ["volcengine-python-sdk>=3.0.15"]
 ollama = ["ollama>=0.5.1"]
@@ -88,6 +90,9 @@ gemini = ["google-genai>=1.20.0"]
 huggingface = [
     "huggingface-hub>=0.33.0",
 ]
+mistral = [
+    "mistralai>=1.8.2",
+]
 
 [tool.pytest.ini_options]
 testpaths = ["tests"]
@@ -111,6 +116,7 @@ dev = [
     "pytest-cov>=6.1.1",
     "ruff>=0.11.2",
     "tox>=4.27.0",
+    "tox-uv>=1.26.1",
 ]
 
 [tool.isort]
```
yaicli/const.py
CHANGED

```diff
@@ -69,7 +69,8 @@ DEFAULT_JUSTIFY: JustifyMethod = "default"
 DEFAULT_ROLE_MODIFY_WARNING: BOOL_STR = "true"
 DEFAULT_ENABLE_FUNCTIONS: BOOL_STR = "true"
 DEFAULT_SHOW_FUNCTION_OUTPUT: BOOL_STR = "true"
-
+# low/high/medium for openai, default/null for groq
+DEFAULT_REASONING_EFFORT: Optional[Literal["low", "high", "medium", "default", "null"]] = None
 DEFAULT_ENABLE_MCP: BOOL_STR = "false"
 DEFAULT_SHOW_MCP_OUTPUT: BOOL_STR = "false"
 
```
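The new `DEFAULT_REASONING_EFFORT` constant is provider-dependent: per the inline comment, OpenAI-style APIs take `low`/`medium`/`high` while Groq expects `default`/`null`. A minimal sketch, assuming the `None` default simply means the parameter is omitted from requests (the model name below is hypothetical):

```python
from typing import Literal, Optional

DEFAULT_REASONING_EFFORT: Optional[Literal["low", "high", "medium", "default", "null"]] = None


def build_params(reasoning_effort: Optional[str] = DEFAULT_REASONING_EFFORT) -> dict:
    # None means "not configured": the key is left out of the request entirely.
    params: dict = {"model": "example-model"}  # hypothetical model name
    if reasoning_effort is not None:
        params["reasoning_effort"] = reasoning_effort
    return params


print(build_params())       # {'model': 'example-model'}
print(build_params("low"))  # {'model': 'example-model', 'reasoning_effort': 'low'}
```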
yaicli/exceptions.py
CHANGED
yaicli/llms/client.py
CHANGED

```diff
@@ -1,11 +1,11 @@
-from typing import Generator, List,
+from typing import Generator, List, Union
 
 from ..config import cfg
 from ..console import get_console
 from ..schemas import ChatMessage, LLMResponse, RefreshLive, ToolCall
 from ..tools import execute_tool_call
 from ..tools.mcp import MCP_TOOL_NAME_PREFIX
-from .provider import
+from .provider import ProviderFactory
 
 
 class LLMClient:
@@ -20,8 +20,7 @@ class LLMClient:
 
     def __init__(
         self,
-
-        provider_name: str = "",
+        provider_name: str,
         config: dict = cfg,
         verbose: bool = False,
         **kwargs,
@@ -30,8 +29,7 @@ class LLMClient:
         Initialize LLM client
 
         Args:
-
-            provider_name: Name of the provider to use if provider not provided
+            provider_name: Name of the provider to use, default to openai if not known
             config: Configuration dictionary
             verbose: Whether to enable verbose logging
         """
@@ -42,13 +40,10 @@ class LLMClient:
         self.enable_mcp = self.config["ENABLE_MCP"]
 
         # Use provided provider or create one
-        if
-            self.
-
-
-        else:
-            provider_name = config.get("PROVIDER", "openai").lower()
-            self.provider = ProviderFactory.create_provider(provider_name, config=config, verbose=verbose, **kwargs)
+        if provider_name not in ProviderFactory.providers_map:
+            self.console.print(f"Provider {provider_name} not found, using openai as default", style="red")
+            provider_name = "openai"
+        self.provider = ProviderFactory.create_provider(provider_name, config=config, verbose=verbose, **kwargs)
 
         self.max_recursion_depth = config.get("MAX_RECURSION_DEPTH", 5)
```
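The constructor now requires `provider_name` and validates it against `ProviderFactory.providers_map` instead of accepting an optional pre-built provider. A hedged usage sketch (assumes an installed, configured yaicli; the provider name is deliberately invalid to show the fallback):

```python
from yaicli.llms.client import LLMClient

# "no-such-provider" is not a key in ProviderFactory.providers_map, so 0.7.2
# prints "Provider no-such-provider not found, using openai as default" in red
# and builds the OpenAI provider instead of raising an error.
client = LLMClient("no-such-provider")
```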
yaicli/llms/provider.py
CHANGED

```diff
@@ -1,6 +1,6 @@
 import importlib
 from abc import ABC, abstractmethod
-from typing import Generator, List
+from typing import Any, Dict, Generator, List
 
 from ..schemas import ChatMessage, LLMResponse
 
@@ -34,6 +34,35 @@ class Provider(ABC):
         """Return the role that should be used for tool responses"""
         pass
 
+    def _convert_messages(self, messages: List[ChatMessage]) -> List[Dict[str, Any]]:
+        """Convert a list of ChatMessage objects to a list of OpenAI message format.
+
+        Args:
+            messages: List of ChatMessage
+
+        Returns:
+            List of OpenAI message format
+        """
+        converted_messages = []
+        for msg in messages:
+            message: Dict[str, Any] = {"role": msg.role, "content": msg.content or ""}
+
+            if msg.name:
+                message["name"] = msg.name
+
+            if msg.role == "assistant" and msg.tool_calls:
+                message["tool_calls"] = [
+                    {"id": tc.id, "type": "function", "function": {"name": tc.name, "arguments": tc.arguments}}
+                    for tc in msg.tool_calls
+                ]
+
+            if msg.role == "tool" and msg.tool_call_id:
+                message["tool_call_id"] = msg.tool_call_id
+
+            converted_messages.append(message)
+
+        return converted_messages
+
 
 class ProviderFactory:
     """Factory to create LLM provider instances"""
@@ -52,6 +81,7 @@ class ProviderFactory:
         "huggingface": (".providers.huggingface_provider", "HuggingFaceProvider"),
         "infini-ai": (".providers.infiniai_provider", "InfiniAIProvider"),
         "minimax": (".providers.minimax_provider", "MinimaxProvider"),
+        "mistral": (".providers.mistral_provider", "MistralProvider"),
         "modelscope": (".providers.modelscope_provider", "ModelScopeProvider"),
         "ollama": (".providers.ollama_provider", "OllamaProvider"),
         "openai": (".providers.openai_provider", "OpenAIProvider"),
```
yaicli/llms/providers/ai21_provider.py
CHANGED

```diff
@@ -1,4 +1,4 @@
-from typing import
+from typing import Generator, Optional
 
 from openai._streaming import Stream
 from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
@@ -11,19 +11,14 @@ class AI21Provider(OpenAIProvider):
     """AI21 provider implementation based on openai-compatible API"""
 
     DEFAULT_BASE_URL = "https://api.ai21.com/studio/v1"
-
-    def get_completion_params_keys(self) -> Dict[str, str]:
-        """
-
-
-
-
-
-        """
-        keys = super().get_completion_params_keys()
-        if "max_completion_tokens" in keys:
-            keys["max_tokens"] = keys.pop("max_completion_tokens")
-        return keys
+    COMPLETION_PARAMS_KEYS = {
+        "model": "MODEL",
+        "temperature": "TEMPERATURE",
+        "top_p": "TOP_P",
+        "max_tokens": "MAX_TOKENS",
+        "timeout": "TIMEOUT",
+        "extra_body": "EXTRA_BODY",
+    }
 
     def _handle_stream_response(self, response: Stream[ChatCompletionChunk]) -> Generator[LLMResponse, None, None]:
         """Handle streaming response from AI21 models
```
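This hunk shows the refactor that repeats across most providers in 0.7.2: instead of overriding `get_completion_params_keys()` just to rename `max_completion_tokens` to `max_tokens`, each provider now declares a `COMPLETION_PARAMS_KEYS` mapping from API parameter name to config key. A sketch of how the base class plausibly consumes the mapping, based on the loop body visible as context in the openai_provider.py hunk (`completion_params[api_key] = self.config[config_key]`); the skip-unset guard and the config values are assumptions:

```python
COMPLETION_PARAMS_KEYS = {
    "model": "MODEL",
    "temperature": "TEMPERATURE",
    "top_p": "TOP_P",
    "max_tokens": "MAX_TOKENS",
    "timeout": "TIMEOUT",
    "extra_body": "EXTRA_BODY",
}

# Example config values, not yaicli defaults.
config = {"MODEL": "example-model", "TEMPERATURE": 0.5, "TOP_P": 1.0,
          "MAX_TOKENS": 1024, "TIMEOUT": 60, "EXTRA_BODY": None}

completion_params = {}
for api_key, config_key in COMPLETION_PARAMS_KEYS.items():
    if config.get(config_key) is not None:  # assumed guard for unset values
        completion_params[api_key] = config[config_key]

print(completion_params)
# {'model': 'example-model', 'temperature': 0.5, 'top_p': 1.0, 'max_tokens': 1024, 'timeout': 60}
```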
yaicli/llms/providers/chutes_provider.py
CHANGED

```diff
@@ -5,20 +5,11 @@ class ChutesProvider(OpenAIProvider):
     """Chutes provider implementation based on openai-compatible API"""
 
     DEFAULT_BASE_URL = "https://llm.chutes.ai/v1"
-
-    def get_completion_params_keys(self) -> Dict[str, str]:
-        """
-
-
-
-
-
-            dict: Modified parameter mapping dictionary
-        """
-        keys = super().get_completion_params_keys()
-        # Replace max_completion_tokens with max_tokens in the API
-        if "max_completion_tokens" in keys:
-            keys["max_tokens"] = keys.pop("max_completion_tokens")
-        # Remove unsupported parameters
-        keys.pop("reasoning_effort", None)
-        return keys
+    COMPLETION_PARAMS_KEYS = {
+        "model": "MODEL",
+        "temperature": "TEMPERATURE",
+        "top_p": "TOP_P",
+        "max_tokens": "MAX_TOKENS",
+        "timeout": "TIMEOUT",
+        "extra_body": "EXTRA_BODY",
+    }
```
yaicli/llms/providers/deepseek_provider.py
CHANGED

```diff
@@ -1,5 +1,3 @@
-from typing import Any, Dict
-
 from .openai_provider import OpenAIProvider
 
 
@@ -7,9 +5,11 @@ class DeepSeekProvider(OpenAIProvider):
     """DeepSeek provider implementation based on openai-compatible API"""
 
     DEFAULT_BASE_URL = "https://api.deepseek.com/v1"
-
-
-
-
-
-
+    COMPLETION_PARAMS_KEYS = {
+        "model": "MODEL",
+        "temperature": "TEMPERATURE",
+        "top_p": "TOP_P",
+        "max_tokens": "MAX_TOKENS",
+        "timeout": "TIMEOUT",
+        "extra_body": "EXTRA_BODY",
+    }
```
yaicli/llms/providers/groq_provider.py
CHANGED

```diff
@@ -7,19 +7,15 @@ class GroqProvider(OpenAIProvider):
     """Groq provider implementation based on openai-compatible API"""
 
     DEFAULT_BASE_URL = "https://api.groq.com/openai/v1"
-
-    def get_completion_params_keys(self) -> Dict[str, str]:
-        """
-
-
-
-
-
-        """
-        keys = super().get_completion_params_keys()
-        if "max_completion_tokens" in keys:
-            keys["max_tokens"] = keys.pop("max_completion_tokens")
-        return keys
+    COMPLETION_PARAMS_KEYS = {
+        "model": "MODEL",
+        "temperature": "TEMPERATURE",
+        "top_p": "TOP_P",
+        "max_tokens": "MAX_TOKENS",
+        "timeout": "TIMEOUT",
+        "extra_body": "EXTRA_BODY",
+        "reasoning_effort": "REASONING_EFFORT",
+    }
 
     def get_completion_params(self) -> Dict[str, Any]:
         """
@@ -33,4 +29,16 @@ class GroqProvider(OpenAIProvider):
         if self.config["EXTRA_BODY"] and "N" in self.config["EXTRA_BODY"] and self.config["EXTRA_BODY"]["N"] != 1:
             self.console.print("Groq does not support N parameter, setting N to 1 as Groq default", style="yellow")
             params["extra_body"]["N"] = 1
+
+        if params.get("reasoning_effort"):
+            if params["reasoning_effort"] not in ("null", "default"):
+                self.console.print(
+                    "Groq only supports null or default for reasoning_effort, setting to default", style="yellow"
+                )
+                params["reasoning_effort"] = "default"
+            if "qwen3" not in params["model"]:
+                self.console.print("Groq only supports reasoning_effort for qwen3, setting to null", style="yellow")
+                params["reasoning_effort"] = None
+            if params.get("reasoning_effort") == "null":
+                params["reasoning_effort"] = None
         return params
```
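Groq's mapping keeps `reasoning_effort`, and `get_completion_params()` then normalizes it: anything outside `null`/`default` is coerced to `default`, non-qwen3 models drop it, and the string `"null"` becomes a real `None`. The same decision chain as a standalone sketch:

```python
def normalize_reasoning_effort(params: dict) -> dict:
    """Mirror of the Groq reasoning_effort handling added in 0.7.2."""
    if params.get("reasoning_effort"):
        if params["reasoning_effort"] not in ("null", "default"):
            params["reasoning_effort"] = "default"  # only null/default are accepted
        if "qwen3" not in params["model"]:
            params["reasoning_effort"] = None  # only qwen3 models support the knob
        if params.get("reasoning_effort") == "null":
            params["reasoning_effort"] = None  # "null" string means omit
    return params


print(normalize_reasoning_effort({"model": "qwen3-32b", "reasoning_effort": "low"}))
# {'model': 'qwen3-32b', 'reasoning_effort': 'default'}
print(normalize_reasoning_effort({"model": "llama-3.3-70b", "reasoning_effort": "default"}))
# {'model': 'llama-3.3-70b', 'reasoning_effort': None}
```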
yaicli/llms/providers/infiniai_provider.py
CHANGED

```diff
@@ -1,5 +1,3 @@
-from typing import Any, Dict
-
 from ...config import cfg
 from .openai_provider import OpenAIProvider
 
@@ -8,15 +6,17 @@ class InfiniAIProvider(OpenAIProvider):
     """InfiniAI provider implementation based on openai-compatible API"""
 
     DEFAULT_BASE_URL = "https://cloud.infini-ai.com/maas/v1"
+    COMPLETION_PARAMS_KEYS = {
+        "model": "MODEL",
+        "temperature": "TEMPERATURE",
+        "top_p": "TOP_P",
+        "max_tokens": "MAX_TOKENS",
+        "timeout": "TIMEOUT",
+        "extra_body": "EXTRA_BODY",
+    }
 
     def __init__(self, config: dict = cfg, **kwargs):
         super().__init__(config, **kwargs)
         if self.enable_function:
             self.console.print("InfiniAI does not support functions, disabled", style="yellow")
             self.enable_function = False
-
-    def get_completion_params(self) -> Dict[str, Any]:
-        params = super().get_completion_params()
-        if "max_completion_tokens" in params:
-            params["max_tokens"] = params.pop("max_completion_tokens")
-        return params
```
yaicli/llms/providers/minimax_provider.py
CHANGED

```diff
@@ -5,17 +5,11 @@ class MinimaxProvider(OpenAIProvider):
     """Minimax provider implementation based on openai-compatible API"""
 
     DEFAULT_BASE_URL = "https://api.minimaxi.com/v1"
-
-    def get_completion_params_keys(self) -> Dict[str, str]:
-        """
-
-
-
-
-        """
-        keys = super().get_completion_params_keys()
-        # Replace max_completion_tokens with max_tokens in the API
-        if "max_completion_tokens" in keys:
-            keys["max_tokens"] = keys.pop("max_completion_tokens")
-        return keys
+    COMPLETION_PARAMS_KEYS = {
+        "model": "MODEL",
+        "temperature": "TEMPERATURE",
+        "top_p": "TOP_P",
+        "max_tokens": "MAX_TOKENS",
+        "timeout": "TIMEOUT",
+        "extra_body": "EXTRA_BODY",
+    }
```
yaicli/llms/providers/mistral_provider.py
ADDED

```python
import json
from typing import Any, Dict, Generator, List, Optional, Union

from mistralai import Mistral
from mistralai.models import ChatCompletionResponse, CompletionEvent, ContentChunk
from mistralai.models import ToolCall as MistralToolCall
from mistralai.utils.eventstreaming import EventStream
from pydantic import PydanticDeprecatedSince20, PydanticDeprecatedSince211

from ...config import cfg
from ...console import get_console
from ...exceptions import MCPToolsError
from ...schemas import ChatMessage, LLMResponse, ToolCall
from ...tools import get_openai_mcp_tools, get_openai_schemas
from ...utils import gen_tool_call_id
from ..provider import Provider


class MistralProvider(Provider):
    """Mistral provider implementation based on mistralai library"""

    CLIENT_CLS = Mistral
    DEFAULT_BASE_URL = "https://api.mistral.ai/v1"

    def __init__(self, config: dict = cfg, verbose: bool = False, **kwargs):
        """Initialize Mistral provider

        Args:
            config: Configuration dictionary
            verbose: Whether to enable verbose logging
        """
        self.config = config
        self.verbose = verbose
        self.enable_functions = config["ENABLE_FUNCTIONS"]
        self.enable_mcp = config["ENABLE_MCP"]
        self.client = Mistral(**self.get_client_params())
        self.console = get_console()

        # Disable pydantic deprecated warnings
        import warnings

        warnings.filterwarnings("ignore", category=PydanticDeprecatedSince20)
        warnings.filterwarnings("ignore", category=PydanticDeprecatedSince211)

    def get_client_params(self) -> Dict[str, Any]:
        """Get client parameters for Mistral

        Returns:
            Dict[str, Any]
        """
        client_params = {
            "api_key": self.config["API_KEY"],
            "timeout_ms": self.config["TIMEOUT"] * 1000,  # Mistral uses milliseconds
        }
        if self.config["BASE_URL"]:
            client_params["server_url"] = self.config["BASE_URL"]
        if self.config.get("SERVER"):
            client_params["server"] = self.config["SERVER"]
        return client_params

    def get_completion_params(self) -> Dict[str, Any]:
        """Get completion parameters for Mistral

        Returns:
            Dict[str, Any]
        """
        params = {
            "model": self.config["MODEL"],
            "temperature": self.config["TEMPERATURE"],
            "top_p": self.config["TOP_P"],
            "max_tokens": self.config["MAX_TOKENS"],
            "stream": self.config["STREAM"],
            "http_headers": {
                "X-Title": self.APP_NAME,
                "HTTP_Referer": self.APP_REFERER,
            },
        }
        if self.config["EXTRA_HEADERS"]:
            params["http_headers"] = {**self.config["EXTRA_HEADERS"], **params["http_headers"]}
        tools = []
        if self.enable_functions:
            tools.extend(get_openai_schemas())
        if self.enable_mcp:
            try:
                tools.extend(get_openai_mcp_tools())
            except MCPToolsError as e:
                self.console.print(e, style="red")
        if tools:
            params["tools"] = tools
            params["tool_choice"] = "auto"
            params["parallel_tool_calls"] = False
        return params

    def completion(self, messages: List[ChatMessage], stream: bool = False) -> Generator[LLMResponse, None, None]:
        """Completion method for Mistral

        Args:
            messages: List of ChatMessage
            stream: Whether to stream the response
        """
        # Convert messages to Mistral format
        mistral_messages = self._convert_messages(messages)
        if self.verbose:
            self.console.print("Messages:")
            self.console.print(mistral_messages)

        params = self.get_completion_params()
        params["messages"] = mistral_messages

        if stream:
            response = self.client.chat.stream(**params)
            yield from self._handle_stream_response(response)
        else:
            response = self.client.chat.complete(**params)
            yield from self._handle_normal_response(response)

    def _handle_normal_response(self, response: ChatCompletionResponse) -> Generator[LLMResponse, None, None]:
        """Handle normal (non-streaming) response"""
        if not response.choices or not response.choices[0].message:
            content = response.model_dump_json()
            yield LLMResponse(content=content, finish_reason="stop")
            return

        choice = response.choices[0]
        finish_reason = choice.finish_reason
        content = self.get_content_from_delta_content(choice.message.content) if choice.message.content else ""
        tool_call: Optional[ToolCall] = None

        # Handle tool calls if present
        if finish_reason == "tool_calls":
            tool_call = self._process_tool_call_chunk(choice.message.tool_calls or [])

        yield LLMResponse(content=content, finish_reason=finish_reason, tool_call=tool_call)

    def _handle_stream_response(self, response: EventStream[CompletionEvent]) -> Generator[LLMResponse, None, None]:
        """Handle stream response"""
        tool_call: Optional[ToolCall] = None

        for chunk in response:
            choice = chunk.data.choices[0]
            finish_reason = choice.finish_reason
            delta_content = choice.delta.content
            delta = choice.delta
            content = ""

            if delta_content:
                content = self.get_content_from_delta_content(delta_content)

            # Process tool call information
            if hasattr(delta, "tool_calls") and delta.tool_calls:
                tool_call = self._process_tool_call_chunk(delta.tool_calls, tool_call)

            yield LLMResponse(
                content=content,
                finish_reason=finish_reason,
                tool_call=tool_call if finish_reason == "tool_calls" else None,
            )

    def _process_tool_call_chunk(
        self, tool_calls: List[MistralToolCall], existing_tool_call: Optional[ToolCall] = None
    ) -> Optional[ToolCall]:
        """Process tool call data from a response chunk"""
        # Initialize tool call object if this is the first chunk with tool call data
        if existing_tool_call is None and tool_calls:
            tool = tool_calls[0]
            existing_tool_call = ToolCall(tool.id or gen_tool_call_id(), tool.function.name, "")

        # Accumulate arguments from multiple chunks
        if existing_tool_call:
            for tool in tool_calls:
                if not tool.function:
                    continue
                # Ensure arguments is a string
                tool_args = tool.function.arguments
                if not isinstance(tool_args, str):
                    tool_args = json.dumps(tool_args)
                existing_tool_call.arguments += tool_args

        return existing_tool_call

    def get_content_from_delta_content(self, delta_content: Union[str, List[ContentChunk]]) -> str:
        """Get content from a delta content

        If the delta content is a string, it will be returned as is.
        If the delta content is a list of ContentChunk, it will be converted to a string.
        Args:
            delta_content: Union[str, List[ContentChunk]]
        Returns:
            str
        """
        if isinstance(delta_content, str):
            return delta_content
        return self.extract_contents_list(delta_content)

    def extract_contents_list(self, delta_content: List[ContentChunk]) -> str:
        """Extract content from a list of ContentChunk

        If the content is a list of ContentChunk, it will be converted to a string.
        Args:
            delta_content: List[ContentChunk]
        Returns:
            str
        """
        content = ""
        for i in delta_content:
            if i.type == "text":
                content += i.text
            elif i.type == "image_url":
                content += i.image_url if isinstance(i.image_url, str) else i.image_url.url
            elif i.type == "document_url":
                content += f"[{i.document_name}]({i.document_url})"
            elif i.type == "reference":
                content += "Reference IDs: " + json.dumps(i.reference_ids)
        return content

    def detect_tool_role(self) -> str:
        return "tool"
```
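The most subtle part of the new provider is `_process_tool_call_chunk`: Mistral streams a tool call's JSON arguments in fragments, so the method creates one `ToolCall` on the first fragment (minting an id via `gen_tool_call_id()` if the chunk has none) and concatenates every later fragment. A self-contained sketch of that accumulation; the small classes are illustrative stand-ins, not the real mistralai or yaicli types:

```python
import json
from dataclasses import dataclass


@dataclass
class Function:  # stand-in for mistralai's function payload
    name: str
    arguments: str


@dataclass
class StreamToolCall:  # stand-in for a streamed tool-call delta
    id: str
    function: Function


@dataclass
class ToolCall:  # stand-in for yaicli.schemas.ToolCall (assumed fields)
    id: str
    name: str
    arguments: str


def process_chunk(tool_calls, existing=None):
    # First fragment: create the accumulator object.
    if existing is None and tool_calls:
        first = tool_calls[0]
        existing = ToolCall(first.id, first.function.name, "")
    # Every fragment (including the first) appends its argument text.
    if existing:
        for tc in tool_calls:
            args = tc.function.arguments
            if not isinstance(args, str):
                args = json.dumps(args)  # dict payloads are serialized first
            existing.arguments += args
    return existing


call = process_chunk([StreamToolCall("c1", Function("search", '{"query": "ya'))])
call = process_chunk([StreamToolCall("c1", Function("search", 'icli"}'))], call)
print(call.arguments)  # {"query": "yaicli"}
```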
yaicli/llms/providers/modelscope_provider.py
CHANGED

```diff
@@ -1,5 +1,3 @@
-from typing import Any, Dict
-
 from .openai_provider import OpenAIProvider
 
 
@@ -7,9 +5,11 @@ class ModelScopeProvider(OpenAIProvider):
     """ModelScope provider implementation based on openai-compatible API"""
 
     DEFAULT_BASE_URL = "https://api-inference.modelscope.cn/v1/"
-
-
-
-
-
-
+    COMPLETION_PARAMS_KEYS = {
+        "model": "MODEL",
+        "temperature": "TEMPERATURE",
+        "top_p": "TOP_P",
+        "max_tokens": "MAX_TOKENS",
+        "timeout": "TIMEOUT",
+        "extra_body": "EXTRA_BODY",
+    }
```
yaicli/llms/providers/ollama_provider.py
CHANGED

```diff
@@ -3,6 +3,7 @@ import time
 from typing import Any, Dict, Generator, List
 
 import ollama
+from ollama import ChatResponse
 
 from ...config import cfg
 from ...console import get_console
@@ -50,7 +51,7 @@ class OllamaProvider(Provider):
         """Convert a list of ChatMessage objects to a list of Ollama message dicts."""
         converted_messages = []
         for msg in messages:
-            message = {"role": msg.role, "content": msg.content or ""}
+            message: dict[str, Any] = {"role": msg.role, "content": msg.content or ""}
 
             if msg.name:
                 message["name"] = msg.name
@@ -118,7 +119,7 @@ class OllamaProvider(Provider):
             self.console.print(f"Ollama API error: {e}", style="red")
             yield LLMResponse(content=f"Error calling Ollama API: {str(e)}")
 
-    def _handle_normal_response(self, response:
+    def _handle_normal_response(self, response: ChatResponse) -> Generator[LLMResponse, None, None]:
         """Handle normal (non-streaming) response"""
         content = response.message.content or ""
         reasoning = response.message.thinking or ""
```
yaicli/llms/providers/openai_provider.py
CHANGED

```diff
@@ -1,4 +1,5 @@
 import json
+from copy import deepcopy
 from typing import Any, Dict, Generator, List, Optional
 
 import openai
@@ -8,9 +9,9 @@ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
 
 from ...config import cfg
 from ...console import get_console
+from ...exceptions import MCPToolsError
 from ...schemas import ChatMessage, LLMResponse, ToolCall
-from ...tools import get_openai_schemas
-from ...tools.mcp import get_mcp_manager
+from ...tools import get_openai_mcp_tools, get_openai_schemas
 from ..provider import Provider
 
 
@@ -44,7 +45,13 @@ class OpenAIProvider(Provider):
         self.console = get_console()
 
         # Store completion params
-        self.
+        self._completion_params = None
+
+    @property
+    def completion_params(self) -> Dict[str, Any]:
+        if self._completion_params is None:
+            self._completion_params = self.get_completion_params()
+        return deepcopy(self._completion_params)
 
     def get_client_params(self) -> Dict[str, Any]:
         """Get the client parameters"""
@@ -84,28 +91,6 @@ class OpenAIProvider(Provider):
             completion_params[api_key] = self.config[config_key]
         return completion_params
 
-    def _convert_messages(self, messages: List[ChatMessage]) -> List[Dict[str, Any]]:
-        """Convert a list of ChatMessage objects to a list of OpenAI message dicts."""
-        converted_messages = []
-        for msg in messages:
-            message: Dict[str, Any] = {"role": msg.role, "content": msg.content or ""}
-
-            if msg.name:
-                message["name"] = msg.name
-
-            if msg.role == "assistant" and msg.tool_calls:
-                message["tool_calls"] = [
-                    {"id": tc.id, "type": "function", "function": {"name": tc.name, "arguments": tc.arguments}}
-                    for tc in msg.tool_calls
-                ]
-
-            if msg.role == "tool" and msg.tool_call_id:
-                message["tool_call_id"] = msg.tool_call_id
-
-            converted_messages.append(message)
-
-        return converted_messages
-
     def completion(
         self,
         messages: List[ChatMessage],
@@ -141,8 +126,8 @@ class OpenAIProvider(Provider):
         # Add MCP tools if enabled
         if self.enable_mcp:
             try:
-                mcp_tools =
-            except (ValueError, FileNotFoundError) as e:
+                mcp_tools = get_openai_mcp_tools()
+            except (ValueError, FileNotFoundError, MCPToolsError) as e:
                 self.console.print(f"Failed to load MCP tools: {e}", style="red")
                 mcp_tools = []
             tools.extend(mcp_tools)
```
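The new `completion_params` property builds the parameter dict once, then hands out a `deepcopy` on every access, so per-request mutation (like Groq's `reasoning_effort` fixes above) cannot corrupt the cached copy. A minimal sketch of the pattern in isolation:

```python
from copy import deepcopy
from typing import Any, Dict, Optional


class ParamsCache:
    def __init__(self) -> None:
        self._completion_params: Optional[Dict[str, Any]] = None

    def get_completion_params(self) -> Dict[str, Any]:
        print("building params")  # runs only once thanks to the cache
        return {"model": "example-model", "extra_body": {"N": 1}}

    @property
    def completion_params(self) -> Dict[str, Any]:
        if self._completion_params is None:
            self._completion_params = self.get_completion_params()
        return deepcopy(self._completion_params)


p = ParamsCache()
request_params = p.completion_params
request_params["extra_body"]["N"] = 5  # mutate the copy freely
print(p.completion_params["extra_body"]["N"])  # still 1: cache is untouched
```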
yaicli/llms/providers/openrouter_provider.py
CHANGED

```diff
@@ -1,5 +1,3 @@
-from typing import Dict
-
 from .openai_provider import OpenAIProvider
 
 
@@ -7,16 +5,11 @@ class OpenRouterProvider(OpenAIProvider):
     """OpenRouter provider implementation based on openai-compatible API"""
 
     DEFAULT_BASE_URL = "https://openrouter.ai/api/v1"
-
-    def get_completion_params_keys(self) -> Dict[str, str]:
-        """
-
-
-
-
-
-        """
-        keys = super().get_completion_params_keys()
-        if "max_completion_tokens" in keys:
-            keys["max_tokens"] = keys.pop("max_completion_tokens")
-        return keys
+    COMPLETION_PARAMS_KEYS = {
+        "model": "MODEL",
+        "temperature": "TEMPERATURE",
+        "top_p": "TOP_P",
+        "max_tokens": "MAX_TOKENS",
+        "timeout": "TIMEOUT",
+        "extra_body": "EXTRA_BODY",
+    }
```
yaicli/llms/providers/sambanova_provider.py
CHANGED

```diff
@@ -16,23 +16,14 @@ class SambanovaProvider(OpenAIProvider):
         "DeepSeek-V3-0324",
     )
 
-
-    def get_completion_params_keys(self) -> Dict[str, str]:
-        """
-
-
-
-
-
-        """
-        keys = super().get_completion_params_keys()
-        # Replace max_completion_tokens with max_tokens
-        if "max_completion_tokens" in keys:
-            keys["max_tokens"] = keys.pop("max_completion_tokens")
-        # Remove unsupported parameters
-        keys.pop("presence_penalty", None)
-        keys.pop("frequency_penalty", None)
-        return keys
+    COMPLETION_PARAMS_KEYS = {
+        "model": "MODEL",
+        "temperature": "TEMPERATURE",
+        "top_p": "TOP_P",
+        "max_tokens": "MAX_TOKENS",
+        "timeout": "TIMEOUT",
+        "extra_body": "EXTRA_BODY",
+    }
 
     def get_completion_params(self) -> Dict[str, Any]:
         """
```
yaicli/llms/providers/siliconflow_provider.py
CHANGED

```diff
@@ -1,5 +1,3 @@
-from typing import Any, Dict
-
 from .openai_provider import OpenAIProvider
 
 
@@ -7,9 +5,11 @@ class SiliconFlowProvider(OpenAIProvider):
     """SiliconFlow provider implementation based on openai-compatible API"""
 
     DEFAULT_BASE_URL = "https://api.siliconflow.cn/v1"
-
-
-
-
-
-
+    COMPLETION_PARAMS_KEYS = {
+        "model": "MODEL",
+        "temperature": "TEMPERATURE",
+        "top_p": "TOP_P",
+        "max_tokens": "MAX_TOKENS",
+        "timeout": "TIMEOUT",
+        "extra_body": "EXTRA_BODY",
+    }
```
yaicli/llms/providers/targon_provider.py
CHANGED

```diff
@@ -1,5 +1,3 @@
-from typing import Dict
-
 from .openai_provider import OpenAIProvider
 
 
@@ -7,16 +5,11 @@ class TargonProvider(OpenAIProvider):
     """Targon provider implementation based on openai-compatible API"""
 
     DEFAULT_BASE_URL = "https://api.targon.com/v1"
-
-    def get_completion_params_keys(self) -> Dict[str, str]:
-        """
-
-
-
-
-
-        """
-        keys = super().get_completion_params_keys()
-        if "max_completion_tokens" in keys:
-            keys["max_tokens"] = keys.pop("max_completion_tokens")
-        return keys
+    COMPLETION_PARAMS_KEYS = {
+        "model": "MODEL",
+        "temperature": "TEMPERATURE",
+        "top_p": "TOP_P",
+        "max_tokens": "MAX_TOKENS",
+        "timeout": "TIMEOUT",
+        "extra_body": "EXTRA_BODY",
+    }
```
yaicli/llms/providers/yi_provider.py
CHANGED

```diff
@@ -1,5 +1,3 @@
-from typing import Dict
-
 from .openai_provider import OpenAIProvider
 
 
@@ -7,16 +5,11 @@ class YiProvider(OpenAIProvider):
     """Lingyiwanwu provider implementation based on openai-compatible API"""
 
     DEFAULT_BASE_URL = "https://api.lingyiwanwu.com/v1"
-
-    def get_completion_params_keys(self) -> Dict[str, str]:
-        """
-
-
-
-
-
-        """
-        keys = super().get_completion_params_keys()
-        if "max_completion_tokens" in keys:
-            keys["max_tokens"] = keys.pop("max_completion_tokens")
-        return keys
+    COMPLETION_PARAMS_KEYS = {
+        "model": "MODEL",
+        "temperature": "TEMPERATURE",
+        "top_p": "TOP_P",
+        "max_tokens": "MAX_TOKENS",
+        "timeout": "TIMEOUT",
+        "extra_body": "EXTRA_BODY",
+    }
```
yaicli/tools/__init__.py
CHANGED

```diff
@@ -1,11 +1,11 @@
 from typing import Any, Dict, List, Tuple, cast
 
 from json_repair import repair_json
-from mcp import types
 from rich.panel import Panel
 
 from ..config import cfg
 from ..console import get_console
+from ..exceptions import MCPToolsError
 from ..schemas import ToolCall
 from .function import get_function, list_functions
 from .mcp import MCP_TOOL_NAME_PREFIX, get_mcp, get_mcp_manager, parse_mcp_tool_name
@@ -38,31 +38,15 @@ def get_openai_mcp_tools() -> list[dict[str, Any]]:
 
     Returns:
         List of function schemas in OpenAI format
+    Raises:
+        MCPToolsError: If error getting MCP tools
+        ValueError: If error getting MCP tools
+        FileNotFoundError: If MCP config file not found
     """
-    return get_mcp_manager().to_openai_tools()
-
-
-def execute_mcp_tool(tool_name: str, tool_kwargs: dict) -> str:
-    """Execute an MCP tool
-
-    Args:
-        tool_name: The name of the tool to execute
-        tool_kwargs: The arguments to pass to the tool
-    """
-    manager = get_mcp_manager()
-    tool = manager.get_tool(tool_name)
     try:
-
-        if isinstance(result, list) and len(result) > 0:
-            result = result[0]
-        if isinstance(result, types.TextContent):
-            return result.text
-        else:
-            return str(result)
+        return get_mcp_manager().to_openai_tools()
     except Exception as e:
-
-        console.print(error_msg, style="red")
-        return error_msg
+        raise MCPToolsError(f"Error getting MCP tools: {e}") from e
 
 
 def execute_tool_call(tool_call: ToolCall) -> Tuple[str, bool]:
```
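With `execute_mcp_tool` gone and the string-error returns removed, failures now surface as a single typed exception. A hedged caller sketch, assuming the import paths shown in the diff:

```python
from yaicli.exceptions import MCPToolsError
from yaicli.tools import get_openai_mcp_tools

try:
    tools = get_openai_mcp_tools()
except MCPToolsError as e:
    # One exception type to catch instead of inspecting returned error strings.
    tools = []
    print(f"MCP tools unavailable: {e}")
```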
yaicli/tools/mcp.py
CHANGED

```diff
@@ -200,6 +200,10 @@ class MCPClient:
         This property will be lazy loaded.
         Returns:
             List[Tool]: Tool object list from fastmcp.types.Tool
+        Raises:
+            ValueError: If error getting MCP tools
+            FileNotFoundError: If MCP config file not found
+            RuntimeError: If called while the client is not connected.
         """
         if self._tools is None:
             self._tools = self.list_tools()
@@ -213,6 +217,10 @@ class MCPClient:
         This property will be lazy loaded.
         Returns:
             Dict[str, MCP]: MCP tool object mapping
+        Raises:
+            ValueError: If error getting MCP tools
+            FileNotFoundError: If MCP config file not found
+            RuntimeError: If called while the client is not connected.
         """
         if self._tools_map is None:
             self._tools_map = {}
```
yaicli/utils.py
CHANGED

```diff
@@ -1,5 +1,6 @@
 import asyncio
 import platform
+import uuid
 from functools import wraps
 from os import getenv
 from os.path import basename, pathsep
@@ -166,3 +167,8 @@ def wrap_function(func: Callable) -> Callable:
             return func(*args, **kwargs)
 
     return wrapper
+
+
+def gen_tool_call_id() -> str:
+    """Generate a unique tool call id"""
+    return f"yaicli_{uuid.uuid4()}"
```
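`gen_tool_call_id` exists because a streamed tool-call chunk may arrive without an id (see `tool.id or gen_tool_call_id()` in mistral_provider.py), so yaicli mints a unique synthetic one. A quick demonstration of the fallback pattern:

```python
import uuid


def gen_tool_call_id() -> str:
    """Generate a unique tool call id"""
    return f"yaicli_{uuid.uuid4()}"


chunk_tool_id = None  # id missing from the stream chunk
resolved_id = chunk_tool_id or gen_tool_call_id()
print(resolved_id)  # e.g. yaicli_1b9d6bcd-bbfd-4b2d-9b5d-ab8dfbbd4bed
```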
{yaicli-0.7.0.dist-info → yaicli-0.7.2.dist-info}/METADATA
CHANGED

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: yaicli
-Version: 0.7.0
+Version: 0.7.2
 Summary: A simple CLI tool to interact with LLM
 Project-URL: Homepage, https://github.com/belingud/yaicli
 Project-URL: Repository, https://github.com/belingud/yaicli
@@ -215,6 +215,7 @@ Classifier: Programming Language :: Python :: 3
 Requires-Python: >=3.10
 Requires-Dist: click>=8.1.8
 Requires-Dist: distro>=1.9.0
+Requires-Dist: fastmcp>=2.9.2
 Requires-Dist: httpx>=0.28.1
 Requires-Dist: instructor>=1.7.9
 Requires-Dist: json-repair>=0.44.1
@@ -227,6 +228,7 @@ Provides-Extra: all
 Requires-Dist: cohere>=5.15.0; extra == 'all'
 Requires-Dist: google-genai>=1.20.0; extra == 'all'
 Requires-Dist: huggingface-hub>=0.33.0; extra == 'all'
+Requires-Dist: mistralai>=1.8.2; extra == 'all'
 Requires-Dist: ollama>=0.5.1; extra == 'all'
 Requires-Dist: volcengine-python-sdk>=3.0.15; extra == 'all'
 Provides-Extra: cohere
@@ -237,6 +239,8 @@ Provides-Extra: gemini
 Requires-Dist: google-genai>=1.20.0; extra == 'gemini'
 Provides-Extra: huggingface
 Requires-Dist: huggingface-hub>=0.33.0; extra == 'huggingface'
+Provides-Extra: mistral
+Requires-Dist: mistralai>=1.8.2; extra == 'mistral'
 Provides-Extra: ollama
 Requires-Dist: ollama>=0.5.1; extra == 'ollama'
 Description-Content-Type: text/markdown
@@ -366,6 +370,7 @@ pip install .
 - Groq
 - Huggingface
 - Minimax
+- Mistral
 - ModelScope
 - Ollama
 - Openai
@@ -546,6 +551,14 @@ API_KEY=
 MODEL=deepseek/deepseek-chat-v3-0324
 ```
 
+#### Mistral
+
+```ini
+PROVIDER=mistral
+API_KEY=
+MODEL=codestral-latest
+```
+
 #### Gemini
 
 Basic config:
````
yaicli-0.7.2.dist-info/RECORD
ADDED

```
pyproject.toml,sha256=_AwmHPI84yvRZObatNDKEp_Ee7jE6L4cKyuB7XHO5Us,2862
yaicli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
yaicli/chat.py,sha256=_emvZEdgMBth2nQGaNWPf0P45oW2k3bpuIwqsxFcM5A,13676
yaicli/cli.py,sha256=s8Bj4MSQmLblh4fHBPKS-DvJoAdMmp64KC7e7BwzmOs,23322
yaicli/config.py,sha256=_xLHgyW5dDg76bL1SyTcrQCnVs4dFpXafIS3sClshK0,6563
yaicli/console.py,sha256=vARPJd-3lafutsQWrGntQVjLrYqaJD3qisN82pmuhjU,1973
yaicli/const.py,sha256=bQCdvbg4DUvyexRdZSbwu4FBpeqDXPJNZkv_LIZYP0o,8932
yaicli/entry.py,sha256=jz4sVVy2Nr7uFMqpBhodAjtV5zflxpNHhAROKFRAH58,9633
yaicli/exceptions.py,sha256=RQjL7jVBr8v2L05uYVGWqxNNXxAJ6Q0B83mpwg3fJ58,442
yaicli/history.py,sha256=s-57X9FMsaQHF7XySq1gGH_jpd_cHHTYafYu2ECuG6M,2472
yaicli/printer.py,sha256=g1TS7aDSQlWlSrQRhvNhNqoQKlsaf1lVOyUSK6LQLNQ,7945
yaicli/render.py,sha256=k8o2P8fI44PJlyQbs7gmMiu2x2prwajdWn5JIt15BIA,505
yaicli/role.py,sha256=PfwiVJIlzg7EzlvMM-kIy6vBK0d5d_J4M1I_fIZGnWk,7399
yaicli/schemas.py,sha256=Ty2ybCvld-ritgBZoI3RR93vYfw9LUNqkR8xk8VRZ2A,762
yaicli/utils.py,sha256=1gveb8OFuzi5iQxmpb6mB64IJcnUVY6C0XlXa00sXPk,5674
yaicli/functions/__init__.py,sha256=o2xwGvLKrvN8Wghhf_bU1klQfFRHW3g-f2BR968g31g,1652
yaicli/functions/buildin/execute_shell_command.py,sha256=unl1-F8p6QZajeHdA0u5UpURMJM0WhdWMUWCCCHVRcI,1320
yaicli/llms/__init__.py,sha256=x78cJujrJkelXPnzHS6pzHkITZdgLYZqJMnrMHbptoc,134
yaicli/llms/client.py,sha256=yY8MoqSt5ohLTLROVWYdPI003apWYFyc8RHzCgIg2ms,5549
yaicli/llms/provider.py,sha256=aUKJhbJN4uSkjE3mH-jwRXLf_DHTpJjB98Tqr_k3kp8,4488
yaicli/llms/providers/ai21_provider.py,sha256=1FuGDqMYPEhbG-LZT6pFlE9Poi58H3Cztg3aLOCPDVo,4292
yaicli/llms/providers/chatglm_provider.py,sha256=zno_AbFESsRG3E5-IVnqMc1uA-jTuE1u4u5gU0BkAkQ,7421
yaicli/llms/providers/chutes_provider.py,sha256=bskwCEPNEQbIYbGm7Ck73tTDdaS4WBty3L439VWRDVo,436
yaicli/llms/providers/cohere_provider.py,sha256=1UPzNqNOwM4_dsP4kvUaL9O6_bKjxm1lO6A0lM7hgS4,10959
yaicli/llms/providers/deepseek_provider.py,sha256=UWapiKMu-GLHxf0v6yJYnFOOw8e2yZ54JzzBxzO-Amc,443
yaicli/llms/providers/doubao_provider.py,sha256=Tr0EP2fDh9txI-8dp_7BVAMswMkbw0cjBtZd6gI12v8,1453
yaicli/llms/providers/gemini_provider.py,sha256=78mOQEcf4uWzNduf8-tH0uMyka7xFbHNRnQQvHu_csU,8113
yaicli/llms/providers/groq_provider.py,sha256=ksyibvfWp6i6YbMMCCFMmbp_9Y__0s1sbg06F8oWcb4,1803
yaicli/llms/providers/huggingface_provider.py,sha256=XigSh4HDx00aYtBivMc2rwRwW6y6Nf0XgwEiFvcil2E,1239
yaicli/llms/providers/infiniai_provider.py,sha256=iFTrsx00WawmoHHrAMQ_KlmjU42bgbkOUYAyRLgZ8sA,741
yaicli/llms/providers/minimax_provider.py,sha256=xtXV0zPUWOPMdme6lkpG__rUkgegOGifDt9x9mBzcG4,441
yaicli/llms/providers/mistral_provider.py,sha256=Fzo8E_XG0ku8EK1m8meZEbEFoJxxQlZJDKON8GMb8pE,8392
yaicli/llms/providers/modelscope_provider.py,sha256=NDR1HGqtNco0C1kRVYBGwyFeQH7RevRTHLhKj1zO6LM,459
yaicli/llms/providers/ollama_provider.py,sha256=Qei9dtgKCa1BjjufxLzHFCdZ14idXEdcvCe1m9MfKzk,7013
yaicli/llms/providers/openai_provider.py,sha256=rrWedsfHKgXiC4VJ9zWG2Q7ArTL98vSbbzgUhDklh3o,9931
yaicli/llms/providers/openrouter_provider.py,sha256=kiliabnpRuSpQ1ry7845BcZB_vMeGb-bvghwvgiKkdI,448
yaicli/llms/providers/sambanova_provider.py,sha256=n7CJ5SPPPpkKLY7PBZz_ynRMEyweKTXCR7bhkONs_c8,1756
yaicli/llms/providers/siliconflow_provider.py,sha256=GzWXeY_DH8Q5OGp3FXn3TesYAPi0BghRGlBbIkE9yDE,451
yaicli/llms/providers/targon_provider.py,sha256=zIM-B9ICeykF48U_3HlPgl28XA79lh4dC7Mv76ZrJ5E,437
yaicli/llms/providers/vertexai_provider.py,sha256=_ddrse1LfXRChTgkvxUlexyfJlfr0sVJH-Rmno3djSI,636
yaicli/llms/providers/xai_provider.py,sha256=Q6iOvJZOXIAwRiiHMKEBgq8-W6SGVZ9QD1_532bNYfo,199
yaicli/llms/providers/yi_provider.py,sha256=_B0Qxdpwn5rC2CXggv-LT6tCyb_AkmlT4HedO0UHdLo,443
yaicli/tools/__init__.py,sha256=8H9s4L-aVoUxgJjHDvDLNN7AHINu82D5vc7lXYDhKOU,3637
yaicli/tools/function.py,sha256=1yXnpOg7Y2sw_LwTOBH7042cHeBoBCVJJMQTVyQh_Hw,2802
yaicli/tools/mcp.py,sha256=zKRzWd3SGPRazZg3GnSc-dfOMOMnPS7kSnw5pujk9io,15400
yaicli-0.7.2.dist-info/METADATA,sha256=CpphNmQ5G6tasiM663jVlTL5X9F1wj-OshlcBnD91uM,64568
yaicli-0.7.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
yaicli-0.7.2.dist-info/entry_points.txt,sha256=iYVyQP0PJIm9tQnlQheqT435kK_xdGoi5j9aswGV9hA,66
yaicli-0.7.2.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
yaicli-0.7.2.dist-info/RECORD,,
```
yaicli-0.7.0.dist-info/RECORD
DELETED

```
pyproject.toml,sha256=IGnWg-I9UrjiWbtuBK0GwEnTrrsvy6ZCN3GyfAlsGSU,2756
yaicli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
yaicli/chat.py,sha256=_emvZEdgMBth2nQGaNWPf0P45oW2k3bpuIwqsxFcM5A,13676
yaicli/cli.py,sha256=s8Bj4MSQmLblh4fHBPKS-DvJoAdMmp64KC7e7BwzmOs,23322
yaicli/config.py,sha256=_xLHgyW5dDg76bL1SyTcrQCnVs4dFpXafIS3sClshK0,6563
yaicli/console.py,sha256=vARPJd-3lafutsQWrGntQVjLrYqaJD3qisN82pmuhjU,1973
yaicli/const.py,sha256=O4hNk3ztjJ1o26lMjM1GK5Jpy-oA9NF69Gy528_ccEM,8861
yaicli/entry.py,sha256=jz4sVVy2Nr7uFMqpBhodAjtV5zflxpNHhAROKFRAH58,9633
yaicli/exceptions.py,sha256=WBYg8OTJJzaj7lt6HE7ZyBoe5T6A3yZRNCRfWd4iN0c,372
yaicli/history.py,sha256=s-57X9FMsaQHF7XySq1gGH_jpd_cHHTYafYu2ECuG6M,2472
yaicli/printer.py,sha256=g1TS7aDSQlWlSrQRhvNhNqoQKlsaf1lVOyUSK6LQLNQ,7945
yaicli/render.py,sha256=k8o2P8fI44PJlyQbs7gmMiu2x2prwajdWn5JIt15BIA,505
yaicli/role.py,sha256=PfwiVJIlzg7EzlvMM-kIy6vBK0d5d_J4M1I_fIZGnWk,7399
yaicli/schemas.py,sha256=Ty2ybCvld-ritgBZoI3RR93vYfw9LUNqkR8xk8VRZ2A,762
yaicli/utils.py,sha256=vCc_HoEKakA8HJ2m7_dIiIvMAIKEFlDpv1w1Yial-EE,5552
yaicli/functions/__init__.py,sha256=o2xwGvLKrvN8Wghhf_bU1klQfFRHW3g-f2BR968g31g,1652
yaicli/functions/buildin/execute_shell_command.py,sha256=unl1-F8p6QZajeHdA0u5UpURMJM0WhdWMUWCCCHVRcI,1320
yaicli/llms/__init__.py,sha256=x78cJujrJkelXPnzHS6pzHkITZdgLYZqJMnrMHbptoc,134
yaicli/llms/client.py,sha256=yjv8-DjVORdn1PKOC6t6Ei0uB3dz5GQ2bC3zAdKGALg,5759
yaicli/llms/provider.py,sha256=jF15kmY_tZVOjlw0fbHQkEvlmOX57-HBhILzG0KvXyo,3412
yaicli/llms/providers/ai21_provider.py,sha256=1zHG1xYYL4zEA_hNa0YqmplC00evCKuZPU7r2v54Q8o,4551
yaicli/llms/providers/chatglm_provider.py,sha256=zno_AbFESsRG3E5-IVnqMc1uA-jTuE1u4u5gU0BkAkQ,7421
yaicli/llms/providers/chutes_provider.py,sha256=mtvWvRRfHPH3JFfzym87wXtPNiMpLnur3805N9acx7E,882
yaicli/llms/providers/cohere_provider.py,sha256=1UPzNqNOwM4_dsP4kvUaL9O6_bKjxm1lO6A0lM7hgS4,10959
yaicli/llms/providers/deepseek_provider.py,sha256=iIV97x2ZCcwhGkshc8wpRi-YAnAnmo0n-YRegPlaOwQ,488
yaicli/llms/providers/doubao_provider.py,sha256=Tr0EP2fDh9txI-8dp_7BVAMswMkbw0cjBtZd6gI12v8,1453
yaicli/llms/providers/gemini_provider.py,sha256=78mOQEcf4uWzNduf8-tH0uMyka7xFbHNRnQQvHu_csU,8113
yaicli/llms/providers/groq_provider.py,sha256=EiS1Yxw5jbAUBFCRYsJ57KYgZPk6oH-_gD72OfW8Oik,1358
yaicli/llms/providers/huggingface_provider.py,sha256=XigSh4HDx00aYtBivMc2rwRwW6y6Nf0XgwEiFvcil2E,1239
yaicli/llms/providers/infiniai_provider.py,sha256=8-nU6QE58PRoZL9b_HzbPp4yi6OGm7rXtfi9z7bJMOg,786
yaicli/llms/providers/minimax_provider.py,sha256=W-j3dzrYMEv14bYt2pCPvPUxvxsUs-iMAcGB9yXakFs,744
yaicli/llms/providers/modelscope_provider.py,sha256=qWM0T7r0Zf8k3pLzjj7_IFdnmnx7S3rJO0f9rRm8-_A,504
yaicli/llms/providers/ollama_provider.py,sha256=pjpYjfnHWnExweZi1KGbT07JGkcxzKPhqICo8dD82D0,6967
yaicli/llms/providers/openai_provider.py,sha256=96vqKiVScULb9rfCuzrI7e9wCgXiqjc2Lx9XP8zCf9Q,10546
yaicli/llms/providers/openrouter_provider.py,sha256=R-7FrUrCAKPZ3gbnuo0M6rPlVw1mvSBjbLGs_FtZWM0,732
yaicli/llms/providers/sambanova_provider.py,sha256=FFLrsvARt1UPAFWWgiuB6zvGzGKdtehKL58HdE1fo_M,2254
yaicli/llms/providers/siliconflow_provider.py,sha256=CW2VSt6evUyFy21vN84Nvmw1P0JpmHBLznsgiXMnHM0,496
yaicli/llms/providers/targon_provider.py,sha256=RQ808eS9lvsyvlzyKaQYcN0NimbpoNWgjHUzY1gLNs4,717
yaicli/llms/providers/vertexai_provider.py,sha256=_ddrse1LfXRChTgkvxUlexyfJlfr0sVJH-Rmno3djSI,636
yaicli/llms/providers/xai_provider.py,sha256=Q6iOvJZOXIAwRiiHMKEBgq8-W6SGVZ9QD1_532bNYfo,199
yaicli/llms/providers/yi_provider.py,sha256=EnTm9qTxHPnzERsKqgGnzRIVhXFcAEdYqtOra65pGmY,719
yaicli/tools/__init__.py,sha256=62kSqvh232jog_pb85Tsx4Pe_rI9CAODGMTxAXdKzy0,4112
yaicli/tools/function.py,sha256=1yXnpOg7Y2sw_LwTOBH7042cHeBoBCVJJMQTVyQh_Hw,2802
yaicli/tools/mcp.py,sha256=-V17cDsQvmdb2eeVnuC_ypg5Rn6f10dUjEptNK1kTxU,15004
yaicli-0.7.0.dist-info/METADATA,sha256=KnO0NulMNsdWI2Zru1iKvpspzg-8uBZqv5AeXrX1SeU,64329
yaicli-0.7.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
yaicli-0.7.0.dist-info/entry_points.txt,sha256=iYVyQP0PJIm9tQnlQheqT435kK_xdGoi5j9aswGV9hA,66
yaicli-0.7.0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
yaicli-0.7.0.dist-info/RECORD,,
```
File without changes
|
File without changes
|
File without changes
|