yaicli 0.6.2__py3-none-any.whl → 0.6.3__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions exactly as they appear in their public registries.
- pyproject.toml +10 -3
- yaicli/cli.py +1 -8
- yaicli/const.py +1 -1
- yaicli/llms/__init__.py +0 -9
- yaicli/llms/provider.py +2 -0
- yaicli/llms/providers/gemini_provider.py +191 -0
- yaicli/llms/providers/openai_provider.py +14 -7
- yaicli/llms/providers/vertexai_provider.py +18 -0
- {yaicli-0.6.2.dist-info → yaicli-0.6.3.dist-info}/METADATA +5 -2
- {yaicli-0.6.2.dist-info → yaicli-0.6.3.dist-info}/RECORD +13 -11
- {yaicli-0.6.2.dist-info → yaicli-0.6.3.dist-info}/WHEEL +0 -0
- {yaicli-0.6.2.dist-info → yaicli-0.6.3.dist-info}/entry_points.txt +0 -0
- {yaicli-0.6.2.dist-info → yaicli-0.6.3.dist-info}/licenses/LICENSE +0 -0
pyproject.toml
CHANGED
@@ -1,10 +1,10 @@
 [project]
 name = "yaicli"
-version = "0.6.2"
+version = "0.6.3"
 description = "A simple CLI tool to interact with LLM"
 authors = [{ name = "belingud", email = "im.victor@qq.com" }]
 readme = "README.md"
-requires-python = ">=3.
+requires-python = ">=3.10"
 license = { file = "LICENSE" }
 classifiers = [
     "Programming Language :: Python :: 3",
@@ -65,10 +65,16 @@ ai = "yaicli.entry:app"
 yaicli = "yaicli.entry:app"
 
 [project.optional-dependencies]
+all = [
+    "volcengine-python-sdk>=3.0.15",
+    "ollama>=0.5.1",
+    "cohere>=5.15.0",
+    "google-genai>=1.20.0",
+]
 doubao = ["volcengine-python-sdk>=3.0.15"]
 ollama = ["ollama>=0.5.1"]
 cohere = ["cohere>=5.15.0"]
-
+gemini = ["google-genai>=1.20.0"]
 
 [tool.pytest.ini_options]
 testpaths = ["tests"]
@@ -91,6 +97,7 @@ dev = [
     "pytest>=8.3.5",
     "pytest-cov>=6.1.1",
     "ruff>=0.11.2",
+    "tox>=4.27.0",
 ]
 
 [tool.isort]

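Note on the new extras: `gemini` (and the aggregate `all` extra) pull in google-genai only on request, e.g. `pip install 'yaicli[gemini]'`, so the Gemini/Vertex AI providers below must tolerate the dependency being absent. A minimal probe for that situation, as a sketch (the function name and error message here are illustrative, not yaicli's actual handling):

    import importlib.util

    def gemini_available() -> bool:
        """Return True if the optional google-genai package is importable."""
        try:
            return importlib.util.find_spec("google.genai") is not None
        except ModuleNotFoundError:  # the "google" namespace package itself is missing
            return False

    if not gemini_available():
        raise SystemExit("google-genai not installed; try: pip install 'yaicli[gemini]'")
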
yaicli/cli.py
CHANGED
@@ -384,14 +384,7 @@ class CLI:
         self._check_history_len()
 
         if self.current_mode == EXEC_MODE:
-
-            # in case of tool use.
-            final_content = ""
-            if self.chat.history:
-                last_message = self.chat.history[-1]
-                if last_message.role == "assistant":
-                    final_content = last_message.content or ""
-            self._confirm_and_execute(final_content)
+            self._confirm_and_execute(content)
         return True
 
     def _confirm_and_execute(self, raw_content: str) -> None:

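The removed block re-read the last assistant message from chat history so that, after a tool-call round trip, exec mode would execute the model's final answer rather than an intermediate one. Judging by the replacement, `content` now already holds that final assistant output by the time this branch runs, making the history walk redundant.
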
yaicli/const.py
CHANGED
@@ -68,7 +68,7 @@ DEFAULT_JUSTIFY: JustifyMethod = "default"
 DEFAULT_ROLE_MODIFY_WARNING: BOOL_STR = "true"
 DEFAULT_ENABLE_FUNCTIONS: BOOL_STR = "true"
 DEFAULT_SHOW_FUNCTION_OUTPUT: BOOL_STR = "true"
-DEFAULT_REASONING_EFFORT: Optional[Literal["low", "high", "medium"]] =
+DEFAULT_REASONING_EFFORT: Optional[Literal["low", "high", "medium"]] = ""
 
 
 SHELL_PROMPT = """You are YAICLI, a shell command generator.

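The empty-string default works together with the openai_provider change further down, which now skips empty-string config values when assembling completion params, so `reasoning_effort` is simply omitted unless the user sets it. A toy sketch of that filtering, with made-up values:

    # Illustrative values; the real keys come from yaicli's config.
    config = {"REASONING_EFFORT": "", "TEMPERATURE": 0.7, "TOP_P": None}
    params_keys = {
        "reasoning_effort": "REASONING_EFFORT",
        "temperature": "TEMPERATURE",
        "top_p": "TOP_P",
    }
    completion_params = {
        api_key: config[config_key]
        for api_key, config_key in params_keys.items()
        if config.get(config_key) is not None and config[config_key] != ""
    }
    assert completion_params == {"temperature": 0.7}  # "" and None both dropped
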
yaicli/llms/__init__.py
CHANGED
@@ -1,13 +1,4 @@
-from ..config import cfg
 from .client import LLMClient
 from .provider import Provider, ProviderFactory
 
 __all__ = ["LLMClient", "Provider", "ProviderFactory"]
-
-
-class BaseProvider:
-    def __init__(self) -> None:
-        self.api_key = cfg["API_KEY"]
-        self.model = cfg["MODEL"]
-        self.base_url = cfg["BASE_URL"]
-        self.timeout = cfg["TIMEOUT"]

yaicli/llms/provider.py
CHANGED
@@ -45,6 +45,7 @@ class ProviderFactory:
         "cohere": (".providers.cohere_provider", "CohereProvider"),
         "deepseek": (".providers.deepseek_provider", "DeepSeekProvider"),
         "doubao": (".providers.doubao_provider", "DoubaoProvider"),
+        "gemini": (".providers.gemini_provider", "GeminiProvider"),
         "groq": (".providers.groq_provider", "GroqProvider"),
         "infini-ai": (".providers.infiniai_provider", "InfiniAIProvider"),
         "minimax": (".providers.minimax_provider", "MinimaxProvider"),
@@ -55,6 +56,7 @@ class ProviderFactory:
         "sambanova": (".providers.sambanova_provider", "SambanovaProvider"),
         "siliconflow": (".providers.siliconflow_provider", "SiliconFlowProvider"),
         "targon": (".providers.targon_provider", "TargonProvider"),
+        "vertexai": (".providers.vertexai_provider", "VertexAIProvider"),
         "xai": (".providers.xai_provider", "XaiProvider"),
         "yi": (".providers.yi_provider", "YiProvider"),
     }

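Both new entries follow the factory's existing (module path, class name) convention, which defers importing a provider — and its optional SDK — until that provider is actually selected. A sketch of how such a registry resolves lazily; `load_provider` is hypothetical, not ProviderFactory's actual method:

    import importlib

    PROVIDERS = {
        "gemini": (".providers.gemini_provider", "GeminiProvider"),
        "vertexai": (".providers.vertexai_provider", "VertexAIProvider"),
    }

    def load_provider(name: str, package: str = "yaicli.llms"):
        # Import the module relative to the package, then pull out the class.
        module_path, class_name = PROVIDERS[name]
        module = importlib.import_module(module_path, package=package)
        return getattr(module, class_name)
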
yaicli/llms/providers/gemini_provider.py
ADDED
@@ -0,0 +1,191 @@
+import json
+from functools import wraps
+from typing import Any, Callable, Dict, Generator, List
+
+import google.genai as genai
+from google.genai import types
+
+from ...config import cfg
+from ...console import get_console
+from ...schemas import ChatMessage, LLMResponse
+from ...tools import get_func_name_map
+from ..provider import Provider
+
+
+def wrap_function(func):
+    @wraps(func)
+    def wrapper(*args, **kwargs):
+        return func(*args, **kwargs)
+
+    return wrapper
+
+
+class GeminiProvider(Provider):
+    """Gemini provider implementation based on google-genai library"""
+
+    DEFAULT_BASE_URL = "https://generativelanguage.googleapis.com/v1beta"
+
+    def __init__(self, config: dict = cfg, verbose: bool = False, **kwargs):
+        self.config = config
+        self.enable_function = self.config["ENABLE_FUNCTIONS"]
+        self.verbose = verbose
+
+        # Initialize client
+        self.client_params = self.get_client_params()
+        self.client = genai.Client(**self.client_params)
+        self.console = get_console()
+
+    def get_client_params(self) -> Dict[str, Any]:
+        """Get the client parameters"""
+        # Initialize client params
+        return {
+            "api_key": self.config["API_KEY"],
+        }
+
+    def get_chat_config(self):
+        http_options_map = {
+            "timeout": self.config["TIMEOUT"] * 1000,  # Timeout for the request in milliseconds.
+            "headers": {**self.config["EXTRA_HEADERS"], "X-Client": self.APP_NAME, "Referer": self.APP_REFERER},
+        }
+        if self.config.get("BASE_URL"):
+            http_options_map["base_url"] = self.config["BASE_URL"]
+        if self.config.get("API_VERSION"):
+            # Specifies the version of the API to use.
+            http_options_map["api_version"] = self.config["API_VERSION"]
+        http_options = types.HttpOptions(**http_options_map)
+        config_map = {
+            "max_output_tokens": self.config["MAX_TOKENS"],
+            "temperature": self.config["TEMPERATURE"],
+            "top_p": self.config["TOP_P"],
+            "http_options": http_options,
+        }
+        if self.config.get("TOP_K"):
+            config_map["top_k"] = self.config["TOP_K"]
+        if self.config.get("PRESENCE_PENALTY"):
+            config_map["presence_penalty"] = self.config["PRESENCE_PENALTY"]
+        if self.config.get("FREQUENCY_PENALTY"):
+            config_map["frequency_penalty"] = self.config["FREQUENCY_PENALTY"]
+        if self.config.get("SEED"):
+            config_map["seed"] = self.config["SEED"]
+        # Indicates whether to include thoughts in the response. If true, thoughts are returned only if the model supports thought and thoughts are available.
+        thinking_config_map = {"include_thoughts": self.config.get("INCLUDE_THOUGHTS", True)}
+        if self.config.get("THINKING_BUDGET"):
+            thinking_config_map["thinking_budget"] = int(self.config["THINKING_BUDGET"])
+        config_map["thinking_config"] = types.ThinkingConfig(**thinking_config_map)
+        config = types.GenerateContentConfig(**config_map)
+        if self.enable_function:
+            # TODO: support disable automatic function calling
+            # config.automatic_function_calling = types.AutomaticFunctionCallingConfig(disable=False)
+            config.tools = self.gen_gemini_functions()
+        return config
+
+    def _convert_messages(self, messages: List[ChatMessage]) -> List[types.Content]:
+        """Convert a list of ChatMessage objects to a list of Gemini Content objects."""
+        converted_messages = []
+        for msg in messages:
+            if msg.role == "system":
+                continue
+            content = types.Content(role=self._map_role(msg.role), parts=[types.Part(text=msg.content)])
+            if msg.role == "tool":
+                content.role = "user"
+                content.parts = [types.Part.from_function_response(name=msg.name, response={"result": msg.content})]
+            converted_messages.append(content)
+        return converted_messages
+
+    def _map_role(self, role: str) -> str:
+        """Map OpenAI roles to Gemini roles"""
+        # Gemini uses "user", "model" instead of "user", "assistant"
+        if role == "assistant":
+            return "model"
+        return role
+
+    def gen_gemini_functions(self) -> List[Callable[..., Any]]:
+        """Wrap Gemini functions from OpenAI functions for automatic function calling"""
+        func_name_map = get_func_name_map()
+        if not func_name_map:
+            return []
+        funcs = []
+        for func_name, func in func_name_map.items():
+            wrapped_func = wrap_function(func.execute)
+            wrapped_func.__name__ = func_name
+            wrapped_func.__doc__ = func.__doc__
+            funcs.append(wrapped_func)
+        return funcs
+
+    def completion(
+        self,
+        messages: List[ChatMessage],
+        stream: bool = False,
+    ) -> Generator[LLMResponse, None, None]:
+        """
+        Send completion request to Gemini and return responses.
+
+        Args:
+            messages: List of chat messages to send
+            stream: Whether to stream the response
+
+        Yields:
+            LLMResponse: Response objects containing content, tool calls, etc.
+
+        Raises:
+            ValueError: If messages is empty or invalid
+            APIError: If API request fails
+        """
+        gemini_messages = self._convert_messages(messages)
+        if self.verbose:
+            self.console.print("Messages:")
+            self.console.print(gemini_messages)
+        chat_config = self.get_chat_config()
+        chat_config.system_instruction = messages[0].content
+        chat = self.client.chats.create(model=self.config["MODEL"], history=gemini_messages, config=chat_config)
+        message = messages[-1].content
+
+        if stream:
+            response = chat.send_message_stream(message=message)
+            yield from self._handle_stream_response(response)
+        else:
+            response = chat.send_message(message=message)
+            yield from self._handle_normal_response(response)
+
+    def _handle_normal_response(self, response) -> Generator[LLMResponse, None, None]:
+        """Handle normal (non-streaming) response"""
+        # TODO: support disable automatic function calling
+        if not response or not response.candidates:
+            yield LLMResponse(
+                content=json.dumps(response.to_json_dict()),
+                finish_reason="stop",
+            )
+            return
+        for part in response.candidates[0].content.parts:
+            if part.thought:
+                yield LLMResponse(reasoning=part.text, content=None, finish_reason="stop")
+            else:
+                yield LLMResponse(reasoning=None, content=part.text, finish_reason="stop")
+
+    def _handle_stream_response(self, response) -> Generator[LLMResponse, None, None]:
+        """Handle streaming response from Gemini API"""
+        # Initialize tool call object to accumulate tool call data across chunks
+        # TODO: support disable automatic function calling
+        tool_call = None
+        for chunk in response:
+            if not chunk.candidates:
+                continue
+            candidate = chunk.candidates[0]
+            finish_reason = candidate.finish_reason
+            for part in chunk.candidates[0].content.parts:
+                if part.thought:
+                    reasoning = part.text
+                    content = None
+                else:
+                    content = part.text
+                    reasoning = None
+                yield LLMResponse(
+                    reasoning=reasoning,
+                    content=content,
+                    tool_call=tool_call if finish_reason == "tool_calls" else None,
+                    finish_reason=finish_reason or None,
+                )
+
+    def detect_tool_role(self) -> str:
+        """Return the role that should be used for tool responses"""
+        return "user"

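In short, each completion call builds a fresh chat session: the first (system) message becomes `system_instruction`, earlier turns become Gemini `history`, and only the last message is actually sent. A hypothetical driver, assuming `ChatMessage(role=..., content=...)` construction and illustrative values for the config keys the provider reads:

    from yaicli.llms.providers.gemini_provider import GeminiProvider
    from yaicli.schemas import ChatMessage

    config = {
        "API_KEY": "...",             # placeholder
        "MODEL": "gemini-2.0-flash",  # placeholder model name
        "ENABLE_FUNCTIONS": False,
        "TIMEOUT": 60,
        "EXTRA_HEADERS": {},
        "MAX_TOKENS": 1024,
        "TEMPERATURE": 0.5,
        "TOP_P": 1.0,
    }
    provider = GeminiProvider(config=config)
    messages = [
        ChatMessage(role="system", content="You are a concise CLI assistant."),
        ChatMessage(role="user", content="Explain `tar -xzf` in one line."),
    ]
    for resp in provider.completion(messages, stream=True):
        if resp.reasoning:           # thought parts, when the model emits them
            print("[thinking]", resp.reasoning)
        elif resp.content:
            print(resp.content, end="")
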
yaicli/llms/providers/openai_provider.py
CHANGED
@@ -81,7 +81,7 @@ class OpenAIProvider(Provider):
         completion_params = {}
         params_keys = self.get_completion_params_keys()
         for api_key, config_key in params_keys.items():
-            if self.config.get(config_key, None) is not None:
+            if self.config.get(config_key, None) is not None and self.config[config_key] != "":
                 completion_params[api_key] = self.config[config_key]
         return completion_params
 
@@ -140,12 +140,19 @@ class OpenAIProvider(Provider):
         if tools:
             params["tools"] = tools
 
-        if stream:
-            response = self.client.chat.completions.create(**params)
-            yield from self._handle_stream_response(response)
-        else:
-            response = self.client.chat.completions.create(**params)
-            yield from self._handle_normal_response(response)
+        try:
+            if stream:
+                response = self.client.chat.completions.create(**params)
+                yield from self._handle_stream_response(response)
+            else:
+                response = self.client.chat.completions.create(**params)
+                yield from self._handle_normal_response(response)
+        except (openai.APIStatusError, openai.APIResponseValidationError) as e:
+            try:
+                body = e.response.json()
+            except Exception:
+                body = e.response.text
+            self.console.print(f"Error Response: {body}")
 
     def _handle_normal_response(self, response: ChatCompletion) -> Generator[LLMResponse, None, None]:
         """Handle normal (non-streaming) response"""

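The new try/except prints the server's raw error body instead of letting the SDK exception propagate. The same pattern in isolation — `e.response` on these openai-python exceptions is an httpx.Response, so `.json()` and `.text` are both available:

    import openai

    client = openai.OpenAI(api_key="sk-placeholder")
    try:
        client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "hi"}],
        )
    except (openai.APIStatusError, openai.APIResponseValidationError) as e:
        try:
            body = e.response.json()  # structured error payload, when present
        except Exception:
            body = e.response.text    # otherwise fall back to the raw text
        print(f"Error Response: {body}")
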
yaicli/llms/providers/vertexai_provider.py
ADDED
@@ -0,0 +1,18 @@
+from typing import Any, Dict
+
+from .gemini_provider import GeminiProvider
+
+
+class VertexAIProvider(GeminiProvider):
+    """Vertex AI provider implementation based on google-genai library"""
+
+    def get_client_params(self) -> Dict[str, Any]:
+        """Get the client parameters"""
+        # Initialize client params
+        if not self.config.get("PROJECT") or not self.config.get("LOCATION"):
+            raise ValueError("PROJECT and LOCATION are required for Vertex AI")
+        return {
+            "vertexai": True,
+            "project": self.config.get("PROJECT"),
+            "location": self.config.get("LOCATION"),
+        }

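The subclass only swaps the client construction: per google-genai, the same genai.Client targets either the Gemini Developer API (`api_key`) or Vertex AI (`vertexai=True` plus project/location). Side by side, with placeholder values:

    import google.genai as genai

    # GeminiProvider path: Gemini Developer API, keyed access.
    api_client = genai.Client(api_key="...")

    # VertexAIProvider path: routed through Vertex AI instead.
    vertex_client = genai.Client(
        vertexai=True,
        project="my-gcp-project",  # placeholder GCP project id
        location="us-central1",    # placeholder region
    )
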
{yaicli-0.6.2.dist-info → yaicli-0.6.3.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: yaicli
-Version: 0.6.2
+Version: 0.6.3
 Summary: A simple CLI tool to interact with LLM
 Project-URL: Homepage, https://github.com/belingud/yaicli
 Project-URL: Repository, https://github.com/belingud/yaicli
@@ -212,7 +212,7 @@ Keywords: ai,ai-assistant,ai-chat,ai-interaction,anthropic,chatgpt,claude,cli,co
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
-Requires-Python: >=3.
+Requires-Python: >=3.10
 Requires-Dist: click>=8.1.8
 Requires-Dist: distro>=1.9.0
 Requires-Dist: httpx>=0.28.1
@@ -225,12 +225,15 @@ Requires-Dist: socksio>=1.0.0
 Requires-Dist: typer>=0.16.0
 Provides-Extra: all
 Requires-Dist: cohere>=5.15.0; extra == 'all'
+Requires-Dist: google-genai>=1.20.0; extra == 'all'
 Requires-Dist: ollama>=0.5.1; extra == 'all'
 Requires-Dist: volcengine-python-sdk>=3.0.15; extra == 'all'
 Provides-Extra: cohere
 Requires-Dist: cohere>=5.15.0; extra == 'cohere'
 Provides-Extra: doubao
 Requires-Dist: volcengine-python-sdk>=3.0.15; extra == 'doubao'
+Provides-Extra: gemini
+Requires-Dist: google-genai>=1.20.0; extra == 'gemini'
 Provides-Extra: ollama
 Requires-Dist: ollama>=0.5.1; extra == 'ollama'
 Description-Content-Type: text/markdown

{yaicli-0.6.2.dist-info → yaicli-0.6.3.dist-info}/RECORD
CHANGED
@@ -1,10 +1,10 @@
-pyproject.toml,sha256=
+pyproject.toml,sha256=BfvXPlqqvIqhYBItAj3HclRLukitrn0kdwIYxdUJBgU,2531
 yaicli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 yaicli/chat.py,sha256=_emvZEdgMBth2nQGaNWPf0P45oW2k3bpuIwqsxFcM5A,13676
-yaicli/cli.py,sha256=
+yaicli/cli.py,sha256=Eu1CL9ZB8ElvXqpHqmoWoasC0Brc7-j_zd3RmPhdSEE,23310
 yaicli/config.py,sha256=HrWYcelLXE61XX719eVcuuo3292xxf1BNQznWdvjQFQ,6535
 yaicli/console.py,sha256=vARPJd-3lafutsQWrGntQVjLrYqaJD3qisN82pmuhjU,1973
-yaicli/const.py,sha256=
+yaicli/const.py,sha256=Uvdm1rc5zhjE2r9ioCYiSzhk8cT4mfgO2Mm4mNs71Nk,8176
 yaicli/entry.py,sha256=Q1eqLE7tcHide7ooyPO7OCJpKE2YVuxR-NNFA2Pt2Hw,8693
 yaicli/exceptions.py,sha256=WBYg8OTJJzaj7lt6HE7ZyBoe5T6A3yZRNCRfWd4iN0c,372
 yaicli/history.py,sha256=s-57X9FMsaQHF7XySq1gGH_jpd_cHHTYafYu2ECuG6M,2472
@@ -16,29 +16,31 @@ yaicli/tools.py,sha256=xw8KEs_xlSf79A2Aq1rAsUWahS6A_e5QMLt7QDXL5bs,5086
 yaicli/utils.py,sha256=bpo3Xhozpxsaci3FtEIKZ32l4ZdyWMsrHjYGX0tB4J4,4541
 yaicli/functions/__init__.py,sha256=_FJooQ9GkijG8xLwuU0cr5GBrGnC9Nc6bnCeUjrsT0k,1271
 yaicli/functions/buildin/execute_shell_command.py,sha256=unl1-F8p6QZajeHdA0u5UpURMJM0WhdWMUWCCCHVRcI,1320
-yaicli/llms/__init__.py,sha256=
+yaicli/llms/__init__.py,sha256=x78cJujrJkelXPnzHS6pzHkITZdgLYZqJMnrMHbptoc,134
 yaicli/llms/client.py,sha256=mkE9KHSuPcJfpNQXbzF2YXGkel3jrOW8KfQ3YYpaK4M,4453
-yaicli/llms/provider.py,sha256=
+yaicli/llms/provider.py,sha256=YhX6RcMQqhac4EoQTY_AXDm-jtHYfH_K7Jikqvpc3H8,3159
 yaicli/llms/providers/ai21_provider.py,sha256=SvgGj9_87KEqmxCMLbtsSkT8J3rUD7Mb21UF7pMWsks,3035
 yaicli/llms/providers/chatglm_provider.py,sha256=1xP4KVAi6SDKZ-lMi2wdzywtDydsTf6jDzh3jBBGMfA,6437
 yaicli/llms/providers/chutes_provider.py,sha256=mtvWvRRfHPH3JFfzym87wXtPNiMpLnur3805N9acx7E,882
 yaicli/llms/providers/cohere_provider.py,sha256=hc6vQxbCHz9kM2tNKK-kGkuOf4-gkskXW9ctr9V4Cxk,10837
 yaicli/llms/providers/deepseek_provider.py,sha256=VjGes_jFin5WGYNFxYKMoHwgAQX_eYbYhQKfjeh-9eI,438
 yaicli/llms/providers/doubao_provider.py,sha256=4eOdE91ITUn3uo3mvYAzdrHsuFIIBwZWib21mtZn8OY,1938
+yaicli/llms/providers/gemini_provider.py,sha256=iCRDqHRBFeTD_2NQwlsAlxFU7cKK4iyjimaPSp4VySM,7923
 yaicli/llms/providers/groq_provider.py,sha256=EiS1Yxw5jbAUBFCRYsJ57KYgZPk6oH-_gD72OfW8Oik,1358
 yaicli/llms/providers/infiniai_provider.py,sha256=1dseUIZiXsxYRATRtk_obFclyXMwi4glsP7l_tVtnv8,710
 yaicli/llms/providers/minimax_provider.py,sha256=W-j3dzrYMEv14bYt2pCPvPUxvxsUs-iMAcGB9yXakFs,744
 yaicli/llms/providers/modelscope_provider.py,sha256=BzBhYixiDEWB7gujQ0rcG__7nsv0psJRxdtYCYXBhdM,454
 yaicli/llms/providers/ollama_provider.py,sha256=pjpYjfnHWnExweZi1KGbT07JGkcxzKPhqICo8dD82D0,6967
-yaicli/llms/providers/openai_provider.py,sha256=
+yaicli/llms/providers/openai_provider.py,sha256=yl1vVKt8QzbN_dbsW_9rY8S_xkXI3Bo3Of4Cf7W3mJc,10075
 yaicli/llms/providers/openrouter_provider.py,sha256=R-7FrUrCAKPZ3gbnuo0M6rPlVw1mvSBjbLGs_FtZWM0,732
 yaicli/llms/providers/sambanova_provider.py,sha256=FFLrsvARt1UPAFWWgiuB6zvGzGKdtehKL58HdE1fo_M,2254
 yaicli/llms/providers/siliconflow_provider.py,sha256=7Ir73me9jGMO5TAZDjrAbX7tbb_QBmLjTGywY0yliqc,446
 yaicli/llms/providers/targon_provider.py,sha256=RQ808eS9lvsyvlzyKaQYcN0NimbpoNWgjHUzY1gLNs4,717
+yaicli/llms/providers/vertexai_provider.py,sha256=_ddrse1LfXRChTgkvxUlexyfJlfr0sVJH-Rmno3djSI,636
 yaicli/llms/providers/xai_provider.py,sha256=Q6iOvJZOXIAwRiiHMKEBgq8-W6SGVZ9QD1_532bNYfo,199
 yaicli/llms/providers/yi_provider.py,sha256=EnTm9qTxHPnzERsKqgGnzRIVhXFcAEdYqtOra65pGmY,719
-yaicli-0.6.2.dist-info/METADATA,sha256=
-yaicli-0.6.2.dist-info/WHEEL,sha256=
-yaicli-0.6.2.dist-info/entry_points.txt,sha256=
-yaicli-0.6.2.dist-info/licenses/LICENSE,sha256=
-yaicli-0.6.2.dist-info/RECORD,,
+yaicli-0.6.3.dist-info/METADATA,sha256=EfU2thy5G2Ge-BaCM3RT0quP2YUPz0bBWLJjKJugf_w,53677
+yaicli-0.6.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+yaicli-0.6.3.dist-info/entry_points.txt,sha256=iYVyQP0PJIm9tQnlQheqT435kK_xdGoi5j9aswGV9hA,66
+yaicli-0.6.3.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+yaicli-0.6.3.dist-info/RECORD,,

{yaicli-0.6.2.dist-info → yaicli-0.6.3.dist-info}/WHEEL
File without changes
{yaicli-0.6.2.dist-info → yaicli-0.6.3.dist-info}/entry_points.txt
File without changes
{yaicli-0.6.2.dist-info → yaicli-0.6.3.dist-info}/licenses/LICENSE
File without changes