yaicli 0.5.9__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff compares the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
Files changed (34)
  1. pyproject.toml +35 -12
  2. yaicli/cli.py +31 -20
  3. yaicli/const.py +6 -5
  4. yaicli/entry.py +1 -1
  5. yaicli/llms/__init__.py +13 -0
  6. yaicli/llms/client.py +120 -0
  7. yaicli/llms/provider.py +78 -0
  8. yaicli/llms/providers/ai21_provider.py +66 -0
  9. yaicli/llms/providers/chatglm_provider.py +139 -0
  10. yaicli/llms/providers/chutes_provider.py +14 -0
  11. yaicli/llms/providers/cohere_provider.py +298 -0
  12. yaicli/llms/providers/deepseek_provider.py +14 -0
  13. yaicli/llms/providers/doubao_provider.py +53 -0
  14. yaicli/llms/providers/groq_provider.py +16 -0
  15. yaicli/llms/providers/infiniai_provider.py +20 -0
  16. yaicli/llms/providers/minimax_provider.py +13 -0
  17. yaicli/llms/providers/modelscope_provider.py +14 -0
  18. yaicli/llms/providers/ollama_provider.py +187 -0
  19. yaicli/llms/providers/openai_provider.py +211 -0
  20. yaicli/llms/providers/openrouter_provider.py +14 -0
  21. yaicli/llms/providers/sambanova_provider.py +30 -0
  22. yaicli/llms/providers/siliconflow_provider.py +14 -0
  23. yaicli/llms/providers/targon_provider.py +14 -0
  24. yaicli/llms/providers/yi_provider.py +14 -0
  25. yaicli/printer.py +4 -16
  26. yaicli/schemas.py +12 -3
  27. yaicli/tools.py +59 -3
  28. {yaicli-0.5.9.dist-info → yaicli-0.6.1.dist-info}/METADATA +238 -32
  29. yaicli-0.6.1.dist-info/RECORD +43 -0
  30. yaicli/client.py +0 -391
  31. yaicli-0.5.9.dist-info/RECORD +0 -24
  32. {yaicli-0.5.9.dist-info → yaicli-0.6.1.dist-info}/WHEEL +0 -0
  33. {yaicli-0.5.9.dist-info → yaicli-0.6.1.dist-info}/entry_points.txt +0 -0
  34. {yaicli-0.5.9.dist-info → yaicli-0.6.1.dist-info}/licenses/LICENSE +0 -0
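
The structural change in this release: the single litellm-backed yaicli/client.py is deleted and replaced by a yaicli/llms/ package with a shared client.py and provider.py core plus one module per backend (OpenAI, Cohere, Ollama, and so on). A minimal sketch of what a provider-registry layout of this shape typically looks like; this assumes nothing about the actual contents of yaicli/llms/provider.py, and every name below is illustrative:

# Hypothetical provider-registry pattern; names are illustrative and are
# not taken from yaicli/llms/provider.py.
from abc import ABC, abstractmethod
from typing import Any, Dict, Generator, List, Type


class Provider(ABC):
    """Interface each backend module (openai, cohere, ollama, ...) would implement."""

    @abstractmethod
    def completion(self, messages: List[Any], stream: bool = False) -> Generator[Any, None, None]:
        """Yield response chunks for the given chat messages."""


_REGISTRY: Dict[str, Type[Provider]] = {}


def register(name: str):
    """Class decorator mapping a provider name to its implementation."""

    def deco(cls: Type[Provider]) -> Type[Provider]:
        _REGISTRY[name] = cls
        return cls

    return deco


def create_provider(name: str, **config: Any) -> Provider:
    """Instantiate the provider registered under the given name."""
    try:
        return _REGISTRY[name](**config)
    except KeyError:
        raise ValueError(f"Unknown provider: {name!r}") from None

Under a layout like this, a new backend such as chutes or targon can be a ~14-line module that subclasses an existing provider and registers itself, which is consistent with the small line counts in the file list above.
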
yaicli/client.py DELETED
@@ -1,391 +0,0 @@
- import json
- from dataclasses import dataclass, field
- from typing import Any, Dict, Generator, List, Optional, Union, cast
-
- import litellm
- from json_repair import repair_json
- from litellm.types.utils import Choices
- from litellm.types.utils import Message as ChoiceMessage
- from litellm.types.utils import ModelResponse
- from rich.panel import Panel
-
- from .config import cfg
- from .console import get_console
- from .schemas import ChatMessage, LLMResponse, ToolCall
- from .tools import (
-     Function,
-     FunctionName,
-     get_function,
-     get_openai_schemas,
-     list_functions,
- )
-
- litellm.drop_params = True
- console = get_console()
-
-
- class RefreshLive:
-     """Refresh live display"""
-
-
- class StopLive:
-     """Stop live display"""
-
-
- @dataclass
- class LitellmClient:
-     """OpenAI provider implementation"""
-
-     api_key: str = field(default_factory=lambda: cfg["API_KEY"])
-     model: str = field(default_factory=lambda: f"{cfg['PROVIDER']}/{cfg['MODEL']}")
-     base_url: Optional[str] = field(default_factory=lambda: cfg["BASE_URL"])
-     timeout: int = field(default_factory=lambda: cfg["TIMEOUT"])
-
-     verbose: bool = False
-
-     def __post_init__(self) -> None:
-         """Initialize OpenAI client"""
-         self.pre_tool_call_id = None
-         if cfg["PROVIDER"] == "openrouter":
-             cfg["EXTRA_HEADERS"].update({"X-Title": "Yaicli", "HTTP-Referer": "https://github.com/belingud/yaicli"})
-
-     def _convert_messages(self, messages: List[ChatMessage]) -> List[Dict[str, Any]]:
-         """Convert message format to OpenAI API required format"""
-         openai_messages = []
-         for msg in messages:
-             if msg.tool_call_id:
-                 openai_messages.append(
-                     {"role": msg.role, "content": msg.content, "tool_call_id": msg.tool_call_id, "name": msg.name}
-                 )
-             else:
-                 openai_messages.append({"role": msg.role, "content": msg.content})
-         return openai_messages
-
-     def _convert_functions(self, _: List[Function]) -> List[Dict[str, Any]]:
-         """Convert function format to OpenAI API required format"""
-         return get_openai_schemas()
-
-     def _execute_tool_call(self, tool_call: ToolCall) -> tuple[str, bool]:
-         """Call function and return result"""
-         console.print(f"@Function call: {tool_call.name}({tool_call.arguments})", style="blue")
-
-         # 1. Get function
-         try:
-             function = get_function(FunctionName(tool_call.name))
-         except ValueError as e:
-             error_msg = f"Function '{tool_call.name!r}' not exists: {e}"
-             console.print(error_msg, style="red")
-             return error_msg, False
-
-         # 2. Parse function arguments
-         try:
-             arguments = repair_json(tool_call.arguments, return_objects=True)
-             if not isinstance(arguments, dict):
-                 error_msg = f"Invalid arguments type: {arguments!r}, should be JSON object"
-                 console.print(error_msg, style="red")
-                 return error_msg, False
-             arguments = cast(dict, arguments)
-         except Exception as e:
-             error_msg = f"Invalid arguments from llm: {e}\nRaw arguments: {tool_call.arguments!r}"
-             console.print(error_msg, style="red")
-             return error_msg, False
-
-         # 3. execute function
-         try:
-             function_result = function.execute(**arguments)
-             if cfg["SHOW_FUNCTION_OUTPUT"]:
-                 panel = Panel(
-                     function_result,
-                     title="Function output",
-                     title_align="left",
-                     expand=False,
-                     border_style="blue",
-                     style="dim",
-                 )
-                 console.print(panel)
-             return function_result, True
-         except Exception as e:
-             error_msg = f"Call function error: {e}\nFunction name: {tool_call.name!r}\nArguments: {arguments!r}"
-             console.print(error_msg, style="red")
-             return error_msg, False
-
-     def completion(
-         self,
-         messages: List[ChatMessage],
-         stream: bool = False,
-         recursion_depth: int = 0,
-     ) -> Generator[Union[LLMResponse, RefreshLive], None, None]:
-         """Send message to OpenAI with a maximum recursion depth of 5"""
-         if self.verbose:
-             console.print(messages)
-         openai_messages = self._convert_messages(messages)
-
-         # Prepare request parameters
-         params: Dict[str, Any] = {
-             "model": self.model,
-             "messages": openai_messages,
-             "temperature": cfg["TEMPERATURE"],
-             "top_p": cfg["TOP_P"],
-             "stream": stream,
-             # Openai: This value is now deprecated in favor of max_completion_tokens.
-             "max_tokens": cfg["MAX_TOKENS"],
-             "max_completion_tokens": cfg["MAX_TOKENS"],
-             # litellm api params
-             "api_key": self.api_key,
-             "base_url": self.base_url,
-             "reasoning_effort": cfg["REASONING_EFFORT"],
-         }
-
-         # Add optional parameters
-         if cfg["EXTRA_HEADERS"]:
-             params["extra_headers"] = cfg["EXTRA_HEADERS"]
-         if cfg["EXTRA_BODY"]:
-             params["extra_body"] = cfg["EXTRA_BODY"]
-         if cfg["ENABLE_FUNCTIONS"]:
-             params["tools"] = self._convert_functions(list_functions())
-             params["tool_choice"] = "auto"
-             params["parallel_tool_calls"] = False
-         # Send request
-         response = litellm.completion(**params)
-         if stream:
-             response = cast(litellm.CustomStreamWrapper, response)
-             llm_content_generator = self._handle_stream_response(response)
-         else:
-             response = cast(ModelResponse, response)
-             llm_content_generator = self._handle_normal_response(response)
-         for llm_content in llm_content_generator:
-             yield llm_content
-             if llm_content.tool_call:
-                 if not self.pre_tool_call_id:
-                     self.pre_tool_call_id = llm_content.tool_call.id
-                 elif self.pre_tool_call_id == llm_content.tool_call.id:
-                     continue
-                 # Let live display know we are in next run
-                 yield RefreshLive()
-
-                 # execute function call
-                 function_result, _ = self._execute_tool_call(llm_content.tool_call)
-
-                 # add function call result
-                 messages.append(
-                     ChatMessage(
-                         role=self.detect_tool_role(cfg["PROVIDER"]),
-                         content=function_result,
-                         name=llm_content.tool_call.name,
-                         tool_call_id=llm_content.tool_call.id,
-                     )
-                 )
-                 # Check if we've exceeded the maximum recursion depth
-                 if recursion_depth >= 5:
-                     console.print("Maximum recursion depth (5) reached, stopping further tool calls", style="yellow")
-                     return
-
-                 # Continue with recursion if within limits
-                 if stream:
-                     yield from self.completion(messages, stream=stream, recursion_depth=recursion_depth + 1)
-                 else:
-                     yield from self.completion(messages, stream=stream, recursion_depth=recursion_depth + 1)
-         # yield StopLive()
-
-     def stream_completion(self, messages: List[ChatMessage], stream: bool = True) -> Generator[LLMResponse, None, None]:
-         openai_messages = self._convert_messages(messages)
-         params: Dict[str, Any] = {
-             "model": self.model,
-             "messages": openai_messages,
-             "temperature": cfg["TEMPERATURE"],
-             "top_p": cfg["TOP_P"],
-             "stream": stream,
-             # Openai: This value is now deprecated in favor of max_completion_tokens.
-             "max_tokens": cfg["MAX_TOKENS"],
-             "max_completion_tokens": cfg["MAX_TOKENS"],
-             # litellm api params
-             "api_key": self.api_key,
-             "base_url": self.base_url,
-         }
-         # Add optional parameters
-         if cfg["ENABLE_FUNCTIONS"]:
-             params["tools"] = self._convert_functions(list_functions())
-             params["tool_choice"] = "auto"
-             params["parallel_tool_calls"] = False
-
-         # Send request
-         response = litellm.completion(**params)
-         response = cast(litellm.CustomStreamWrapper, response)
-         llm_content_generator = self._handle_stream_response(response)
-         for llm_content in llm_content_generator:
-             yield llm_content
-             if llm_content.tool_call:
-                 if not self.pre_tool_call_id:
-                     self.pre_tool_call_id = llm_content.tool_call.id
-                 elif self.pre_tool_call_id == llm_content.tool_call.id:
-                     continue
-
-                 # execute function
-                 function_result, _ = self._execute_tool_call(llm_content.tool_call)
-
-                 # add function call result
-                 messages.append(
-                     ChatMessage(
-                         role=self.detect_tool_role(cfg["PROVIDER"]),
-                         content=function_result,
-                         name=llm_content.tool_call.name,
-                         tool_call_id=llm_content.tool_call.id,
-                     )
-                 )
-
-                 yield from self.stream_completion(messages)
-
-     def _handle_normal_response(self, response: ModelResponse) -> Generator[LLMResponse, None, None]:
-         """Handle normal (non-streaming) response
-
-         Returns:
-             LLMContent object with:
-             - reasoning: The thinking/reasoning content (if any)
-             - content: The normal response content
-         """
-         choice = response.choices[0]
-         content = choice.message.content or ""  # type: ignore
-         reasoning = choice.message.reasoning_content  # type: ignore
-         finish_reason = choice.finish_reason
-         tool_call: Optional[ToolCall] = None
-
-         # Check if the response contains reasoning content
-         if "<think>" in content and "</think>" in content:
-             # Extract reasoning content
-             content = content.lstrip()
-             if content.startswith("<think>"):
-                 think_end = content.find("</think>")
-                 if think_end != -1:
-                     reasoning = content[7:think_end].strip()  # Start after <think>
-                     # Remove the <think> block from the main content
-                     content = content[think_end + 8 :].strip()  # Start after </think>
-         # Check if the response contains reasoning content in model_extra
-         elif hasattr(choice.message, "model_extra") and choice.message.model_extra:  # type: ignore
-             model_extra = choice.message.model_extra  # type: ignore
-             reasoning = self._get_reasoning_content(model_extra)
-         if finish_reason == "tool_calls":
-             if '{"index":' in content or '"tool_calls":' in content:
-                 # Tool call data may in content after the <think> block
-                 # >/n{"index": 0, "tool_call_id": "call_1", "function": {"name": "name", "arguments": "{}"}, "output": null}
-                 tool_index = content.find('{"index":')
-                 if tool_index != -1:
-                     tmp_content = content[tool_index:]
-                     # Tool call data may in content after the <think> block
-                     try:
-                         choice = self.parse_choice_from_content(tmp_content)
-                     except ValueError:
-                         pass
-             if hasattr(choice, "message") and hasattr(choice.message, "tool_calls") and choice.message.tool_calls:  # type: ignore
-                 tool = choice.message.tool_calls[0]  # type: ignore
-                 tool_call = ToolCall(tool.id, tool.function.name or "", tool.function.arguments)
-
-         yield LLMResponse(reasoning=reasoning, content=content, finish_reason=finish_reason, tool_call=tool_call)
-
-     def _handle_stream_response(self, response: litellm.CustomStreamWrapper) -> Generator[LLMResponse, None, None]:
-         """Handle streaming response
-
-         Returns:
-             Generator yielding LLMContent objects with:
-             - reasoning: The thinking/reasoning content (if any)
-             - content: The normal response content
-         """
-         full_reasoning = ""
-         full_content = ""
-         content = ""
-         reasoning = ""
-         tool_id = ""
-         tool_call_name = ""
-         arguments = ""
-         tool_call: Optional[ToolCall] = None
-         for chunk in response:
-             # Check if the response contains reasoning content
-             choice = chunk.choices[0]  # type: ignore
-             delta = choice.delta
-             finish_reason = choice.finish_reason
-
-             # Concat content
-             content = delta.content or ""
-             full_content += content
-
-             # Concat reasoning
-             reasoning = self._get_reasoning_content(delta)
-             full_reasoning += reasoning or ""
-
-             if finish_reason:
-                 pass
-             if finish_reason == "tool_calls" or ('{"index":' in content or '"tool_calls":' in content):
-                 # Tool call data may in content after the <think> block
-                 # >/n{"index": 0, "tool_call_id": "call_1", "function": {"name": "name", "arguments": "{}"}, "output": null}
-                 tool_index = full_content.find('{"index":')
-                 if tool_index != -1:
-                     tmp_content = full_content[tool_index:]
-                     try:
-                         choice = self.parse_choice_from_content(tmp_content)
-                     except ValueError:
-                         pass
-                 if hasattr(choice.delta, "tool_calls") and choice.delta.tool_calls:  # type: ignore
-                     # Handle tool calls
-                     tool_id = choice.delta.tool_calls[0].id or ""  # type: ignore
-                     for tool in choice.delta.tool_calls:  # type: ignore
-                         if not tool.function:
-                             continue
-                         tool_call_name = tool.function.name or ""
-                         arguments += tool.function.arguments or ""
-                     tool_call = ToolCall(tool_id, tool_call_name, arguments)
-             yield LLMResponse(reasoning=reasoning, content=content, tool_call=tool_call, finish_reason=finish_reason)
-
-     def _get_reasoning_content(self, delta: Any) -> Optional[str]:
-         """Extract reasoning content from delta if available based on specific keys.
-
-         This method checks for various keys that might contain reasoning content
-         in different API implementations.
-
-         Args:
-             delta: The delta/model_extra from the API response
-
-         Returns:
-             The reasoning content string if found, None otherwise
-         """
-         if not delta:
-             return None
-         # Reasoning content keys from API:
-         # reasoning_content: deepseek/infi-ai
-         # reasoning: openrouter
-         # <think> block implementation not in here
-         for key in ("reasoning_content", "reasoning"):
-             # Check if the key exists and its value is a non-empty string
-             if hasattr(delta, key):
-                 return getattr(delta, key)
-
-         return None  # Return None if no relevant key with a string value is found
-
-     def parse_choice_from_content(self, content: str) -> Choices:
-         """
-         Parse the choice from the content after <think>...</think> block.
-         Args:
-             content: The content from the LLM response
-             choice_cls: The class to use to parse the choice
-         Returns:
-             The choice object
-         Raises ValueError if the content is not valid JSON
-         """
-         try:
-             content_dict = json.loads(content)
-         except json.JSONDecodeError:
-             raise ValueError(f"Invalid message from LLM: {content}")
-         if "delta" in content_dict:
-             try:
-                 content_dict["delta"] = ChoiceMessage.model_validate(content_dict["delta"])
-             except Exception as e:
-                 raise ValueError(f"Invalid message from LLM: {content}") from e
-         try:
-             return Choices.model_validate(content_dict)
-         except Exception as e:
-             raise ValueError(f"Invalid message from LLM: {content}") from e
-
-     def detect_tool_role(self, provider: str) -> str:
-         """Detect the role of the tool call"""
-         if provider == "gemini":
-             return "user"
-         else:
-             return "tool"
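
For reference, the deleted class exposed a generator protocol: completion() yields LLMResponse chunks, plus a RefreshLive marker whenever a tool call triggers another request round. A hypothetical 0.5.9-era caller, assuming ChatMessage accepts the role/content keywords used in _convert_messages above with the other fields defaulting to None:

# Hypothetical usage of the deleted LitellmClient (0.5.9); illustrates the
# generator protocol only, and is not taken from yaicli's own callers.
from yaicli.client import LitellmClient, RefreshLive
from yaicli.schemas import ChatMessage

client = LitellmClient()
history = [ChatMessage(role="user", content="What's in my current directory?")]

for event in client.completion(history, stream=True):
    if isinstance(event, RefreshLive):
        continue  # a tool call completed; a fresh request round begins
    if event.content:
        print(event.content, end="")

In 0.6.1 this module is gone; the new yaicli/llms/client.py presumably takes over this role.
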
yaicli-0.5.9.dist-info/RECORD DELETED
@@ -1,24 +0,0 @@
- pyproject.toml,sha256=BKd4A76v4AS4RKC2RTfi4OMImeKmyTzNEn8Xx2tWlbM,1964
- yaicli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- yaicli/chat.py,sha256=_emvZEdgMBth2nQGaNWPf0P45oW2k3bpuIwqsxFcM5A,13676
- yaicli/cli.py,sha256=u4rq04CdLiL-IBzQ0IimPyFAh9XZg9YUTuvM4CxE3pE,22985
- yaicli/client.py,sha256=kdEI4HP1zZQHUf-7kNbE9ItpyRN8zcAGyKeBSb1R4Ro,16565
- yaicli/config.py,sha256=HrWYcelLXE61XX719eVcuuo3292xxf1BNQznWdvjQFQ,6535
- yaicli/console.py,sha256=vARPJd-3lafutsQWrGntQVjLrYqaJD3qisN82pmuhjU,1973
- yaicli/const.py,sha256=mt_6m2jo5-tHEmk3DxKCEc5ek9DcyQGHFAn_HEeMP3k,8155
- yaicli/entry.py,sha256=gKzN8Yar3tpBd2Z2a80gD3k0W4Sf3lL7jdyws-2y-H0,8687
- yaicli/exceptions.py,sha256=WBYg8OTJJzaj7lt6HE7ZyBoe5T6A3yZRNCRfWd4iN0c,372
- yaicli/history.py,sha256=s-57X9FMsaQHF7XySq1gGH_jpd_cHHTYafYu2ECuG6M,2472
- yaicli/printer.py,sha256=J-QPrb0q4Zrx18vwBGIBDQk7RzT4wXJPwdJ9oXLoEVg,8369
- yaicli/render.py,sha256=k8o2P8fI44PJlyQbs7gmMiu2x2prwajdWn5JIt15BIA,505
- yaicli/role.py,sha256=PfwiVJIlzg7EzlvMM-kIy6vBK0d5d_J4M1I_fIZGnWk,7399
- yaicli/schemas.py,sha256=PiuSY7ORZaA4OL_tYm0inwqirHp5M-F3zcCipLwsH9E,571
- yaicli/tools.py,sha256=d-5LXbEB-1Uq5VKSgwlAiNDVOGrHkku2DpmZoorq1zw,3098
- yaicli/utils.py,sha256=bpo3Xhozpxsaci3FtEIKZ32l4ZdyWMsrHjYGX0tB4J4,4541
- yaicli/functions/__init__.py,sha256=_FJooQ9GkijG8xLwuU0cr5GBrGnC9Nc6bnCeUjrsT0k,1271
- yaicli/functions/buildin/execute_shell_command.py,sha256=unl1-F8p6QZajeHdA0u5UpURMJM0WhdWMUWCCCHVRcI,1320
- yaicli-0.5.9.dist-info/METADATA,sha256=6pjDNRzZPXd6HJILGfxlIBCs7QG7dK7tmNx5sT1JaTQ,50350
- yaicli-0.5.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- yaicli-0.5.9.dist-info/entry_points.txt,sha256=iYVyQP0PJIm9tQnlQheqT435kK_xdGoi5j9aswGV9hA,66
- yaicli-0.5.9.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- yaicli-0.5.9.dist-info/RECORD,,
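
Each RECORD line is path,sha256=<unpadded urlsafe-base64 digest>,size-in-bytes, with the RECORD file's own hash and size fields left empty. A minimal sketch for checking one entry against an unpacked wheel (it ignores the CSV quoting used when a path contains a comma):

# Sketch: verify one wheel RECORD entry against files unpacked under `root`.
import base64
import hashlib
from pathlib import Path


def check_record_entry(entry: str, root: Path = Path(".")) -> bool:
    path, hash_field, size = entry.rsplit(",", 2)
    if not hash_field:  # the RECORD file itself carries empty hash/size fields
        return True
    algo, _, expected = hash_field.partition("=")
    data = (root / path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.new(algo, data).digest()).rstrip(b"=")
    return digest.decode() == expected and len(data) == int(size)
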