yaicli 0.6.0__tar.gz → 0.6.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. {yaicli-0.6.0 → yaicli-0.6.1}/PKG-INFO +1 -1
  2. {yaicli-0.6.0 → yaicli-0.6.1}/pyproject.toml +1 -1
  3. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/llms/provider.py +2 -0
  4. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/llms/providers/ai21_provider.py +5 -4
  5. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/llms/providers/chatglm_provider.py +6 -1
  6. yaicli-0.6.1/yaicli/llms/providers/chutes_provider.py +14 -0
  7. yaicli-0.6.1/yaicli/llms/providers/deepseek_provider.py +14 -0
  8. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/llms/providers/doubao_provider.py +24 -22
  9. yaicli-0.6.1/yaicli/llms/providers/groq_provider.py +16 -0
  10. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/llms/providers/infiniai_provider.py +7 -1
  11. yaicli-0.6.1/yaicli/llms/providers/minimax_provider.py +13 -0
  12. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/llms/providers/modelscope_provider.py +6 -3
  13. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/llms/providers/openai_provider.py +36 -12
  14. yaicli-0.6.1/yaicli/llms/providers/openrouter_provider.py +14 -0
  15. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/llms/providers/sambanova_provider.py +8 -6
  16. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/llms/providers/siliconflow_provider.py +6 -3
  17. yaicli-0.6.1/yaicli/llms/providers/targon_provider.py +14 -0
  18. yaicli-0.6.1/yaicli/llms/providers/yi_provider.py +14 -0
  19. yaicli-0.6.0/yaicli/llms/providers/chutes_provider.py +0 -7
  20. yaicli-0.6.0/yaicli/llms/providers/deepseek_provider.py +0 -11
  21. yaicli-0.6.0/yaicli/llms/providers/groq_provider.py +0 -14
  22. yaicli-0.6.0/yaicli/llms/providers/openrouter_provider.py +0 -11
  23. yaicli-0.6.0/yaicli/llms/providers/yi_provider.py +0 -7
  24. {yaicli-0.6.0 → yaicli-0.6.1}/.gitignore +0 -0
  25. {yaicli-0.6.0 → yaicli-0.6.1}/LICENSE +0 -0
  26. {yaicli-0.6.0 → yaicli-0.6.1}/README.md +0 -0
  27. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/__init__.py +0 -0
  28. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/chat.py +0 -0
  29. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/cli.py +0 -0
  30. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/config.py +0 -0
  31. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/console.py +0 -0
  32. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/const.py +0 -0
  33. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/entry.py +0 -0
  34. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/exceptions.py +0 -0
  35. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/functions/__init__.py +0 -0
  36. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/functions/buildin/execute_shell_command.py +0 -0
  37. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/history.py +0 -0
  38. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/llms/__init__.py +0 -0
  39. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/llms/client.py +0 -0
  40. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/llms/providers/cohere_provider.py +0 -0
  41. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/llms/providers/ollama_provider.py +0 -0
  42. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/printer.py +0 -0
  43. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/render.py +0 -0
  44. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/role.py +0 -0
  45. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/schemas.py +0 -0
  46. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/tools.py +0 -0
  47. {yaicli-0.6.0 → yaicli-0.6.1}/yaicli/utils.py +0 -0
--- yaicli-0.6.0/PKG-INFO
+++ yaicli-0.6.1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: yaicli
-Version: 0.6.0
+Version: 0.6.1
 Summary: A simple CLI tool to interact with LLM
 Project-URL: Homepage, https://github.com/belingud/yaicli
 Project-URL: Repository, https://github.com/belingud/yaicli
--- yaicli-0.6.0/pyproject.toml
+++ yaicli-0.6.1/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "yaicli"
-version = "0.6.0"
+version = "0.6.1"
 description = "A simple CLI tool to interact with LLM"
 authors = [{ name = "belingud", email = "im.victor@qq.com" }]
 readme = "README.md"
--- yaicli-0.6.0/yaicli/llms/provider.py
+++ yaicli-0.6.1/yaicli/llms/provider.py
@@ -54,6 +54,8 @@ class ProviderFactory:
         "ollama": (".providers.ollama_provider", "OllamaProvider"),
         "cohere": (".providers.cohere_provider", "CohereProvider"),
         "sambanova": (".providers.sambanova_provider", "SambanovaProvider"),
+        "minimax": (".providers.minimax_provider", "MinimaxProvider"),
+        "targon": (".providers.targon_provider", "TargonProvider"),
     }

     @classmethod
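
For orientation, here is a minimal sketch of how a registry like this is typically consumed. The `create` function name, its signature, and the `package="yaicli.llms"` anchor are illustrative assumptions, not yaicli's actual factory API:

    import importlib

    PROVIDERS_MAP = {
        # (relative module path, class name), as registered in the diff above
        "minimax": (".providers.minimax_provider", "MinimaxProvider"),
        "targon": (".providers.targon_provider", "TargonProvider"),
    }

    def create(name: str, **kwargs):
        # Lazy import: a provider module is only loaded when its key is
        # requested, so new registry entries cost nothing at startup.
        module_path, class_name = PROVIDERS_MAP[name.lower()]
        module = importlib.import_module(module_path, package="yaicli.llms")
        return getattr(module, class_name)(**kwargs)
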
--- yaicli-0.6.0/yaicli/llms/providers/ai21_provider.py
+++ yaicli-0.6.1/yaicli/llms/providers/ai21_provider.py
@@ -1,4 +1,4 @@
-from typing import Generator, Optional
+from typing import Any, Dict, Generator, Optional

 from openai._streaming import Stream
 from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
@@ -12,9 +12,10 @@ class AI21Provider(OpenAIProvider):

     DEFAULT_BASE_URL = "https://api.ai21.com/studio/v1"

-    def __init__(self, config: dict = ..., **kwargs):
-        super().__init__(config, **kwargs)
-        self.completion_params["max_tokens"] = self.completion_params.pop("max_completion_tokens")
+    def get_completion_params(self) -> Dict[str, Any]:
+        params = super().get_completion_params()
+        params["max_tokens"] = params.pop("max_completion_tokens")
+        return params

     def _handle_stream_response(self, response: Stream[ChatCompletionChunk]) -> Generator[LLMResponse, None, None]:
         """Handle streaming response from AI21 models
--- yaicli-0.6.0/yaicli/llms/providers/chatglm_provider.py
+++ yaicli-0.6.1/yaicli/llms/providers/chatglm_provider.py
@@ -1,5 +1,5 @@
 import json
-from typing import Generator, Optional
+from typing import Any, Dict, Generator, Optional

 from openai._streaming import Stream
 from openai.types.chat.chat_completion import ChatCompletion, Choice
@@ -14,6 +14,11 @@ class ChatglmProvider(OpenAIProvider):

     DEFAULT_BASE_URL = "https://open.bigmodel.cn/api/paas/v4/"

+    def get_completion_params(self) -> Dict[str, Any]:
+        params = super().get_completion_params()
+        params["max_tokens"] = params.pop("max_completion_tokens")
+        return params
+
     def _handle_normal_response(self, response: ChatCompletion) -> Generator[LLMResponse, None, None]:
         """Handle normal (non-streaming) response
         Support both openai capabilities and chatglm
--- /dev/null
+++ yaicli-0.6.1/yaicli/llms/providers/chutes_provider.py
@@ -0,0 +1,14 @@
+from typing import Any, Dict
+
+from .openai_provider import OpenAIProvider
+
+
+class ChutesProvider(OpenAIProvider):
+    """Chutes provider implementation based on openai-compatible API"""
+
+    DEFAULT_BASE_URL = "https://llm.chutes.ai/v1"
+
+    def get_completion_params(self) -> Dict[str, Any]:
+        params = super().get_completion_params()
+        params["max_tokens"] = params.pop("max_completion_tokens")
+        return params
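
The key swap in `get_completion_params` above, repeated in most of the provider files in this release, renames one field in the request dict built by the base `OpenAIProvider`, presumably because these OpenAI-compatible backends still expect `max_tokens` rather than the newer `max_completion_tokens`. A standalone illustration of the transformation (values made up):

    params = {"model": "example-model", "max_completion_tokens": 1024, "timeout": 60}
    params["max_tokens"] = params.pop("max_completion_tokens")
    assert params == {"model": "example-model", "timeout": 60, "max_tokens": 1024}
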
--- /dev/null
+++ yaicli-0.6.1/yaicli/llms/providers/deepseek_provider.py
@@ -0,0 +1,14 @@
+from typing import Any, Dict
+
+from .openai_provider import OpenAIProvider
+
+
+class DeepSeekProvider(OpenAIProvider):
+    """DeepSeek provider implementation based on openai-compatible API"""
+
+    DEFAULT_BASE_URL = "https://api.deepseek.com/v1"
+
+    def get_completion_params(self) -> Dict[str, Any]:
+        params = super().get_completion_params()
+        params["max_tokens"] = params.pop("max_completion_tokens")
+        return params
--- yaicli-0.6.0/yaicli/llms/providers/doubao_provider.py
+++ yaicli-0.6.1/yaicli/llms/providers/doubao_provider.py
@@ -1,3 +1,5 @@
+from typing import Any, Dict
+
 from volcenginesdkarkruntime import Ark

 from ...config import cfg
@@ -9,43 +11,43 @@ class DoubaoProvider(OpenAIProvider):
     """Doubao provider implementation based on openai-compatible API"""

     DEFAULT_BASE_URL = "https://ark.cn-beijing.volces.com/api/v3"
+    CLIENT_CLS = Ark

     def __init__(self, config: dict = cfg, **kwargs):
         self.config = config
         self.enable_function = self.config["ENABLE_FUNCTIONS"]
+        self.client_params = self.get_client_params()
+
+        # Initialize client
+        self.client = self.CLIENT_CLS(**self.client_params)
+        self.console = get_console()
+
+        # Store completion params
+        self.completion_params = self.get_completion_params()
+
+    def get_client_params(self) -> Dict[str, Any]:
         # Initialize client params
-        self.client_params = {"base_url": self.DEFAULT_BASE_URL}
+        client_params = {"base_url": self.DEFAULT_BASE_URL}
         if self.config.get("API_KEY", None):
-            self.client_params["api_key"] = self.config["API_KEY"]
+            client_params["api_key"] = self.config["API_KEY"]
         if self.config.get("BASE_URL", None):
-            self.client_params["base_url"] = self.config["BASE_URL"]
+            client_params["base_url"] = self.config["BASE_URL"]
         if self.config.get("AK", None):
-            self.client_params["ak"] = self.config["AK"]
+            client_params["ak"] = self.config["AK"]
         if self.config.get("SK", None):
-            self.client_params["sk"] = self.config["SK"]
+            client_params["sk"] = self.config["SK"]
         if self.config.get("REGION", None):
-            self.client_params["region"] = self.config["REGION"]
+            client_params["region"] = self.config["REGION"]
+        return client_params

-        # Initialize client
-        self.client = Ark(**self.client_params)
-        self.console = get_console()
-
-        # Store completion params
-        self.completion_params = {
+    def get_completion_params(self) -> Dict[str, Any]:
+        params = {
             "model": self.config["MODEL"],
             "temperature": self.config["TEMPERATURE"],
             "top_p": self.config["TOP_P"],
             "max_tokens": self.config["MAX_TOKENS"],
             "timeout": self.config["TIMEOUT"],
         }
-        # Add extra headers if set
-        if self.config.get("EXTRA_HEADERS", None):
-            self.completion_params["extra_headers"] = {
-                **self.config["EXTRA_HEADERS"],
-                "X-Title": self.APP_NAME,
-                "HTTP-Referer": self.APPA_REFERER,
-            }
-
-        # Add extra body params if set
         if self.config.get("EXTRA_BODY", None):
-            self.completion_params["extra_body"] = self.config["EXTRA_BODY"]
+            params["extra_body"] = self.config["EXTRA_BODY"]
+        return params
--- /dev/null
+++ yaicli-0.6.1/yaicli/llms/providers/groq_provider.py
@@ -0,0 +1,16 @@
+from typing import Any, Dict
+
+from .openai_provider import OpenAIProvider
+
+
+class GroqProvider(OpenAIProvider):
+    """Groq provider implementation based on openai-compatible API"""
+
+    DEFAULT_BASE_URL = "https://api.groq.com/openai/v1"
+
+    def get_completion_params(self) -> Dict[str, Any]:
+        params = super().get_completion_params()
+        if self.config["EXTRA_BODY"] and "N" in self.config["EXTRA_BODY"] and self.config["EXTRA_BODY"]["N"] != 1:
+            self.console.print("Groq does not support N parameter, setting N to 1 as Groq default", style="yellow")
+            params["extra_body"]["N"] = 1
+        return params
--- yaicli-0.6.0/yaicli/llms/providers/infiniai_provider.py
+++ yaicli-0.6.1/yaicli/llms/providers/infiniai_provider.py
@@ -1,3 +1,5 @@
+from typing import Any, Dict
+
 from .openai_provider import OpenAIProvider


@@ -11,4 +13,8 @@ class InfiniAIProvider(OpenAIProvider):
         if self.enable_function:
             self.console.print("InfiniAI does not support functions, disabled", style="yellow")
             self.enable_function = False
-        self.completion_params["max_tokens"] = self.completion_params.pop("max_completion_tokens")
+
+    def get_completion_params(self) -> Dict[str, Any]:
+        params = super().get_completion_params()
+        params["max_tokens"] = params.pop("max_completion_tokens")
+        return params
--- /dev/null
+++ yaicli-0.6.1/yaicli/llms/providers/minimax_provider.py
@@ -0,0 +1,13 @@
+from typing import Any, Dict
+from .openai_provider import OpenAIProvider
+
+
+class MinimaxProvider(OpenAIProvider):
+    """Minimax provider implementation based on openai-compatible API"""
+
+    DEFAULT_BASE_URL = "https://api.minimaxi.com/v1"
+
+    def get_completion_params(self) -> Dict[str, Any]:
+        params = super().get_completion_params()
+        params["max_tokens"] = params.pop("max_completion_tokens")
+        return params
--- yaicli-0.6.0/yaicli/llms/providers/modelscope_provider.py
+++ yaicli-0.6.1/yaicli/llms/providers/modelscope_provider.py
@@ -1,3 +1,5 @@
+from typing import Any, Dict
+
 from .openai_provider import OpenAIProvider


@@ -6,6 +8,7 @@ class ModelScopeProvider(OpenAIProvider):

     DEFAULT_BASE_URL = "https://api-inference.modelscope.cn/v1/"

-    def __init__(self, config: dict = ..., **kwargs):
-        super().__init__(config, **kwargs)
-        self.completion_params["max_tokens"] = self.completion_params.pop("max_completion_tokens")
+    def get_completion_params(self) -> Dict[str, Any]:
+        params = super().get_completion_params()
+        params["max_tokens"] = params.pop("max_completion_tokens")
+        return params
--- yaicli-0.6.0/yaicli/llms/providers/openai_provider.py
+++ yaicli-0.6.1/yaicli/llms/providers/openai_provider.py
@@ -1,3 +1,4 @@
+import json
 from typing import Any, Dict, Generator, List, Optional

 import openai
@@ -16,41 +17,51 @@ class OpenAIProvider(Provider):
     """OpenAI provider implementation based on openai library"""

     DEFAULT_BASE_URL = "https://api.openai.com/v1"
+    CLIENT_CLS = openai.OpenAI

     def __init__(self, config: dict = cfg, verbose: bool = False, **kwargs):
         self.config = config
         self.enable_function = self.config["ENABLE_FUNCTIONS"]
         self.verbose = verbose
+
+        # Initialize client
+        self.client_params = self.get_client_params()
+        self.client = self.CLIENT_CLS(**self.client_params)
+        self.console = get_console()
+
+        # Store completion params
+        self.completion_params = self.get_completion_params()
+
+    def get_client_params(self) -> Dict[str, Any]:
+        """Get the client parameters"""
         # Initialize client params
-        self.client_params = {
+        client_params = {
             "api_key": self.config["API_KEY"],
             "base_url": self.config["BASE_URL"] or self.DEFAULT_BASE_URL,
         }

         # Add extra headers if set
         if self.config["EXTRA_HEADERS"]:
-            self.client_params["default_headers"] = {
+            client_params["default_headers"] = {
                 **self.config["EXTRA_HEADERS"],
                 "X-Title": self.APP_NAME,
                 "HTTP-Referer": self.APPA_REFERER,
             }
+        return client_params

-        # Initialize client
-        self.client = openai.OpenAI(**self.client_params)
-        self.console = get_console()
-
-        # Store completion params
-        self.completion_params = {
+    def get_completion_params(self) -> Dict[str, Any]:
+        """Get the completion parameters"""
+        completion_params = {
             "model": self.config["MODEL"],
             "temperature": self.config["TEMPERATURE"],
             "top_p": self.config["TOP_P"],
             "max_completion_tokens": self.config["MAX_TOKENS"],
             "timeout": self.config["TIMEOUT"],
         }
-
         # Add extra body params if set
         if self.config["EXTRA_BODY"]:
-            self.completion_params["extra_body"] = self.config["EXTRA_BODY"]
+            completion_params["extra_body"] = self.config["EXTRA_BODY"]
+        return completion_params

     def _convert_messages(self, messages: List[ChatMessage]) -> List[Dict[str, Any]]:
         """Convert a list of ChatMessage objects to a list of OpenAI message dicts."""
@@ -103,6 +114,11 @@ class OpenAIProvider(Provider):

     def _handle_normal_response(self, response: ChatCompletion) -> Generator[LLMResponse, None, None]:
         """Handle normal (non-streaming) response"""
+        if not response.choices:
+            yield LLMResponse(
+                content=json.dumps(getattr(response, "base_resp", None) or response.to_dict()), finish_reason="stop"
+            )
+            return
         choice = response.choices[0]
         content = choice.message.content or ""  # type: ignore
         reasoning = choice.message.reasoning_content  # type: ignore
@@ -124,12 +140,20 @@ class OpenAIProvider(Provider):
         """Handle streaming response from OpenAI API"""
         # Initialize tool call object to accumulate tool call data across chunks
        tool_call: Optional[ToolCall] = None
-
+        started = False
         # Process each chunk in the response stream
         for chunk in response:
-            if not chunk.choices:
+            if not chunk.choices and not started:
+                # Some APIs return an error message in the first chunk with no choices; yield the raw response so the message is shown
+                yield LLMResponse(
+                    content=json.dumps(getattr(chunk, "base_resp", None) or chunk.to_dict()), finish_reason="stop"
+                )
+                started = True
                 continue

+            if not chunk.choices:
+                continue
+            started = True
             choice = chunk.choices[0]
             delta = choice.delta
             finish_reason = choice.finish_reason
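
Taken together, these hunks implement a template-method refactor: the base `__init__` now delegates to overridable `get_client_params()` / `get_completion_params()` hooks, and the new `CLIENT_CLS` attribute lets a subclass swap the SDK client (as `DoubaoProvider` does with `Ark`) without re-implementing the constructor. Below is a self-contained sketch of the pattern with simplified names (not the actual yaicli classes):

    from typing import Any, Dict

    class FakeClient:
        """Stand-in for an SDK client such as openai.OpenAI or Ark."""
        def __init__(self, **kwargs):
            self.kwargs = kwargs

    class BaseProvider:
        CLIENT_CLS = FakeClient

        def __init__(self, config: Dict[str, Any]):
            self.config = config
            # Template method: subclasses customize via the hooks below
            # (and via CLIENT_CLS) instead of copying this constructor.
            self.client = self.CLIENT_CLS(**self.get_client_params())
            self.completion_params = self.get_completion_params()

        def get_client_params(self) -> Dict[str, Any]:
            return {"api_key": self.config["API_KEY"]}

        def get_completion_params(self) -> Dict[str, Any]:
            return {"model": self.config["MODEL"], "max_completion_tokens": 1024}

    class LegacyTokenProvider(BaseProvider):
        # Same shape as DeepSeekProvider, ChutesProvider, etc. in this diff
        def get_completion_params(self) -> Dict[str, Any]:
            params = super().get_completion_params()
            params["max_tokens"] = params.pop("max_completion_tokens")
            return params

    p = LegacyTokenProvider({"API_KEY": "sk-test", "MODEL": "demo"})
    assert p.completion_params == {"model": "demo", "max_tokens": 1024}
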
--- /dev/null
+++ yaicli-0.6.1/yaicli/llms/providers/openrouter_provider.py
@@ -0,0 +1,14 @@
+from typing import Any, Dict
+
+from .openai_provider import OpenAIProvider
+
+
+class OpenRouterProvider(OpenAIProvider):
+    """OpenRouter provider implementation based on openai-compatible API"""
+
+    DEFAULT_BASE_URL = "https://openrouter.ai/api/v1"
+
+    def get_completion_params(self) -> Dict[str, Any]:
+        params = super().get_completion_params()
+        params["max_tokens"] = params.pop("max_completion_tokens")
+        return params
--- yaicli-0.6.0/yaicli/llms/providers/sambanova_provider.py
+++ yaicli-0.6.1/yaicli/llms/providers/sambanova_provider.py
@@ -1,3 +1,5 @@
+from typing import Any, Dict
+
 from ...const import DEFAULT_TEMPERATURE
 from .openai_provider import OpenAIProvider

@@ -14,13 +16,13 @@ class SambanovaProvider(OpenAIProvider):
         "DeepSeek-V3-0324",
     )

-    def __init__(self, config: dict = ..., verbose: bool = False, **kwargs):
-        super().__init__(config, verbose, **kwargs)
-        self.completion_params.pop("presence_penalty", None)
-        self.completion_params.pop("frequency_penalty", None)
-        if self.completion_params.get("temperature") < 0 or self.completion_params.get("temperature") > 1:
+    def get_completion_params(self) -> Dict[str, Any]:
+        params = super().get_completion_params()
+        params.pop("presence_penalty", None)
+        params.pop("frequency_penalty", None)
+        if params.get("temperature") < 0 or params.get("temperature") > 1:
             self.console.print("Sambanova temperature must be between 0 and 1, setting to 0.4", style="yellow")
-            self.completion_params["temperature"] = DEFAULT_TEMPERATURE
+            params["temperature"] = DEFAULT_TEMPERATURE
         if self.enable_function and self.config["MODEL"] not in self.SUPPORT_FUNCTION_CALL_MOELS:
             self.console.print(
                 f"Sambanova supports function call models: {', '.join(self.SUPPORT_FUNCTION_CALL_MOELS)}",
--- yaicli-0.6.0/yaicli/llms/providers/siliconflow_provider.py
+++ yaicli-0.6.1/yaicli/llms/providers/siliconflow_provider.py
@@ -1,3 +1,5 @@
+from typing import Any, Dict
+
 from .openai_provider import OpenAIProvider


@@ -6,6 +8,7 @@ class SiliconFlowProvider(OpenAIProvider):

     DEFAULT_BASE_URL = "https://api.siliconflow.cn/v1"

-    def __init__(self, config: dict = ..., **kwargs):
-        super().__init__(config, **kwargs)
-        self.completion_params["max_tokens"] = self.completion_params.pop("max_completion_tokens")
+    def get_completion_params(self) -> Dict[str, Any]:
+        params = super().get_completion_params()
+        params["max_tokens"] = params.pop("max_completion_tokens")
+        return params
--- /dev/null
+++ yaicli-0.6.1/yaicli/llms/providers/targon_provider.py
@@ -0,0 +1,14 @@
+from typing import Any, Dict
+
+from .openai_provider import OpenAIProvider
+
+
+class TargonProvider(OpenAIProvider):
+    """Targon provider implementation based on openai-compatible API"""
+
+    DEFAULT_BASE_URL = "https://api.targon.com/v1"
+
+    def get_completion_params(self) -> Dict[str, Any]:
+        params = super().get_completion_params()
+        params["max_tokens"] = params.pop("max_completion_tokens")
+        return params
--- /dev/null
+++ yaicli-0.6.1/yaicli/llms/providers/yi_provider.py
@@ -0,0 +1,14 @@
+from typing import Any, Dict
+
+from .openai_provider import OpenAIProvider
+
+
+class YiProvider(OpenAIProvider):
+    """Lingyiwanwu provider implementation based on openai-compatible API"""
+
+    DEFAULT_BASE_URL = "https://api.lingyiwanwu.com/v1"
+
+    def get_completion_params(self) -> Dict[str, Any]:
+        params = super().get_completion_params()
+        params["max_tokens"] = params.pop("max_completion_tokens")
+        return params
--- yaicli-0.6.0/yaicli/llms/providers/chutes_provider.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .openai_provider import OpenAIProvider
-
-
-class ChutesProvider(OpenAIProvider):
-    """Chutes provider implementation based on openai-compatible API"""
-
-    DEFAULT_BASE_URL = "https://llm.chutes.ai/v1"
--- yaicli-0.6.0/yaicli/llms/providers/deepseek_provider.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from .openai_provider import OpenAIProvider
-
-
-class DeepSeekProvider(OpenAIProvider):
-    """DeepSeek provider implementation based on openai-compatible API"""
-
-    DEFAULT_BASE_URL = "https://api.deepseek.com/v1"
-
-    def __init__(self, config: dict = ..., **kwargs):
-        super().__init__(config, **kwargs)
-        self.completion_params["max_tokens"] = self.completion_params.pop("max_completion_tokens")
--- yaicli-0.6.0/yaicli/llms/providers/groq_provider.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from .openai_provider import OpenAIProvider
-
-
-class GroqProvider(OpenAIProvider):
-    """Groq provider implementation based on openai-compatible API"""
-
-    DEFAULT_BASE_URL = "https://api.groq.com/openai/v1"
-
-    def __init__(self, config: dict = ..., **kwargs):
-        super().__init__(config, **kwargs)
-        if self.config.get("EXTRA_BODY") and "N" in self.config["EXTRA_BODY"] and self.config["EXTRA_BODY"]["N"] != 1:
-            self.console.print("Groq does not support N parameter, setting N to 1 as Groq default", style="yellow")
-            if "extra_body" in self.completion_params:
-                self.completion_params["extra_body"]["N"] = 1
--- yaicli-0.6.0/yaicli/llms/providers/openrouter_provider.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from .openai_provider import OpenAIProvider
-
-
-class OpenRouterProvider(OpenAIProvider):
-    """OpenRouter provider implementation based on openai-compatible API"""
-
-    DEFAULT_BASE_URL = "https://openrouter.ai/api/v1"
-
-    def __init__(self, config: dict = ..., **kwargs):
-        super().__init__(config, **kwargs)
-        self.completion_params["max_tokens"] = self.completion_params.pop("max_completion_tokens")
--- yaicli-0.6.0/yaicli/llms/providers/yi_provider.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .openai_provider import OpenAIProvider
-
-
-class YiProvider(OpenAIProvider):
-    """Yi provider implementation based on openai-compatible API"""
-
-    DEFAULT_BASE_URL = "https://api.lingyiwanwu.com/v1"