janito 2.15.0__py3-none-any.whl → 2.17.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. janito/agent/setup_agent.py +1 -1
  2. janito/cli/chat_mode/session.py +42 -1
  3. janito/cli/cli_commands/list_drivers.py +118 -93
  4. janito/cli/cli_commands/list_providers_region.py +85 -0
  5. janito/cli/cli_commands/set_api_key.py +16 -5
  6. janito/cli/core/getters.py +7 -3
  7. janito/cli/main_cli.py +38 -3
  8. janito/cli/prompt_setup.py +3 -0
  9. janito/cli/single_shot_mode/handler.py +43 -1
  10. janito/drivers/azure_openai/driver.py +14 -5
  11. janito/drivers/cerebras/__init__.py +1 -0
  12. janito/drivers/openai/driver.py +16 -5
  13. janito/drivers/zai/driver.py +19 -22
  14. janito/formatting_token.py +9 -5
  15. janito/llm/auth_utils.py +21 -0
  16. janito/providers/__init__.py +1 -0
  17. janito/providers/alibaba/provider.py +11 -9
  18. janito/providers/anthropic/provider.py +4 -5
  19. janito/providers/azure_openai/provider.py +4 -5
  20. janito/providers/cerebras/__init__.py +1 -0
  21. janito/providers/cerebras/model_info.py +76 -0
  22. janito/providers/cerebras/provider.py +145 -0
  23. janito/providers/deepseek/provider.py +4 -5
  24. janito/providers/google/provider.py +4 -5
  25. janito/providers/moonshotai/provider.py +46 -37
  26. janito/providers/openai/provider.py +45 -39
  27. janito/providers/zai/provider.py +3 -9
  28. janito/regions/__init__.py +16 -0
  29. janito/regions/cli.py +124 -0
  30. janito/regions/geo_utils.py +240 -0
  31. janito/regions/provider_regions.py +158 -0
  32. {janito-2.15.0.dist-info → janito-2.17.0.dist-info}/METADATA +1 -1
  33. {janito-2.15.0.dist-info → janito-2.17.0.dist-info}/RECORD +37 -27
  34. {janito-2.15.0.dist-info → janito-2.17.0.dist-info}/WHEEL +0 -0
  35. {janito-2.15.0.dist-info → janito-2.17.0.dist-info}/entry_points.txt +0 -0
  36. {janito-2.15.0.dist-info → janito-2.17.0.dist-info}/licenses/LICENSE +0 -0
  37. {janito-2.15.0.dist-info → janito-2.17.0.dist-info}/top_level.txt +0 -0

janito/drivers/openai/driver.py
@@ -16,6 +16,16 @@ import openai


 class OpenAIModelDriver(LLMDriver):
+    # Check if required dependencies are available
+    try:
+        import openai
+
+        available = True
+        unavailable_reason = None
+    except ImportError as e:
+        available = False
+        unavailable_reason = f"Missing dependency: {str(e)}"
+
     def _get_message_from_result(self, result):
         """Extract the message object from the provider result (OpenAI-specific)."""
         if hasattr(result, "choices") and result.choices:
@@ -248,11 +258,12 @@ class OpenAIModelDriver(LLMDriver):
     def _instantiate_openai_client(self, config):
         try:
             if not config.api_key:
-                provider_name = getattr(self, 'provider_name', 'OpenAI-compatible')
-                print(f"[ERROR] No API key found for provider '{provider_name}'. Please set the API key using:")
-                print(f" janito --set-api-key YOUR_API_KEY -p {provider_name.lower()}")
-                print(f"Or set the {provider_name.upper()}_API_KEY environment variable.")
-                raise ValueError(f"API key is required for provider '{provider_name}'")
+                provider_name = getattr(self, "provider_name", "OpenAI-compatible")
+                from janito.llm.auth_utils import handle_missing_api_key
+
+                handle_missing_api_key(
+                    provider_name, f"{provider_name.upper()}_API_KEY"
+                )

             api_key_display = str(config.api_key)
             if api_key_display and len(api_key_display) > 8:
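
The driver now advertises its availability through class attributes (`available`, `unavailable_reason`) set by the import probe above; CerebrasProvider later in this diff reads the same attributes. A minimal sketch, assuming only those attributes, of how calling code can branch on them:

    # Sketch only: `available` / `unavailable_reason` are the class attributes added above.
    from janito.drivers.openai.driver import OpenAIModelDriver

    if not OpenAIModelDriver.available:
        print(f"OpenAI driver unavailable: {OpenAIModelDriver.unavailable_reason}")
    else:
        print("OpenAI driver ready")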

janito/drivers/zai/driver.py
@@ -12,18 +12,20 @@ from janito.llm.driver_input import DriverInput
 from janito.driver_events import RequestFinished, RequestStatus, RateLimitRetry
 from janito.llm.message_parts import TextMessagePart, FunctionCallMessagePart

-try:
-    import openai
-    available = True
-    unavailable_reason = None
-except ImportError:
-    available = False
-    unavailable_reason = "openai module not installed"
+import openai


 class ZAIModelDriver(LLMDriver):
-    available = available
-    unavailable_reason = unavailable_reason
+    # Check if required dependencies are available
+    try:
+        from zai import ZaiClient
+
+        available = True
+        unavailable_reason = None
+    except ImportError as e:
+        available = False
+        unavailable_reason = f"Missing dependency: {str(e)}"
+
     def _get_message_from_result(self, result):
         """Extract the message object from the provider result (Z.AI-specific)."""
         if hasattr(result, "choices") and result.choices:
@@ -256,14 +258,11 @@ class ZAIModelDriver(LLMDriver):
         try:
             if not config.api_key:
                 provider_name = getattr(self, "provider_name", "ZAI")
-                print(
-                    f"[ERROR] No API key found for provider '{provider_name}'. Please set the API key using:"
-                )
-                print(f" janito --set-api-key YOUR_API_KEY -p {provider_name.lower()}")
-                print(
-                    f"Or set the {provider_name.upper()}_API_KEY environment variable."
+                from janito.llm.auth_utils import handle_missing_api_key
+
+                handle_missing_api_key(
+                    provider_name, f"{provider_name.upper()}_API_KEY"
                 )
-                raise ValueError(f"API key is required for provider '{provider_name}'")

             api_key_display = str(config.api_key)
             if api_key_display and len(api_key_display) > 8:
@@ -284,12 +283,10 @@ class ZAIModelDriver(LLMDriver):
                 flush=True,
             )

-        # Use OpenAI SDK for Z.AI API compatibility
-        try:
-            import openai
-        except ImportError:
-            raise ImportError("openai module is not available. Please install it with: pip install openai")
-        client = openai.OpenAI(
+        # Use the official Z.ai SDK
+        from zai import ZaiClient
+
+        client = ZaiClient(
             api_key=config.api_key, base_url="https://api.z.ai/api/paas/v4/"
         )
         return client

janito/formatting_token.py
@@ -29,7 +29,7 @@ def format_token_message_summary(
     msg_count, usage, width=96, use_rich=False, elapsed=None
 ):
     """
-    Returns a string (rich or pt markup) summarizing message count, last token usage, and elapsed time.
+    Returns a string (rich or pt markup) summarizing message count, last token usage, elapsed time, and tokens per second.
     """
     left = f" Messages: {'[' if use_rich else '<'}msg_count{']' if use_rich else '>'}{msg_count}{'[/msg_count]' if use_rich else '</msg_count>'}"
     tokens_part = ""
@@ -42,10 +42,14 @@ def format_token_message_summary(
            f"Completion: {format_tokens(completion_tokens, 'tokens_out', use_rich)}, "
            f"Total: {format_tokens(total_tokens, 'tokens_total', use_rich)}"
        )
-    elapsed_part = (
-        f" | Elapsed: [cyan]{elapsed:.2f}s[/cyan]" if elapsed is not None else ""
-    )
-    return f"{left}{tokens_part}{elapsed_part}"
+    elapsed_part = ""
+    tps_part = ""
+    if elapsed is not None and elapsed > 0:
+        elapsed_part = f" | Elapsed: [cyan]{elapsed:.2f}s[/cyan]"
+        if usage and total_tokens:
+            tokens_per_second = total_tokens / elapsed
+            tps_part = f" | TPS: {int(tokens_per_second)}"
+    return f"{left}{tokens_part}{elapsed_part}{tps_part}"


 def print_token_message_summary(
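
The new TPS figure is plain arithmetic over values the function already tracks: total tokens divided by elapsed seconds, truncated to an integer. A worked example with assumed numbers (rich markup omitted):

    total_tokens = 1500      # assumed usage, for illustration only
    elapsed = 3.0            # seconds
    tokens_per_second = total_tokens / elapsed          # 500.0
    suffix = f" | Elapsed: {elapsed:.2f}s | TPS: {int(tokens_per_second)}"
    # -> " | Elapsed: 3.00s | TPS: 500"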

janito/llm/auth_utils.py
@@ -0,0 +1,21 @@
+"""
+Authentication utilities for LLM providers.
+"""
+
+import sys
+
+
+def handle_missing_api_key(provider_name: str, env_var_name: str) -> None:
+    """
+    Handle missing API key by printing error message and exiting.
+
+    Args:
+        provider_name: Name of the provider (e.g., 'alibaba', 'openai')
+        env_var_name: Environment variable name (e.g., 'ALIBABA_API_KEY')
+    """
+    print(
+        f"[ERROR] No API key found for provider '{provider_name}'. Please set the API key using:"
+    )
+    print(f" janito --set-api-key YOUR_API_KEY -p {provider_name}")
+    print(f"Or set the {env_var_name} environment variable.")
+    sys.exit(1)
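
Providers now delegate to this helper instead of repeating the three print calls. Note the behavioural difference: it calls sys.exit(1), whereas several providers previously only printed the guidance and returned. A usage sketch (argument values illustrative):

    from janito.llm.auth_utils import handle_missing_api_key

    # Prints the guidance above and exits with status 1; control does not return.
    handle_missing_api_key("cerebras", "CEREBRAS_API_KEY")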

janito/providers/__init__.py
@@ -7,3 +7,4 @@ import janito.providers.deepseek.provider
 import janito.providers.moonshotai.provider
 import janito.providers.alibaba.provider
 import janito.providers.zai.provider
+import janito.providers.cerebras.provider

janito/providers/alibaba/provider.py
@@ -17,7 +17,7 @@ class AlibabaProvider(LLMProvider):
     NAME = "alibaba"
     MAINTAINER = "João Pinto <janito@ikignosis.org>"
     MODEL_SPECS = MODEL_SPECS
-    DEFAULT_MODEL = "qwen3-coder-plus"  # Options: qwen-turbo, qwen-plus, qwen-max, qwen3-coder-plus
+    DEFAULT_MODEL = "qwen3-coder-plus"  # 128k context, coding-focused model

     def __init__(
         self, auth_manager: LLMAuthManager = None, config: LLMDriverConfig = None
@@ -25,27 +25,29 @@ class AlibabaProvider(LLMProvider):
         # Always set a tools adapter so that even if the driver is unavailable,
         # generic code paths that expect provider.execute_tool() continue to work.
         self._tools_adapter = get_local_tools_adapter()
-
+
         # Always initialize _driver_config to avoid AttributeError
         self._driver_config = config or LLMDriverConfig(model=None)
-
+
         if not self.available:
             self._driver = None
         else:
             self.auth_manager = auth_manager or LLMAuthManager()
             self._api_key = self.auth_manager.get_credentials(type(self).NAME)
             if not self._api_key:
-                print(f"[ERROR] No API key found for provider '{self.name}'. Please set the API key using:")
-                print(f" janito --set-api-key YOUR_API_KEY -p {self.name}")
-                print(f"Or set the ALIBABA_API_KEY environment variable.")
-
+                from janito.llm.auth_utils import handle_missing_api_key
+
+                handle_missing_api_key(self.name, "ALIBABA_API_KEY")
+
             if not self._driver_config.model:
                 self._driver_config.model = self.DEFAULT_MODEL
             if not self._driver_config.api_key:
                 self._driver_config.api_key = self._api_key
            # Set Alibaba international endpoint as default base_url if not provided
            if not getattr(self._driver_config, "base_url", None):
-                self._driver_config.base_url = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
+                self._driver_config.base_url = (
+                    "https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
+                )
            self.fill_missing_device_info(self._driver_config)
            self._driver = None  # to be provided by factory/agent

@@ -99,4 +101,4 @@ class AlibabaProvider(LLMProvider):
         return self._tools_adapter.execute_by_name(tool_name, *args, **kwargs)


-LLMProviderRegistry.register(AlibabaProvider.NAME, AlibabaProvider)
+LLMProviderRegistry.register(AlibabaProvider.NAME, AlibabaProvider)

janito/providers/anthropic/provider.py
@@ -22,11 +22,10 @@ class AnthropicProvider(LLMProvider):
         self.auth_manager = auth_manager or LLMAuthManager()
         self._api_key = self.auth_manager.get_credentials(type(self).NAME)
         if not self._api_key:
-            print(f"[ERROR] No API key found for provider '{self.name}'. Please set the API key using:")
-            print(f" janito --set-api-key YOUR_API_KEY -p {self.name}")
-            print(f"Or set the ANTHROPIC_API_KEY environment variable.")
-            return
-
+            from janito.llm.auth_utils import handle_missing_api_key
+
+            handle_missing_api_key(self.name, "ANTHROPIC_API_KEY")
+
         self._tools_adapter = get_local_tools_adapter()
         self._driver_config = config or LLMDriverConfig(model=None)
         if not getattr(self._driver_config, "model", None):

janito/providers/azure_openai/provider.py
@@ -33,11 +33,10 @@ class AzureOpenAIProvider(LLMProvider):
         self._auth_manager = auth_manager or LLMAuthManager()
         self._api_key = self._auth_manager.get_credentials(type(self).NAME)
         if not self._api_key:
-            print(f"[ERROR] No API key found for provider '{self.name}'. Please set the API key using:")
-            print(f" janito --set-api-key YOUR_API_KEY -p {self.name}")
-            print(f"Or set the AZURE_OPENAI_API_KEY environment variable.")
-            return
-
+            from janito.llm.auth_utils import handle_missing_api_key
+
+            handle_missing_api_key(self.name, "AZURE_OPENAI_API_KEY")
+
         self._tools_adapter = get_local_tools_adapter()
         self._driver_config = config or LLMDriverConfig(model=None)
         if not self._driver_config.model:

janito/providers/cerebras/__init__.py
@@ -0,0 +1 @@
+# Cerebras provider package

janito/providers/cerebras/model_info.py
@@ -0,0 +1,76 @@
+"""Model specifications for Cerebras Inference API."""
+
+from janito.llm.model import LLMModelInfo
+
+MODEL_SPECS = {
+    "qwen-3-32b": LLMModelInfo(
+        name="qwen-3-32b",
+        max_input=128000,
+        max_response=16384,
+        default_temp=0.7,
+        driver="CerebrasModelDriver",
+        other={
+            "description": "Qwen 3 32B model for general instruction following",
+            "pricing": {
+                "input_per_1k_tokens": 0.0002,
+                "output_per_1k_tokens": 0.0006
+            }
+        }
+    ),
+    "qwen-3-235b-a22b-instruct-2507": LLMModelInfo(
+        name="qwen-3-235b-a22b-instruct-2507",
+        max_input=128000,
+        max_response=16384,
+        default_temp=0.7,
+        driver="CerebrasModelDriver",
+        other={
+            "description": "Qwen 3 235B A22B instruction-tuned model (preview)",
+            "pricing": {
+                "input_per_1k_tokens": 0.001,
+                "output_per_1k_tokens": 0.003
+            }
+        }
+    ),
+    "qwen-3-235b-a22b-thinking-2507": LLMModelInfo(
+        name="qwen-3-235b-a22b-thinking-2507",
+        max_input=128000,
+        max_response=16384,
+        default_temp=0.7,
+        driver="CerebrasModelDriver",
+        other={
+            "description": "Qwen 3 235B A22B thinking model for reasoning tasks (preview)",
+            "pricing": {
+                "input_per_1k_tokens": 0.001,
+                "output_per_1k_tokens": 0.003
+            }
+        }
+    ),
+    "qwen-3-coder-480b": LLMModelInfo(
+        name="qwen-3-coder-480b",
+        max_input=128000,
+        max_response=16384,
+        default_temp=0.7,
+        driver="CerebrasModelDriver",
+        other={
+            "description": "Qwen 3 Coder 480B model for programming tasks (preview)",
+            "pricing": {
+                "input_per_1k_tokens": 0.002,
+                "output_per_1k_tokens": 0.006
+            }
+        }
+    ),
+    "gpt-oss-120b": LLMModelInfo(
+        name="gpt-oss-120b",
+        max_input=128000,
+        max_response=16384,
+        default_temp=0.7,
+        driver="CerebrasModelDriver",
+        other={
+            "description": "GPT-OSS 120B open-source model (preview)",
+            "pricing": {
+                "input_per_1k_tokens": 0.0008,
+                "output_per_1k_tokens": 0.0024
+            }
+        }
+    )
+}
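
Pricing sits under each model's `other` mapping, so rough cost estimates can be read straight from these specs. A small sketch, assuming LLMModelInfo exposes its constructor fields as attributes and using made-up token counts:

    from janito.providers.cerebras.model_info import MODEL_SPECS

    spec = MODEL_SPECS["qwen-3-coder-480b"]
    pricing = spec.other["pricing"]        # assumes `other` is accessible as an attribute
    prompt_k, completion_k = 12, 2         # hypothetical: 12k prompt tokens, 2k completion tokens
    cost = prompt_k * pricing["input_per_1k_tokens"] + completion_k * pricing["output_per_1k_tokens"]
    print(f"estimated cost: ${cost:.3f}")  # 12*0.002 + 2*0.006 = 0.036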

janito/providers/cerebras/provider.py
@@ -0,0 +1,145 @@
+"""Cerebras Inference provider implementation."""
+
+from typing import Dict, Any
+from janito.llm.provider import LLMProvider
+from janito.llm.auth import LLMAuthManager
+from janito.llm.driver_config import LLMDriverConfig
+from janito.drivers.openai.driver import OpenAIModelDriver
+from janito.tools import get_local_tools_adapter
+from janito.providers.registry import LLMProviderRegistry
+from .model_info import MODEL_SPECS
+
+
+class CerebrasProvider(LLMProvider):
+    """Cerebras Inference API provider."""
+
+    name = "cerebras"
+    NAME = "cerebras"
+    DEFAULT_MODEL = "qwen-3-coder-480b"
+    MAINTAINER = "João Pinto <janito@ikignosis.org>"
+    MODEL_SPECS = MODEL_SPECS
+
+    def __init__(self, auth_manager: LLMAuthManager = None, config: LLMDriverConfig = None):
+        """Initialize Cerebras provider with optional configuration."""
+        super().__init__()
+        self._tools_adapter = get_local_tools_adapter()
+        self._driver = None
+        self._tools_adapter = get_local_tools_adapter()
+        self._driver = None
+        if not self.available:
+            return
+
+        self._initialize_config(auth_manager, config)
+        self._setup_model_config()
+        self.fill_missing_device_info(self._driver_config)
+
+        if not self.available:
+            return
+
+        self._initialize_config(None, None)
+        self._driver_config.base_url = "https://api.cerebras.ai/v1"
+
+    def _initialize_config(self, auth_manager, config):
+        """Initialize configuration and API key."""
+        self.auth_manager = auth_manager or LLMAuthManager()
+        self._api_key = self.auth_manager.get_credentials(type(self).NAME)
+        if not self._api_key:
+            from janito.llm.auth_utils import handle_missing_api_key
+
+            handle_missing_api_key(self.name, "CEREBRAS_API_KEY")
+
+        self._driver_config = config or LLMDriverConfig(model=None)
+        if not self._driver_config.model:
+            self._driver_config.model = self.DEFAULT_MODEL
+        if not self._driver_config.api_key:
+            self._driver_config.api_key = self._api_key
+
+        self._setup_model_config()
+        self.fill_missing_device_info(self._driver_config)
+
+    def _setup_model_config(self):
+        """Configure token limits based on model specifications."""
+        model_name = self._driver_config.model
+        model_spec = self.MODEL_SPECS.get(model_name)
+
+        # Reset token parameters
+        if hasattr(self._driver_config, "max_tokens"):
+            self._driver_config.max_tokens = None
+        if hasattr(self._driver_config, "max_completion_tokens"):
+            self._driver_config.max_completion_tokens = None
+
+        if model_spec:
+            # Set context length
+            if hasattr(model_spec, "context") and model_spec.context:
+                self._driver_config.context_length = model_spec.context
+
+            # Set max tokens based on model spec
+            if hasattr(model_spec, "max_response") and model_spec.max_response:
+                self._driver_config.max_tokens = model_spec.max_response
+
+            # Set max completion tokens if thinking is supported
+            if getattr(model_spec, "thinking_supported", False):
+                max_cot = getattr(model_spec, "max_cot", None)
+                if max_cot and max_cot != "N/A":
+                    self._driver_config.max_completion_tokens = int(max_cot)
+            else:
+                max_response = getattr(model_spec, "max_response", None)
+                if max_response and max_response != "N/A":
+                    self._driver_config.max_tokens = int(max_response)
+
+    @property
+    @property
+    def driver(self) -> OpenAIModelDriver:
+        if not self.available:
+            raise ImportError(
+                f"CerebrasProvider unavailable: {self.unavailable_reason}"
+            )
+        if self._driver is None:
+            self._driver = self.create_driver()
+        return self._driver
+
+    @property
+    def available(self):
+        return OpenAIModelDriver.available
+
+    @property
+    def unavailable_reason(self):
+        return OpenAIModelDriver.unavailable_reason
+
+    def create_driver(self) -> OpenAIModelDriver:
+        """Create and return an OpenAI-compatible Cerebras driver instance."""
+        driver = OpenAIModelDriver(
+            tools_adapter=self._tools_adapter, provider_name=self.name
+        )
+        driver.config = self._driver_config
+        return driver
+
+    @property
+    def driver_config(self):
+        """Return the driver configuration."""
+        return self._driver_config
+
+    def is_model_available(self, model_name: str) -> bool:
+        """Check if a model is available for this provider."""
+        return model_name in self.MODEL_SPECS
+
+    def get_model_info(self, model_name: str = None) -> Dict[str, Any]:
+        """Get model information for the specified model or all models."""
+        if model_name is None:
+            return {
+                name: model_info.to_dict()
+                for name, model_info in self.MODEL_SPECS.items()
+            }
+
+        if model_name in self.MODEL_SPECS:
+            return self.MODEL_SPECS[model_name].to_dict()
+
+        return None
+
+    def execute_tool(self, tool_name: str, event_bus, *args, **kwargs):
+        self._tools_adapter.event_bus = event_bus
+        return self._tools_adapter.execute_by_name(tool_name, *args, **kwargs)
+
+
+# Register the provider
+LLMProviderRegistry.register(CerebrasProvider.name, CerebrasProvider)
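
Cerebras reuses the OpenAI-compatible driver rather than shipping its own. A sketch of the wiring, under the assumption that CEREBRAS_API_KEY is already configured (otherwise __init__ exits via handle_missing_api_key):

    from janito.providers.cerebras.provider import CerebrasProvider

    provider = CerebrasProvider()            # resolves credentials via LLMAuthManager
    print(provider.driver_config.model)      # "qwen-3-coder-480b" unless overridden
    driver = provider.create_driver()        # OpenAIModelDriver pointed at https://api.cerebras.ai/v1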

janito/providers/deepseek/provider.py
@@ -31,11 +31,10 @@ class DeepSeekProvider(LLMProvider):
         self.auth_manager = auth_manager or LLMAuthManager()
         self._api_key = self.auth_manager.get_credentials(type(self).NAME)
         if not self._api_key:
-            print(f"[ERROR] No API key found for provider '{self.name}'. Please set the API key using:")
-            print(f" janito --set-api-key YOUR_API_KEY -p {self.name}")
-            print(f"Or set the DEEPSEEK_API_KEY environment variable.")
-            return
-
+            from janito.llm.auth_utils import handle_missing_api_key
+
+            handle_missing_api_key(self.name, "DEEPSEEK_API_KEY")
+
         self._tools_adapter = get_local_tools_adapter()
         self._driver_config = config or LLMDriverConfig(model=None)
         if not self._driver_config.model:

janito/providers/google/provider.py
@@ -33,11 +33,10 @@ class GoogleProvider(LLMProvider):
         self.auth_manager = auth_manager or LLMAuthManager()
         self._api_key = self.auth_manager.get_credentials(type(self).name)
         if not self._api_key:
-            print(f"[ERROR] No API key found for provider '{self.name}'. Please set the API key using:")
-            print(f" janito --set-api-key YOUR_API_KEY -p {self.name}")
-            print(f"Or set the GOOGLE_API_KEY environment variable.")
-            return
-
+            from janito.llm.auth_utils import handle_missing_api_key
+
+            handle_missing_api_key(self.name, "GOOGLE_API_KEY")
+
         self._tools_adapter = get_local_tools_adapter()
         self._driver_config = config or LLMDriverConfig(model=None)
         # Only set default if model is not set by CLI/config

janito/providers/moonshotai/provider.py
@@ -17,44 +17,53 @@ class MoonshotAIProvider(LLMProvider):
     def __init__(
         self, auth_manager: LLMAuthManager = None, config: LLMDriverConfig = None
     ):
+        self._tools_adapter = get_local_tools_adapter()
+        self._driver = None
+
         if not self.available:
-            self._tools_adapter = get_local_tools_adapter()
-            self._driver = None
-        else:
-            self.auth_manager = auth_manager or LLMAuthManager()
-            self._api_key = self.auth_manager.get_credentials(type(self).name)
-            if not self._api_key:
-                print(f"[ERROR] No API key found for provider '{self.name}'. Please set the API key using:")
-                print(f" janito --set-api-key YOUR_API_KEY -p {self.name}")
-                print(f"Or set the MOONSHOTAI_API_KEY environment variable.")
-                return
-
-            self._tools_adapter = get_local_tools_adapter()
-            self._driver_config = config or LLMDriverConfig(model=None)
-            if not self._driver_config.model:
-                self._driver_config.model = self.DEFAULT_MODEL
-            if not self._driver_config.api_key:
-                self._driver_config.api_key = self._api_key
-            # Set only the correct token parameter for the model
-            model_name = self._driver_config.model
-            model_spec = self.MODEL_SPECS.get(model_name)
-            if hasattr(self._driver_config, "max_tokens"):
-                self._driver_config.max_tokens = None
-            if hasattr(self._driver_config, "max_completion_tokens"):
-                self._driver_config.max_completion_tokens = None
-            if model_spec:
-                if getattr(model_spec, "thinking_supported", False):
-                    max_cot = getattr(model_spec, "max_cot", None)
-                    if max_cot and max_cot != "N/A":
-                        self._driver_config.max_completion_tokens = int(max_cot)
-                else:
-                    max_response = getattr(model_spec, "max_response", None)
-                    if max_response and max_response != "N/A":
-                        self._driver_config.max_tokens = int(max_response)
-            self.fill_missing_device_info(self._driver_config)
-            self._driver = None
-            # Set MoonshotAI base_url
-            self._driver_config.base_url = "https://api.moonshot.ai/v1"
+            return
+
+        self._initialize_config(auth_manager, config)
+        self._setup_model_config()
+        self._driver_config.base_url = "https://api.moonshot.ai/v1"
+
+    def _initialize_config(self, auth_manager, config):
+        """Initialize configuration and API key."""
+        self.auth_manager = auth_manager or LLMAuthManager()
+        self._api_key = self.auth_manager.get_credentials(type(self).name)
+        if not self._api_key:
+            from janito.llm.auth_utils import handle_missing_api_key
+
+            handle_missing_api_key(self.name, "MOONSHOTAI_API_KEY")
+
+        self._driver_config = config or LLMDriverConfig(model=None)
+        if not self._driver_config.model:
+            self._driver_config.model = self.DEFAULT_MODEL
+        if not self._driver_config.api_key:
+            self._driver_config.api_key = self._api_key
+
+    def _setup_model_config(self):
+        """Configure token limits based on model specifications."""
+        model_name = self._driver_config.model
+        model_spec = self.MODEL_SPECS.get(model_name)
+
+        # Reset token parameters
+        if hasattr(self._driver_config, "max_tokens"):
+            self._driver_config.max_tokens = None
+        if hasattr(self._driver_config, "max_completion_tokens"):
+            self._driver_config.max_completion_tokens = None
+
+        if model_spec:
+            if getattr(model_spec, "thinking_supported", False):
+                max_cot = getattr(model_spec, "max_cot", None)
+                if max_cot and max_cot != "N/A":
+                    self._driver_config.max_completion_tokens = int(max_cot)
+            else:
+                max_response = getattr(model_spec, "max_response", None)
+                if max_response and max_response != "N/A":
+                    self._driver_config.max_tokens = int(max_response)
+
+        self.fill_missing_device_info(self._driver_config)

     @property
     def driver(self) -> OpenAIModelDriver: