janito 2.10.0__py3-none-any.whl → 2.12.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- janito/cli/chat_mode/session.py +9 -0
- janito/cli/core/model_guesser.py +51 -0
- janito/cli/core/runner.py +13 -1
- janito/cli/main_cli.py +7 -0
- janito/drivers/openai/driver.py +3 -2
- janito/drivers/zai/__init__.py +1 -0
- janito/drivers/zai/driver.py +465 -0
- janito/mkdocs.yml +1 -1
- janito/providers/__init__.py +1 -0
- janito/providers/alibaba/model_info.py +7 -0
- janito/providers/alibaba/provider.py +1 -1
- janito/providers/zai/__init__.py +1 -0
- janito/providers/zai/model_info.py +55 -0
- janito/providers/zai/provider.py +128 -0
- janito/providers/zai/schema_generator.py +133 -0
- {janito-2.10.0.dist-info → janito-2.12.0.dist-info}/METADATA +1 -1
- {janito-2.10.0.dist-info → janito-2.12.0.dist-info}/RECORD +21 -15
- janito/docs/PROVIDERS.md +0 -224
- {janito-2.10.0.dist-info → janito-2.12.0.dist-info}/WHEEL +0 -0
- {janito-2.10.0.dist-info → janito-2.12.0.dist-info}/entry_points.txt +0 -0
- {janito-2.10.0.dist-info → janito-2.12.0.dist-info}/licenses/LICENSE +0 -0
- {janito-2.10.0.dist-info → janito-2.12.0.dist-info}/top_level.txt +0 -0
janito/cli/chat_mode/session.py
CHANGED
@@ -109,6 +109,9 @@ class ChatSession:
|
|
109
109
|
)
|
110
110
|
self._support = False
|
111
111
|
|
112
|
+
# Check if multi-line mode should be enabled by default
|
113
|
+
self.multi_line_mode = getattr(args, "multi", False) if args else False
|
114
|
+
|
112
115
|
def _select_profile_and_role(self, args, role):
|
113
116
|
profile = getattr(args, "profile", None) if args is not None else None
|
114
117
|
role_arg = getattr(args, "role", None) if args is not None else None
|
@@ -213,6 +216,11 @@ class ChatSession:
|
|
213
216
|
f"[green]Working Dir:[/green] {cwd_display} | {priv_status}"
|
214
217
|
)
|
215
218
|
|
219
|
+
if self.multi_line_mode:
|
220
|
+
self.console.print(
|
221
|
+
"[blue]Multi-line input mode enabled (Esc+Enter or Ctrl+D to submit)[/blue]"
|
222
|
+
)
|
223
|
+
|
216
224
|
from janito.cli.chat_mode.shell.commands._priv_check import (
|
217
225
|
user_has_any_privileges,
|
218
226
|
)
|
@@ -305,6 +313,7 @@ class ChatSession:
|
|
305
313
|
bottom_toolbar=lambda: get_toolbar_func(
|
306
314
|
self.performance_collector, 0, self.shell_state
|
307
315
|
)(),
|
316
|
+
multiline=self.multi_line_mode,
|
308
317
|
)
|
309
318
|
|
310
319
|
def _handle_input(self, session):
|
@@ -0,0 +1,51 @@
|
|
1
|
+
"""
|
2
|
+
Module for guessing the provider based on model names.
|
3
|
+
"""
|
4
|
+
|
5
|
+
from janito.providers.registry import LLMProviderRegistry
|
6
|
+
|
7
|
+
|
8
|
+
def guess_provider_from_model(model_name: str) -> str:
|
9
|
+
"""
|
10
|
+
Guess the provider based on the model name.
|
11
|
+
|
12
|
+
Args:
|
13
|
+
model_name: The name of the model to guess the provider for
|
14
|
+
|
15
|
+
Returns:
|
16
|
+
The provider name if a match is found, None otherwise
|
17
|
+
"""
|
18
|
+
if not model_name:
|
19
|
+
return None
|
20
|
+
|
21
|
+
model_name = model_name.lower()
|
22
|
+
|
23
|
+
# Check each provider's models
|
24
|
+
for provider_name in LLMProviderRegistry.list_providers():
|
25
|
+
provider_class = LLMProviderRegistry.get(provider_name)
|
26
|
+
if not provider_class:
|
27
|
+
continue
|
28
|
+
|
29
|
+
# Get model specs for this provider
|
30
|
+
try:
|
31
|
+
if hasattr(provider_class, 'MODEL_SPECS'):
|
32
|
+
model_specs = provider_class.MODEL_SPECS
|
33
|
+
for spec_model_name in model_specs.keys():
|
34
|
+
if spec_model_name.lower() == model_name:
|
35
|
+
return provider_name
|
36
|
+
|
37
|
+
# Handle special cases like moonshotai
|
38
|
+
if provider_name == "moonshotai":
|
39
|
+
try:
|
40
|
+
from janito.providers.moonshotai.model_info import MOONSHOTAI_MODEL_SPECS
|
41
|
+
for spec_model_name in MOONSHOTAI_MODEL_SPECS.keys():
|
42
|
+
if spec_model_name.lower() == model_name:
|
43
|
+
return "moonshotai"
|
44
|
+
except ImportError:
|
45
|
+
pass
|
46
|
+
|
47
|
+
except Exception:
|
48
|
+
# Skip providers that have issues accessing model specs
|
49
|
+
continue
|
50
|
+
|
51
|
+
return None
|
janito/cli/core/runner.py
CHANGED
@@ -2,6 +2,7 @@
|
|
2
2
|
|
3
3
|
from janito.llm.driver_config import LLMDriverConfig
|
4
4
|
from janito.provider_config import get_config_provider
|
5
|
+
from janito.cli.core.model_guesser import guess_provider_from_model as _guess_provider_from_model
|
5
6
|
from janito.cli.verbose_output import print_verbose_info
|
6
7
|
|
7
8
|
|
@@ -14,6 +15,17 @@ def _choose_provider(args):
|
|
14
15
|
"Default provider", provider, style="magenta", align_content=True
|
15
16
|
)
|
16
17
|
elif provider is None:
|
18
|
+
# Try to guess provider based on model name if -m is provided
|
19
|
+
model = getattr(args, "model", None)
|
20
|
+
if model:
|
21
|
+
guessed_provider = _guess_provider_from_model(model)
|
22
|
+
if guessed_provider:
|
23
|
+
if getattr(args, "verbose", False):
|
24
|
+
print_verbose_info(
|
25
|
+
"Guessed provider", guessed_provider, style="magenta", align_content=True
|
26
|
+
)
|
27
|
+
return guessed_provider
|
28
|
+
|
17
29
|
print(
|
18
30
|
"Error: No provider selected and no provider found in config. Please set a provider using '-p PROVIDER', '--set provider=name', or configure a provider."
|
19
31
|
)
|
@@ -191,4 +203,4 @@ def handle_runner(
|
|
191
203
|
|
192
204
|
|
193
205
|
def get_prompt_mode(args):
|
194
|
-
return "single_shot" if getattr(args, "user_prompt", None) else "chat_mode"
|
206
|
+
return "single_shot" if getattr(args, "user_prompt", None) else "chat_mode"
|
janito/cli/main_cli.py
CHANGED
@@ -21,6 +21,13 @@ definition = [
|
|
21
21
|
"help": "Disable path security: allow tool arguments to use any file/directory path (DANGEROUS)",
|
22
22
|
},
|
23
23
|
),
|
24
|
+
(
|
25
|
+
["--multi"],
|
26
|
+
{
|
27
|
+
"action": "store_true",
|
28
|
+
"help": "Start chat mode with multi-line input as default (no need for /multi command)",
|
29
|
+
},
|
30
|
+
),
|
24
31
|
(
|
25
32
|
["--profile"],
|
26
33
|
{
|
janito/drivers/openai/driver.py
CHANGED
@@ -44,9 +44,10 @@ class OpenAIModelDriver(LLMDriver):
|
|
44
44
|
|
45
45
|
tool_classes = self.tools_adapter.get_tool_classes()
|
46
46
|
tool_schemas = generate_tool_schemas(tool_classes)
|
47
|
-
|
47
|
+
if tool_schemas: # Only add tools if we have actual schemas
|
48
|
+
api_kwargs["tools"] = tool_schemas
|
48
49
|
except Exception as e:
|
49
|
-
|
50
|
+
# Don't add empty tools array - some providers reject it
|
50
51
|
if hasattr(config, "verbose_api") and config.verbose_api:
|
51
52
|
print(f"[OpenAIModelDriver] Tool schema generation failed: {e}")
|
52
53
|
# OpenAI-specific parameters
|
@@ -0,0 +1 @@
|
|
1
|
+
# Z.AI driver package
|
@@ -0,0 +1,465 @@
|
|
1
|
+
import uuid
|
2
|
+
import traceback
|
3
|
+
import re
|
4
|
+
import json
|
5
|
+
import math
|
6
|
+
import time
|
7
|
+
import os
|
8
|
+
import logging
|
9
|
+
from rich import pretty
|
10
|
+
from janito.llm.driver import LLMDriver
|
11
|
+
from janito.llm.driver_input import DriverInput
|
12
|
+
from janito.driver_events import RequestFinished, RequestStatus, RateLimitRetry
|
13
|
+
from janito.llm.message_parts import TextMessagePart, FunctionCallMessagePart
|
14
|
+
|
15
|
+
import openai
|
16
|
+
|
17
|
+
|
18
|
+
class ZAIModelDriver(LLMDriver):
|
19
|
+
def _get_message_from_result(self, result):
|
20
|
+
"""Extract the message object from the provider result (Z.AI-specific)."""
|
21
|
+
if hasattr(result, "choices") and result.choices:
|
22
|
+
return result.choices[0].message
|
23
|
+
return None
|
24
|
+
|
25
|
+
"""
|
26
|
+
Z.AI LLM driver (threaded, queue-based, stateless). Uses input/output queues accessible via instance attributes.
|
27
|
+
"""
|
28
|
+
|
29
|
+
def __init__(self, tools_adapter=None, provider_name=None):
|
30
|
+
super().__init__(tools_adapter=tools_adapter, provider_name=provider_name)
|
31
|
+
|
32
|
+
def _prepare_api_kwargs(self, config, conversation):
|
33
|
+
"""
|
34
|
+
Prepares API kwargs for Z.AI, including tool schemas if tools_adapter is present,
|
35
|
+
and Z.AI-specific arguments (model, max_tokens, temperature, etc.).
|
36
|
+
"""
|
37
|
+
api_kwargs = {}
|
38
|
+
# Tool schemas (moved from base)
|
39
|
+
if self.tools_adapter:
|
40
|
+
try:
|
41
|
+
from janito.providers.zai.schema_generator import (
|
42
|
+
generate_tool_schemas,
|
43
|
+
)
|
44
|
+
|
45
|
+
tool_classes = self.tools_adapter.get_tool_classes()
|
46
|
+
tool_schemas = generate_tool_schemas(tool_classes)
|
47
|
+
api_kwargs["tools"] = tool_schemas
|
48
|
+
except Exception as e:
|
49
|
+
api_kwargs["tools"] = []
|
50
|
+
if hasattr(config, "verbose_api") and config.verbose_api:
|
51
|
+
print(f"[ZAIModelDriver] Tool schema generation failed: {e}")
|
52
|
+
# Z.AI-specific parameters
|
53
|
+
if config.model:
|
54
|
+
api_kwargs["model"] = config.model
|
55
|
+
# Prefer max_completion_tokens if present, else fallback to max_tokens (for backward compatibility)
|
56
|
+
if (
|
57
|
+
hasattr(config, "max_completion_tokens")
|
58
|
+
and config.max_completion_tokens is not None
|
59
|
+
):
|
60
|
+
api_kwargs["max_completion_tokens"] = int(config.max_completion_tokens)
|
61
|
+
elif hasattr(config, "max_tokens") and config.max_tokens is not None:
|
62
|
+
# For models that do not support 'max_tokens', map to 'max_completion_tokens'
|
63
|
+
api_kwargs["max_completion_tokens"] = int(config.max_tokens)
|
64
|
+
for p in (
|
65
|
+
"temperature",
|
66
|
+
"top_p",
|
67
|
+
"presence_penalty",
|
68
|
+
"frequency_penalty",
|
69
|
+
"stop",
|
70
|
+
"reasoning_effort",
|
71
|
+
):
|
72
|
+
v = getattr(config, p, None)
|
73
|
+
if v is not None:
|
74
|
+
api_kwargs[p] = v
|
75
|
+
api_kwargs["messages"] = conversation
|
76
|
+
api_kwargs["stream"] = False
|
77
|
+
# Always return the prepared kwargs, even if no tools are registered. The
|
78
|
+
# OpenAI Python SDK expects a **mapping** – passing *None* will raise
|
79
|
+
# ``TypeError: argument after ** must be a mapping, not NoneType``.
|
80
|
+
return api_kwargs
|
81
|
+
|
82
|
+
def _call_api(self, driver_input: DriverInput):
|
83
|
+
"""Call the Z.AI-compatible chat completion endpoint with retry and error handling."""
|
84
|
+
cancel_event = getattr(driver_input, "cancel_event", None)
|
85
|
+
config = driver_input.config
|
86
|
+
conversation = self.convert_history_to_api_messages(
|
87
|
+
driver_input.conversation_history
|
88
|
+
)
|
89
|
+
request_id = getattr(config, "request_id", None)
|
90
|
+
self._print_api_call_start(config)
|
91
|
+
client = self._instantiate_zai_client(config)
|
92
|
+
api_kwargs = self._prepare_api_kwargs(config, conversation)
|
93
|
+
max_retries = getattr(config, "max_retries", 3)
|
94
|
+
attempt = 1
|
95
|
+
while True:
|
96
|
+
try:
|
97
|
+
self._print_api_attempt(config, attempt, max_retries, api_kwargs)
|
98
|
+
if self._check_cancel(cancel_event, request_id, before_call=True):
|
99
|
+
return None
|
100
|
+
result = client.chat.completions.create(**api_kwargs)
|
101
|
+
if self._check_cancel(cancel_event, request_id, before_call=False):
|
102
|
+
return None
|
103
|
+
self._handle_api_success(config, result, request_id)
|
104
|
+
return result
|
105
|
+
except Exception as e:
|
106
|
+
if self._handle_api_exception(
|
107
|
+
e, config, api_kwargs, attempt, max_retries, request_id
|
108
|
+
):
|
109
|
+
attempt += 1
|
110
|
+
continue
|
111
|
+
raise
|
112
|
+
|
113
|
+
def _print_api_call_start(self, config):
|
114
|
+
if getattr(config, "verbose_api", False):
|
115
|
+
tool_adapter_name = (
|
116
|
+
type(self.tools_adapter).__name__ if self.tools_adapter else None
|
117
|
+
)
|
118
|
+
tool_names = []
|
119
|
+
if self.tools_adapter and hasattr(self.tools_adapter, "list_tools"):
|
120
|
+
try:
|
121
|
+
tool_names = self.tools_adapter.list_tools()
|
122
|
+
except Exception:
|
123
|
+
tool_names = ["<error retrieving tools>"]
|
124
|
+
print(
|
125
|
+
f"[verbose-api] Z.AI API call about to be sent. Model: {config.model}, max_tokens: {config.max_tokens}, tools_adapter: {tool_adapter_name}, tool_names: {tool_names}",
|
126
|
+
flush=True,
|
127
|
+
)
|
128
|
+
|
129
|
+
def _print_api_attempt(self, config, attempt, max_retries, api_kwargs):
|
130
|
+
if getattr(config, "verbose_api", False):
|
131
|
+
print(
|
132
|
+
f"[Z.AI] API CALL (attempt {attempt}/{max_retries}): chat.completions.create(**{api_kwargs})",
|
133
|
+
flush=True,
|
134
|
+
)
|
135
|
+
|
136
|
+
def _handle_api_success(self, config, result, request_id):
|
137
|
+
self._print_verbose_result(config, result)
|
138
|
+
usage_dict = self._extract_usage(result)
|
139
|
+
if getattr(config, "verbose_api", False):
|
140
|
+
print(
|
141
|
+
f"[Z.AI][DEBUG] Attaching usage info to RequestFinished: {usage_dict}",
|
142
|
+
flush=True,
|
143
|
+
)
|
144
|
+
self.output_queue.put(
|
145
|
+
RequestFinished(
|
146
|
+
driver_name=self.__class__.__name__,
|
147
|
+
request_id=request_id,
|
148
|
+
response=result,
|
149
|
+
status=RequestStatus.SUCCESS,
|
150
|
+
usage=usage_dict,
|
151
|
+
)
|
152
|
+
)
|
153
|
+
if getattr(config, "verbose_api", False):
|
154
|
+
pretty.install()
|
155
|
+
print("[Z.AI] API RESPONSE:", flush=True)
|
156
|
+
pretty.pprint(result)
|
157
|
+
|
158
|
+
def _handle_api_exception(
|
159
|
+
self, e, config, api_kwargs, attempt, max_retries, request_id
|
160
|
+
):
|
161
|
+
status_code = getattr(e, "status_code", None)
|
162
|
+
err_str = str(e)
|
163
|
+
lower_err = err_str.lower()
|
164
|
+
is_insufficient_quota = (
|
165
|
+
"insufficient_quota" in lower_err
|
166
|
+
or "exceeded your current quota" in lower_err
|
167
|
+
)
|
168
|
+
is_rate_limit = (
|
169
|
+
status_code == 429
|
170
|
+
or "error code: 429" in lower_err
|
171
|
+
or "resource_exhausted" in lower_err
|
172
|
+
) and not is_insufficient_quota
|
173
|
+
if not is_rate_limit or attempt > max_retries:
|
174
|
+
self._handle_fatal_exception(e, config, api_kwargs)
|
175
|
+
retry_delay = self._extract_retry_delay_seconds(e)
|
176
|
+
if retry_delay is None:
|
177
|
+
retry_delay = min(2 ** (attempt - 1), 30)
|
178
|
+
self.output_queue.put(
|
179
|
+
RateLimitRetry(
|
180
|
+
driver_name=self.__class__.__name__,
|
181
|
+
request_id=request_id,
|
182
|
+
attempt=attempt,
|
183
|
+
retry_delay=retry_delay,
|
184
|
+
error=err_str,
|
185
|
+
details={},
|
186
|
+
)
|
187
|
+
)
|
188
|
+
if getattr(config, "verbose_api", False):
|
189
|
+
print(
|
190
|
+
f"[Z.AI][RateLimit] Attempt {attempt}/{max_retries} failed with rate-limit. Waiting {retry_delay}s before retry.",
|
191
|
+
flush=True,
|
192
|
+
)
|
193
|
+
start_wait = time.time()
|
194
|
+
while time.time() - start_wait < retry_delay:
|
195
|
+
if self._check_cancel(
|
196
|
+
getattr(config, "cancel_event", None), request_id, before_call=False
|
197
|
+
):
|
198
|
+
return False
|
199
|
+
time.sleep(0.1)
|
200
|
+
return True
|
201
|
+
|
202
|
+
def _extract_retry_delay_seconds(self, exception) -> float | None:
|
203
|
+
"""Extract the retry delay in seconds from the provider error response.
|
204
|
+
|
205
|
+
Handles both the Google Gemini style ``RetryInfo`` protobuf (where it's a
|
206
|
+
``retryDelay: '41s'`` string in JSON) and any number found after the word
|
207
|
+
``retryDelay``. Returns ``None`` if no delay could be parsed.
|
208
|
+
"""
|
209
|
+
try:
|
210
|
+
# Some SDKs expose the raw response JSON on e.args[0]
|
211
|
+
if hasattr(exception, "response") and hasattr(exception.response, "text"):
|
212
|
+
payload = exception.response.text
|
213
|
+
else:
|
214
|
+
payload = str(exception)
|
215
|
+
# Look for 'retryDelay': '41s' or similar
|
216
|
+
m = re.search(
|
217
|
+
r"retryDelay['\"]?\s*[:=]\s*['\"]?(\d+(?:\.\d+)?)(s)?", payload
|
218
|
+
)
|
219
|
+
if m:
|
220
|
+
return float(m.group(1))
|
221
|
+
# Fallback: generic number of seconds in the message
|
222
|
+
m2 = re.search(r"(\d+(?:\.\d+)?)\s*s(?:econds)?", payload)
|
223
|
+
if m2:
|
224
|
+
return float(m2.group(1))
|
225
|
+
except Exception:
|
226
|
+
pass
|
227
|
+
return None
|
228
|
+
|
229
|
+
def _handle_fatal_exception(self, e, config, api_kwargs):
|
230
|
+
"""Common path for unrecoverable exceptions.
|
231
|
+
|
232
|
+
Prints diagnostics (respecting ``verbose_api``) then re-raises the
|
233
|
+
exception so standard error handling in ``LLMDriver`` continues.
|
234
|
+
"""
|
235
|
+
is_verbose = getattr(config, "verbose_api", False)
|
236
|
+
if is_verbose:
|
237
|
+
print(f"[ERROR] Exception during Z.AI API call: {e}", flush=True)
|
238
|
+
print(f"[ERROR] config: {config}", flush=True)
|
239
|
+
print(
|
240
|
+
f"[ERROR] api_kwargs: {api_kwargs if 'api_kwargs' in locals() else 'N/A'}",
|
241
|
+
flush=True,
|
242
|
+
)
|
243
|
+
print("[ERROR] Full stack trace:", flush=True)
|
244
|
+
print(traceback.format_exc(), flush=True)
|
245
|
+
raise
|
246
|
+
|
247
|
+
def _instantiate_zai_client(self, config):
|
248
|
+
try:
|
249
|
+
if not config.api_key:
|
250
|
+
provider_name = getattr(self, "provider_name", "ZAI")
|
251
|
+
print(
|
252
|
+
f"[ERROR] No API key found for provider '{provider_name}'. Please set the API key using:"
|
253
|
+
)
|
254
|
+
print(f" janito --set-api-key YOUR_API_KEY -p {provider_name.lower()}")
|
255
|
+
print(
|
256
|
+
f"Or set the {provider_name.upper()}_API_KEY environment variable."
|
257
|
+
)
|
258
|
+
raise ValueError(f"API key is required for provider '{provider_name}'")
|
259
|
+
|
260
|
+
api_key_display = str(config.api_key)
|
261
|
+
if api_key_display and len(api_key_display) > 8:
|
262
|
+
api_key_display = api_key_display[:4] + "..." + api_key_display[-4:]
|
263
|
+
client_kwargs = {
|
264
|
+
"api_key": config.api_key,
|
265
|
+
"base_url": "https://api.z.ai/api/paas/v4",
|
266
|
+
}
|
267
|
+
|
268
|
+
# HTTP debug wrapper
|
269
|
+
if os.environ.get("ZAI_DEBUG_HTTP", "0") == "1":
|
270
|
+
from http.client import HTTPConnection
|
271
|
+
|
272
|
+
HTTPConnection.debuglevel = 1
|
273
|
+
logging.basicConfig()
|
274
|
+
logging.getLogger().setLevel(logging.DEBUG)
|
275
|
+
requests_log = logging.getLogger("http.client")
|
276
|
+
requests_log.setLevel(logging.DEBUG)
|
277
|
+
requests_log.propagate = True
|
278
|
+
print(
|
279
|
+
"[ZAIModelDriver] HTTP debug enabled via ZAI_DEBUG_HTTP=1",
|
280
|
+
flush=True,
|
281
|
+
)
|
282
|
+
|
283
|
+
client = openai.OpenAI(**client_kwargs)
|
284
|
+
return client
|
285
|
+
except Exception as e:
|
286
|
+
print(
|
287
|
+
f"[ERROR] Exception during Z.AI client instantiation: {e}", flush=True
|
288
|
+
)
|
289
|
+
print(traceback.format_exc(), flush=True)
|
290
|
+
raise
|
291
|
+
|
292
|
+
def _check_cancel(self, cancel_event, request_id, before_call=True):
|
293
|
+
if cancel_event is not None and cancel_event.is_set():
|
294
|
+
status = RequestStatus.CANCELLED
|
295
|
+
reason = (
|
296
|
+
"Cancelled before API call"
|
297
|
+
if before_call
|
298
|
+
else "Cancelled during API call"
|
299
|
+
)
|
300
|
+
self.output_queue.put(
|
301
|
+
RequestFinished(
|
302
|
+
driver_name=self.__class__.__name__,
|
303
|
+
request_id=request_id,
|
304
|
+
status=status,
|
305
|
+
reason=reason,
|
306
|
+
)
|
307
|
+
)
|
308
|
+
return True
|
309
|
+
return False
|
310
|
+
|
311
|
+
def _print_verbose_result(self, config, result):
|
312
|
+
if config.verbose_api:
|
313
|
+
print("[Z.AI] API RAW RESULT:", flush=True)
|
314
|
+
pretty.pprint(result)
|
315
|
+
if hasattr(result, "__dict__"):
|
316
|
+
print("[Z.AI] API RESULT __dict__:", flush=True)
|
317
|
+
pretty.pprint(result.__dict__)
|
318
|
+
try:
|
319
|
+
print("[Z.AI] API RESULT as dict:", dict(result), flush=True)
|
320
|
+
except Exception:
|
321
|
+
pass
|
322
|
+
print(
|
323
|
+
f"[Z.AI] API RESULT .usage: {getattr(result, 'usage', None)}",
|
324
|
+
flush=True,
|
325
|
+
)
|
326
|
+
try:
|
327
|
+
print(f"[Z.AI] API RESULT ['usage']: {result['usage']}", flush=True)
|
328
|
+
except Exception:
|
329
|
+
pass
|
330
|
+
if not hasattr(result, "usage") or getattr(result, "usage", None) is None:
|
331
|
+
print(
|
332
|
+
"[Z.AI][WARNING] No usage info found in API response.", flush=True
|
333
|
+
)
|
334
|
+
|
335
|
+
def _extract_usage(self, result):
|
336
|
+
usage = getattr(result, "usage", None)
|
337
|
+
if usage is not None:
|
338
|
+
usage_dict = self._usage_to_dict(usage)
|
339
|
+
if usage_dict is None:
|
340
|
+
print(
|
341
|
+
"[Z.AI][WARNING] Could not convert usage to dict, using string fallback.",
|
342
|
+
flush=True,
|
343
|
+
)
|
344
|
+
usage_dict = str(usage)
|
345
|
+
else:
|
346
|
+
usage_dict = self._extract_usage_from_result_dict(result)
|
347
|
+
return usage_dict
|
348
|
+
|
349
|
+
def _usage_to_dict(self, usage):
|
350
|
+
if hasattr(usage, "model_dump") and callable(getattr(usage, "model_dump")):
|
351
|
+
try:
|
352
|
+
return usage.model_dump()
|
353
|
+
except Exception:
|
354
|
+
pass
|
355
|
+
if hasattr(usage, "dict") and callable(getattr(usage, "dict")):
|
356
|
+
try:
|
357
|
+
return usage.dict()
|
358
|
+
except Exception:
|
359
|
+
pass
|
360
|
+
try:
|
361
|
+
return dict(usage)
|
362
|
+
except Exception:
|
363
|
+
try:
|
364
|
+
return vars(usage)
|
365
|
+
except Exception:
|
366
|
+
pass
|
367
|
+
return None
|
368
|
+
|
369
|
+
def _extract_usage_from_result_dict(self, result):
|
370
|
+
try:
|
371
|
+
return result["usage"]
|
372
|
+
except Exception:
|
373
|
+
return None
|
374
|
+
|
375
|
+
def convert_history_to_api_messages(self, conversation_history):
|
376
|
+
"""
|
377
|
+
Convert LLMConversationHistory to the list of dicts required by Z.AI's API.
|
378
|
+
Handles 'tool_results' and 'tool_calls' roles for compliance.
|
379
|
+
"""
|
380
|
+
api_messages = []
|
381
|
+
for msg in conversation_history.get_history():
|
382
|
+
self._append_api_message(api_messages, msg)
|
383
|
+
self._replace_none_content(api_messages)
|
384
|
+
return api_messages
|
385
|
+
|
386
|
+
def _append_api_message(self, api_messages, msg):
|
387
|
+
role = msg.get("role")
|
388
|
+
content = msg.get("content")
|
389
|
+
if role == "tool_results":
|
390
|
+
self._handle_tool_results(api_messages, content)
|
391
|
+
elif role == "tool_calls":
|
392
|
+
self._handle_tool_calls(api_messages, content)
|
393
|
+
else:
|
394
|
+
self._handle_other_roles(api_messages, msg, role, content)
|
395
|
+
|
396
|
+
def _handle_tool_results(self, api_messages, content):
|
397
|
+
try:
|
398
|
+
results = json.loads(content) if isinstance(content, str) else content
|
399
|
+
except Exception:
|
400
|
+
results = [content]
|
401
|
+
for result in results:
|
402
|
+
if isinstance(result, dict):
|
403
|
+
api_messages.append(
|
404
|
+
{
|
405
|
+
"role": "tool",
|
406
|
+
"content": result.get("content", ""),
|
407
|
+
"name": result.get("name", ""),
|
408
|
+
"tool_call_id": result.get("tool_call_id", ""),
|
409
|
+
}
|
410
|
+
)
|
411
|
+
else:
|
412
|
+
api_messages.append(
|
413
|
+
{
|
414
|
+
"role": "tool",
|
415
|
+
"content": str(result),
|
416
|
+
"name": "",
|
417
|
+
"tool_call_id": "",
|
418
|
+
}
|
419
|
+
)
|
420
|
+
|
421
|
+
def _handle_tool_calls(self, api_messages, content):
|
422
|
+
try:
|
423
|
+
tool_calls = json.loads(content) if isinstance(content, str) else content
|
424
|
+
except Exception:
|
425
|
+
tool_calls = []
|
426
|
+
api_messages.append(
|
427
|
+
{"role": "assistant", "content": "", "tool_calls": tool_calls}
|
428
|
+
)
|
429
|
+
|
430
|
+
def _handle_other_roles(self, api_messages, msg, role, content):
|
431
|
+
if role == "function":
|
432
|
+
name = ""
|
433
|
+
if isinstance(msg, dict):
|
434
|
+
metadata = msg.get("metadata", {})
|
435
|
+
name = metadata.get("name", "") if isinstance(metadata, dict) else ""
|
436
|
+
api_messages.append({"role": "tool", "content": content, "name": name})
|
437
|
+
else:
|
438
|
+
api_messages.append(msg)
|
439
|
+
|
440
|
+
def _replace_none_content(self, api_messages):
|
441
|
+
for m in api_messages:
|
442
|
+
if m.get("content", None) is None:
|
443
|
+
m["content"] = ""
|
444
|
+
|
445
|
+
def _convert_completion_message_to_parts(self, message):
|
446
|
+
"""
|
447
|
+
Convert a Z.AI completion message object to a list of MessagePart objects.
|
448
|
+
Handles text, tool calls, and can be extended for other types.
|
449
|
+
"""
|
450
|
+
parts = []
|
451
|
+
# Text content
|
452
|
+
content = getattr(message, "content", None)
|
453
|
+
if content:
|
454
|
+
parts.append(TextMessagePart(content=content))
|
455
|
+
# Tool calls
|
456
|
+
tool_calls = getattr(message, "tool_calls", None) or []
|
457
|
+
for tool_call in tool_calls:
|
458
|
+
parts.append(
|
459
|
+
FunctionCallMessagePart(
|
460
|
+
tool_call_id=getattr(tool_call, "id", ""),
|
461
|
+
function=getattr(tool_call, "function", None),
|
462
|
+
)
|
463
|
+
)
|
464
|
+
# Extend here for other message part types if needed
|
465
|
+
return parts
|
janito/mkdocs.yml
CHANGED
janito/providers/__init__.py
CHANGED
@@ -30,4 +30,11 @@ MODEL_SPECS = {
|
|
30
30
|
category="Alibaba Qwen3 Coder Plus Model (OpenAI-compatible)",
|
31
31
|
driver="OpenAIModelDriver",
|
32
32
|
),
|
33
|
+
"qwen3-coder-480b-a35b-instruct": LLMModelInfo(
|
34
|
+
name="qwen3-coder-480b-a35b-instruct",
|
35
|
+
context=262144,
|
36
|
+
max_response=65536,
|
37
|
+
category="Alibaba Qwen3 Coder 480B A35B Instruct Model (OpenAI-compatible)",
|
38
|
+
driver="OpenAIModelDriver",
|
39
|
+
),
|
33
40
|
}
|
@@ -17,7 +17,7 @@ class AlibabaProvider(LLMProvider):
|
|
17
17
|
NAME = "alibaba"
|
18
18
|
MAINTAINER = "João Pinto <janito@ikignosis.org>"
|
19
19
|
MODEL_SPECS = MODEL_SPECS
|
20
|
-
DEFAULT_MODEL = "
|
20
|
+
DEFAULT_MODEL = "qwen3-coder-plus" # Options: qwen-turbo, qwen-plus, qwen-max, qwen3-coder-plus
|
21
21
|
|
22
22
|
def __init__(
|
23
23
|
self, auth_manager: LLMAuthManager = None, config: LLMDriverConfig = None
|
@@ -0,0 +1 @@
|
|
1
|
+
# Z.AI provider package
|
@@ -0,0 +1,55 @@
|
|
1
|
+
from janito.llm.model import LLMModelInfo
|
2
|
+
|
3
|
+
MODEL_SPECS = {
|
4
|
+
"glm-4.5": LLMModelInfo(
|
5
|
+
name="glm-4.5",
|
6
|
+
context=128000,
|
7
|
+
max_input=128000,
|
8
|
+
max_cot=4096,
|
9
|
+
max_response=4096,
|
10
|
+
thinking_supported=True,
|
11
|
+
other={
|
12
|
+
"description": "Z.AI's GLM-4.5 model for advanced reasoning and conversation",
|
13
|
+
"supports_tools": True,
|
14
|
+
"supports_images": True,
|
15
|
+
"supports_audio": False,
|
16
|
+
"supports_video": False,
|
17
|
+
"input_cost_per_1k": 0.0005,
|
18
|
+
"output_cost_per_1k": 0.0015,
|
19
|
+
},
|
20
|
+
),
|
21
|
+
"glm-4": LLMModelInfo(
|
22
|
+
name="glm-4",
|
23
|
+
context=128000,
|
24
|
+
max_input=128000,
|
25
|
+
max_cot="N/A",
|
26
|
+
max_response=4096,
|
27
|
+
thinking_supported=False,
|
28
|
+
other={
|
29
|
+
"description": "Z.AI's GLM-4 model for general purpose tasks",
|
30
|
+
"supports_tools": True,
|
31
|
+
"supports_images": True,
|
32
|
+
"supports_audio": False,
|
33
|
+
"supports_video": False,
|
34
|
+
"input_cost_per_1k": 0.0003,
|
35
|
+
"output_cost_per_1k": 0.0009,
|
36
|
+
},
|
37
|
+
),
|
38
|
+
"glm-4v": LLMModelInfo(
|
39
|
+
name="glm-4v",
|
40
|
+
context=128000,
|
41
|
+
max_input=128000,
|
42
|
+
max_cot="N/A",
|
43
|
+
max_response=4096,
|
44
|
+
thinking_supported=False,
|
45
|
+
other={
|
46
|
+
"description": "Z.AI's GLM-4V vision model for image understanding",
|
47
|
+
"supports_tools": True,
|
48
|
+
"supports_images": True,
|
49
|
+
"supports_audio": False,
|
50
|
+
"supports_video": False,
|
51
|
+
"input_cost_per_1k": 0.0004,
|
52
|
+
"output_cost_per_1k": 0.0012,
|
53
|
+
},
|
54
|
+
),
|
55
|
+
}
|
@@ -0,0 +1,128 @@
|
|
1
|
+
from janito.llm.provider import LLMProvider
|
2
|
+
from janito.llm.model import LLMModelInfo
|
3
|
+
from janito.llm.auth import LLMAuthManager
|
4
|
+
from janito.llm.driver_config import LLMDriverConfig
|
5
|
+
from janito.drivers.zai.driver import ZAIModelDriver
|
6
|
+
from janito.tools import get_local_tools_adapter
|
7
|
+
from janito.providers.registry import LLMProviderRegistry
|
8
|
+
from .model_info import MODEL_SPECS
|
9
|
+
from queue import Queue
|
10
|
+
|
11
|
+
available = ZAIModelDriver.available
|
12
|
+
unavailable_reason = ZAIModelDriver.unavailable_reason
|
13
|
+
|
14
|
+
|
15
|
+
class ZAIProvider(LLMProvider):
|
16
|
+
name = "zai"
|
17
|
+
NAME = "zai"
|
18
|
+
MAINTAINER = "João Pinto <janito@ikignosis.org>"
|
19
|
+
MODEL_SPECS = MODEL_SPECS
|
20
|
+
DEFAULT_MODEL = "glm-4.5" # Options: glm-4.5, glm-4, glm-4v
|
21
|
+
|
22
|
+
def __init__(
|
23
|
+
self, auth_manager: LLMAuthManager = None, config: LLMDriverConfig = None
|
24
|
+
):
|
25
|
+
if not self.available:
|
26
|
+
self._setup_unavailable()
|
27
|
+
else:
|
28
|
+
self._setup_available(auth_manager, config)
|
29
|
+
|
30
|
+
def _setup_unavailable(self):
|
31
|
+
# Even when the ZAI driver is unavailable we still need a tools adapter
|
32
|
+
# so that any generic logic that expects `execute_tool()` to work does not
|
33
|
+
# crash with an AttributeError when it tries to access `self._tools_adapter`.
|
34
|
+
self._tools_adapter = get_local_tools_adapter()
|
35
|
+
self._driver = None
|
36
|
+
|
37
|
+
def _setup_available(self, auth_manager, config):
|
38
|
+
self.auth_manager = auth_manager or LLMAuthManager()
|
39
|
+
self._api_key = self.auth_manager.get_credentials(type(self).NAME)
|
40
|
+
if not self._api_key:
|
41
|
+
print(
|
42
|
+
f"[ERROR] No API key found for provider '{self.name}'. Please set the API key using:"
|
43
|
+
)
|
44
|
+
print(f" janito --set-api-key YOUR_API_KEY -p {self.name}")
|
45
|
+
print(f"Or set the ZAI_API_KEY environment variable.")
|
46
|
+
return
|
47
|
+
|
48
|
+
self._tools_adapter = get_local_tools_adapter()
|
49
|
+
self._driver_config = config or LLMDriverConfig(model=None)
|
50
|
+
if not self._driver_config.model:
|
51
|
+
self._driver_config.model = self.DEFAULT_MODEL
|
52
|
+
if not self._driver_config.api_key:
|
53
|
+
self._driver_config.api_key = self._api_key
|
54
|
+
|
55
|
+
self._configure_model_tokens()
|
56
|
+
self.fill_missing_device_info(self._driver_config)
|
57
|
+
self._driver = None # to be provided by factory/agent
|
58
|
+
|
59
|
+
def _configure_model_tokens(self):
|
60
|
+
# Set only the correct token parameter for the model
|
61
|
+
model_name = self._driver_config.model
|
62
|
+
model_spec = self.MODEL_SPECS.get(model_name)
|
63
|
+
# Remove both to avoid stale values
|
64
|
+
if hasattr(self._driver_config, "max_tokens"):
|
65
|
+
self._driver_config.max_tokens = None
|
66
|
+
if hasattr(self._driver_config, "max_completion_tokens"):
|
67
|
+
self._driver_config.max_completion_tokens = None
|
68
|
+
if model_spec:
|
69
|
+
if getattr(model_spec, "thinking_supported", False):
|
70
|
+
max_cot = getattr(model_spec, "max_cot", None)
|
71
|
+
if max_cot and max_cot != "N/A":
|
72
|
+
self._driver_config.max_completion_tokens = int(max_cot)
|
73
|
+
else:
|
74
|
+
max_response = getattr(model_spec, "max_response", None)
|
75
|
+
if max_response and max_response != "N/A":
|
76
|
+
self._driver_config.max_tokens = int(max_response)
|
77
|
+
|
78
|
+
@property
|
79
|
+
def driver(self) -> ZAIModelDriver:
|
80
|
+
if not self.available:
|
81
|
+
raise ImportError(f"ZAIProvider unavailable: {self.unavailable_reason}")
|
82
|
+
return self._driver
|
83
|
+
|
84
|
+
@property
|
85
|
+
def available(self):
|
86
|
+
return available
|
87
|
+
|
88
|
+
@property
|
89
|
+
def unavailable_reason(self):
|
90
|
+
return unavailable_reason
|
91
|
+
|
92
|
+
def create_driver(self):
|
93
|
+
"""
|
94
|
+
Creates and returns a new ZAIModelDriver instance with input/output queues.
|
95
|
+
"""
|
96
|
+
driver = ZAIModelDriver(
|
97
|
+
tools_adapter=self._tools_adapter, provider_name=self.NAME
|
98
|
+
)
|
99
|
+
driver.config = self._driver_config
|
100
|
+
# NOTE: The caller is responsible for calling driver.start() if background processing is needed.
|
101
|
+
return driver
|
102
|
+
|
103
|
+
def create_agent(self, tools_adapter=None, agent_name: str = None, **kwargs):
    """
    Not supported: agents must be constructed through the new
    queue-based driver factory.

    Args:
        tools_adapter: Ignored; kept for interface compatibility.
        agent_name: Ignored; kept for interface compatibility.
        **kwargs: Ignored; kept for interface compatibility.

    Raises:
        NotImplementedError: always.
    """
    # The previous body imported LLMAgent and resolved a tools adapter
    # before unconditionally raising — dead work removed; the raise is
    # the method's entire behavior.
    raise NotImplementedError(
        "create_agent must be constructed via new factory using input/output queues and config."
    )
|
113
|
+
|
114
|
+
@property
def model_name(self):
    # Name of the currently configured model, read from the driver config.
    return self._driver_config.model
|
117
|
+
|
118
|
+
@property
def driver_config(self):
    """Public, read-only access to the provider's LLMDriverConfig object."""
    # Exposes the config built during __init__; callers should treat it
    # as read-only.
    return self._driver_config
|
122
|
+
|
123
|
+
def execute_tool(self, tool_name: str, event_bus, *args, **kwargs):
    """Run a tool by name through this provider's tools adapter.

    The adapter's event bus is repointed at *event_bus* before dispatch so
    tool events are published to the caller's bus.
    """
    adapter = self._tools_adapter
    adapter.event_bus = event_bus
    return adapter.execute_by_name(tool_name, *args, **kwargs)
|
126
|
+
|
127
|
+
|
128
|
+
LLMProviderRegistry.register(ZAIProvider.NAME, ZAIProvider)
|
@@ -0,0 +1,133 @@
|
|
1
|
+
"""
|
2
|
+
Generate OpenAI-compatible tool schemas for Z.AI API.
|
3
|
+
"""
|
4
|
+
|
5
|
+
import inspect
|
6
|
+
from typing import get_type_hints, Dict, Any, Optional, List, Union
|
7
|
+
from janito.tools import Tool
|
8
|
+
|
9
|
+
|
10
|
+
def generate_tool_schemas(tool_classes):
    """
    Build OpenAI-compatible tool schemas for every class in *tool_classes*.

    Classes for which no schema can be derived (generate_tool_schema
    returns None) are silently skipped.

    Args:
        tool_classes: List of Tool classes to generate schemas for

    Returns:
        List of OpenAI-compatible tool schemas
    """
    candidates = (generate_tool_schema(cls) for cls in tool_classes)
    return [schema for schema in candidates if schema]
|
26
|
+
|
27
|
+
|
28
|
+
def generate_tool_schema(tool_class):
    """
    Generate an OpenAI-compatible tool schema from a Tool class.

    The schema is derived by introspecting the signature and type hints of
    the class's ``execute`` method.

    Args:
        tool_class: Tool subclass to generate a schema for

    Returns:
        OpenAI-compatible tool schema dict, or None when the class is not
        a Tool subclass, has no execute method, or its signature cannot be
        introspected.
    """
    if not issubclass(tool_class, Tool):
        return None

    # Instantiated so per-parameter descriptions and the tool description
    # can be read from the instance.
    tool_instance = tool_class()

    execute_method = getattr(tool_class, "execute", None)
    if not execute_method:
        return None

    try:
        sig = inspect.signature(execute_method)
        type_hints = get_type_hints(execute_method)
    except (ValueError, TypeError):
        # C-level callables may expose no signature; unresolvable
        # annotations also raise here.
        return None

    properties = {}
    required = []

    for param_name, param in sig.parameters.items():
        if param_name == "self":
            continue
        # *args / **kwargs cannot be represented as named JSON-schema
        # properties; previously they were emitted as bogus required
        # string parameters, so they are now skipped.
        if param.kind in (
            inspect.Parameter.VAR_POSITIONAL,
            inspect.Parameter.VAR_KEYWORD,
        ):
            continue

        param_type = type_hints.get(param_name, str)
        param_schema = python_type_to_json_schema(param_type)

        # Prefer a tool-provided human-readable description when available.
        if hasattr(tool_instance, "get_parameter_description"):
            desc = tool_instance.get_parameter_description(param_name)
            if desc:
                param_schema["description"] = desc

        properties[param_name] = param_schema

        # Parameters without defaults are required. Parameter.empty is a
        # sentinel singleton, so identity comparison is the correct test.
        if param.default is inspect.Parameter.empty:
            required.append(param_name)

    return {
        "type": "function",
        "function": {
            "name": tool_class.__name__,
            "description": getattr(
                tool_instance, "description", f"Execute {tool_class.__name__}"
            ),
            "parameters": {
                "type": "object",
                "properties": properties,
                "required": required,
                "additionalProperties": False,
            },
        },
    }
|
95
|
+
|
96
|
+
|
97
|
+
def python_type_to_json_schema(python_type):
    """
    Convert a Python type hint into a JSON-schema fragment.

    Args:
        python_type: Python type hint

    Returns:
        JSON schema dict; unrecognized hints fall back to {"type": "string"}.
    """
    if python_type is str:
        return {"type": "string"}
    if python_type is int:
        return {"type": "integer"}
    if python_type is float:
        return {"type": "number"}
    if python_type is bool:
        return {"type": "boolean"}

    # Parameterized generics carry their base type in __origin__.
    origin = getattr(python_type, "__origin__", None)
    if origin is not None:
        if origin is list:
            element_types = getattr(python_type, "__args__", (str,))
            element = element_types[0] if element_types else str
            return {"type": "array", "items": python_type_to_json_schema(element)}
        if origin is dict:
            return {"type": "object"}
        if origin is Union:
            members = getattr(python_type, "__args__", ())
            # Optional[X] is Union[X, None]: unwrap to the schema of X.
            if len(members) == 2 and type(None) in members:
                inner = members[0] if members[1] is type(None) else members[1]
                return python_type_to_json_schema(inner)

    # Anything else (plain Union, bytes, custom classes, ...) maps to string.
    return {"type": "string"}
|
@@ -11,7 +11,7 @@ janito/exceptions.py,sha256=l4AepRdWwAuLp5fUygrBBgO9rpwgfV6JvY1afOdq2pw,913
|
|
11
11
|
janito/formatting.py,sha256=SMvOmL6bst3KGcI7OLa5D4DE127fQYuHZS3oY8OPsn8,2031
|
12
12
|
janito/formatting_token.py,sha256=9Pz0svhV0pyNuGRXSmVkGDaQC8N-koTkf50AJR_gtSo,2217
|
13
13
|
janito/gitignore_utils.py,sha256=P3iF9fMWAomqULq2xsoqHtI9uNIFSPcagljrxZshmLw,4110
|
14
|
-
janito/mkdocs.yml,sha256=
|
14
|
+
janito/mkdocs.yml,sha256=hceV1mnCuj5Eo42jJQHSAhqHC4emiMExgnISiiwqSow,925
|
15
15
|
janito/perf_singleton.py,sha256=g1h0Sdf4ydzegeEpJlMhQt4H0GQZ2hryXrdYOTL-b30,113
|
16
16
|
janito/performance_collector.py,sha256=RYu4av16Trj3RljJZ8-2Gbn1KlGdJUosrcVFYtwviNI,6285
|
17
17
|
janito/platform_discovery.py,sha256=JN3kC7hkxdvuj-AyrJTlbbDJjtNHke3fdlZDqGi_uz0,4621
|
@@ -28,7 +28,7 @@ janito/cli/__init__.py,sha256=xaPDOrWphBbCR63Xpcx_yfpXSJIlCaaICc4j2qpWqrM,194
|
|
28
28
|
janito/cli/config.py,sha256=HkZ14701HzIqrvaNyDcDhGlVHfpX_uHlLp2rHmhRm_k,872
|
29
29
|
janito/cli/console.py,sha256=gJolqzWL7jEPLxeuH-CwBDRFpXt976KdZOEAB2tdBDs,64
|
30
30
|
janito/cli/main.py,sha256=s5odou0txf8pzTf1ADk2yV7T5m8B6cejJ81e7iu776U,312
|
31
|
-
janito/cli/main_cli.py,sha256=
|
31
|
+
janito/cli/main_cli.py,sha256=lPY3080rvDvDPQE_lhAwEuzrUeQ2Fg-DfEt4J-bWblE,14381
|
32
32
|
janito/cli/prompt_core.py,sha256=F68J4Xl6jZMYFN4oBBYZFj15Jp-HTYoLub4bw2XpNRU,11648
|
33
33
|
janito/cli/prompt_handler.py,sha256=SnPTlL64noeAMGlI08VBDD5IDD8jlVMIYA4-fS8zVLg,215
|
34
34
|
janito/cli/prompt_setup.py,sha256=1s5yccFaWMgDkUjkvnTYGEWJAFPJ6hIiqwbrLfzWxMI,2038
|
@@ -39,7 +39,7 @@ janito/cli/chat_mode/bindings.py,sha256=odjc5_-YW1t2FRhBUNRNoBMoQIg5sMz3ktV7xG0A
|
|
39
39
|
janito/cli/chat_mode/chat_entry.py,sha256=RFdPd23jsA2DMHRacpjAdwI_1dFBaWrtnwyQEgb2fHA,475
|
40
40
|
janito/cli/chat_mode/prompt_style.py,sha256=vsqQ9xxmrYjj1pWuVe9CayQf39fo2EIXrkKPkflSVn4,805
|
41
41
|
janito/cli/chat_mode/script_runner.py,sha256=wOwEn4bgmjqHqjTqtfyaSOnRPsGf4ZVW-YAWhEeqxXU,6507
|
42
|
-
janito/cli/chat_mode/session.py,sha256=
|
42
|
+
janito/cli/chat_mode/session.py,sha256=o-Eh3oGAMpSdOj38xTubVi8_z3Pz3HUfGd-ZyeDlejw,13262
|
43
43
|
janito/cli/chat_mode/session_profile_select.py,sha256=CJ2g3VbPGWfBNrNkYYX57oIJZJ-hIZBNGB-zcdjC9vk,5379
|
44
44
|
janito/cli/chat_mode/toolbar.py,sha256=bJ9jPaTInp2gB3yjSVJp8mpNEFiOslzNhVaiqpXJhKc,3025
|
45
45
|
janito/cli/chat_mode/shell/autocomplete.py,sha256=lE68MaVaodbA2VfUM0_YLqQVLBJAE_BJsd5cMtwuD-g,793
|
@@ -85,19 +85,21 @@ janito/cli/cli_commands/show_system_prompt.py,sha256=9ZJGW7lIGJ9LX2JZiWVEm4AbaD0
|
|
85
85
|
janito/cli/core/__init__.py,sha256=YH95fhgY9yBX8RgqX9dSrEkl4exjV0T4rbmJ6xUpG-Y,196
|
86
86
|
janito/cli/core/event_logger.py,sha256=1X6lR0Ax7AgF8HlPWFoY5Ystuu7Bh4ooTo78vXzeGB0,2008
|
87
87
|
janito/cli/core/getters.py,sha256=AO34OBhh3f1Sx2mWVYQIvH-4fcmXG7b2471XdKNZdYs,1856
|
88
|
-
janito/cli/core/
|
88
|
+
janito/cli/core/model_guesser.py,sha256=jzkkiQ-J2buT2Omh6jYZHa8-zCJxqKQBL08Z58pe1_o,1741
|
89
|
+
janito/cli/core/runner.py,sha256=3vP92XEUzzHeOWbMHg82iISsXVUAM7y8YKWGNSIMyA8,8337
|
89
90
|
janito/cli/core/setters.py,sha256=PD3aT1y1q8XWQVtRNfrU0dtlW4JGdn6BMJyP7FCQWhc,4623
|
90
91
|
janito/cli/core/unsetters.py,sha256=FEw9gCt0vRvoCt0kRSNfVB2tzi_TqppJIx2nHPP59-k,2012
|
91
92
|
janito/cli/single_shot_mode/__init__.py,sha256=Ct99pKe9tINzVW6oedZJfzfZQKWpXz-weSSCn0hrwHY,115
|
92
93
|
janito/cli/single_shot_mode/handler.py,sha256=U70X7c9MHbmj1vQlTI-Ao2JvRprpLbPh9wL5gAMbEhs,3790
|
93
94
|
janito/docs/GETTING_STARTED.md,sha256=EbXV7B3XxjSy1E0XQJFOVITVbTmZBVB7pjth2Mb4_rg,2835
|
94
|
-
janito/docs/PROVIDERS.md,sha256=ZJK6A2j7uA651K5ypDnm-UQsnorCZvcU4qUrBPxpf0Y,4775
|
95
95
|
janito/drivers/dashscope.bak.zip,sha256=9Pv4Xyciju8jO1lEMFVgYXexoZkxmDO3Ig6vw3ODfL8,4936
|
96
96
|
janito/drivers/driver_registry.py,sha256=sbij7R71JJqJVeMfmaU-FKsEuZVO8oEn6Qp8020hdZw,773
|
97
97
|
janito/drivers/openai_responses.bak.zip,sha256=E43eDCHGa2tCtdjzj_pMnWDdnsOZzj8BJTR5tJp8wcM,13352
|
98
98
|
janito/drivers/azure_openai/driver.py,sha256=rec2D4DDuMjdnbGNIsrnB0oiwuxL_zBykJeUGa-PffI,4074
|
99
99
|
janito/drivers/openai/README.md,sha256=bgPdaYX0pyotCoJ9t3cJbYM-teQ_YM1DAFEKLCMP32Q,666
|
100
|
-
janito/drivers/openai/driver.py,sha256=
|
100
|
+
janito/drivers/openai/driver.py,sha256=O0AAp-aF3TKQLp_FSsRWm_QDG_mKliLlpDjf09fWzl4,19061
|
101
|
+
janito/drivers/zai/__init__.py,sha256=rleES3ZJEslJ8M02TdTPyxHKXxA4-e2fDJa6yjuzY8s,22
|
102
|
+
janito/drivers/zai/driver.py,sha256=jI_ddVK_vT-cRTpZ_8piXSJEFZ6zLTeRyT0Jzxjtd70,18926
|
101
103
|
janito/event_bus/__init__.py,sha256=VG6GOhKMBh0O_92D-zW8a3YitJPKDajGgPiFezTXlNE,77
|
102
104
|
janito/event_bus/bus.py,sha256=LokZbAdwcWhWOyKSp7H3Ism57x4EZhxlRPjl3NE4UKU,2847
|
103
105
|
janito/event_bus/event.py,sha256=MtgcBPD7cvCuubiLIyo-BWcsNSO-941HLk6bScHTJtQ,427
|
@@ -117,12 +119,12 @@ janito/llm/driver_input.py,sha256=Zq7IO4KdQPUraeIo6XoOaRy1IdQAyYY15RQw4JU30uA,38
|
|
117
119
|
janito/llm/message_parts.py,sha256=QY_0kDjaxdoErDgKPRPv1dNkkYJuXIBmHWNLiOEKAH4,1365
|
118
120
|
janito/llm/model.py,sha256=42hjcffZDTuzjAJoVhDcDqwIXm6rUmmi5UwTOYopf5w,1131
|
119
121
|
janito/llm/provider.py,sha256=3FbhQPrWBSEoIdIi-5DWIh0DD_CM570EFf1NcuGyGko,7961
|
120
|
-
janito/providers/__init__.py,sha256
|
122
|
+
janito/providers/__init__.py,sha256=P2r90SUduTqn0CumjpJ9yojx2BUKDVy136xdbA8I6VU,407
|
121
123
|
janito/providers/dashscope.bak.zip,sha256=BwXxRmZreEivvRtmqbr5BR62IFVlNjAf4y6DrF2BVJo,5998
|
122
124
|
janito/providers/registry.py,sha256=Ygwv9eVrTXOKhv0EKxSWQXO5WMHvajWE2Q_Lc3p7dKo,730
|
123
125
|
janito/providers/alibaba/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
124
|
-
janito/providers/alibaba/model_info.py,sha256=
|
125
|
-
janito/providers/alibaba/provider.py,sha256=
|
126
|
+
janito/providers/alibaba/model_info.py,sha256=evAW2CZUX9qgRAzDhZTp47ZNj4G1T7W66gkV60Nfan8,1264
|
127
|
+
janito/providers/alibaba/provider.py,sha256=JOkR0pKCpuG1z5KQ35TaEw6Egfp7g1gU7XiT8aeZp-0,4304
|
126
128
|
janito/providers/anthropic/model_info.py,sha256=m6pBh0Ia8_xC1KZ7ke_4HeHIFw7nWjnYVItnRpkCSWc,1206
|
127
129
|
janito/providers/anthropic/provider.py,sha256=JS74pDs7gSpwvG0jY-MDO5rljO0JJOffSjaL1LK1YlE,3165
|
128
130
|
janito/providers/azure_openai/model_info.py,sha256=TMSqEpQROIIYUGAyulYJ5xGhj7CbLoaKL_JXeLbXaG0,689
|
@@ -140,6 +142,10 @@ janito/providers/openai/__init__.py,sha256=f0m16-sIqScjL9Mp4A0CQBZx6H3PTEy0cnE08
|
|
140
142
|
janito/providers/openai/model_info.py,sha256=cz08O26Ychm-aP3T8guJRqpR96Im9Cwtgl2iMgM7tJs,3384
|
141
143
|
janito/providers/openai/provider.py,sha256=U9Bp9g2KQ58J6-B5vDgsXM05xASsgaWQOofewC7hiXs,5145
|
142
144
|
janito/providers/openai/schema_generator.py,sha256=hTqeLcPTR8jeKn5DUUpo7b-EZ-V-g1WwXiX7MbHnFzE,2234
|
145
|
+
janito/providers/zai/__init__.py,sha256=qtIr9_QBFaXG8xB6cRDGhS7se6ir11CWseI9azLMRBo,24
|
146
|
+
janito/providers/zai/model_info.py,sha256=8lwwoqwuKEkrMvXWyt1iq_H_Bf0GzZk37byjSzs_YDo,1708
|
147
|
+
janito/providers/zai/provider.py,sha256=mzGICaeg1NX_PghH17TDMxz_fKLeF4QHq98eVTuRUes,5234
|
148
|
+
janito/providers/zai/schema_generator.py,sha256=xIc1U_AYKX0cIU89RyJ63mo6rk-Mebx5CQ-qFfmd4ZQ,3934
|
143
149
|
janito/tools/DOCSTRING_STANDARD.txt,sha256=VLPwNgjxRVD_xZSSVvUZ4H-4bBwM-VKh_RyfzYQsYSs,1735
|
144
150
|
janito/tools/README.md,sha256=5HkLpF5k4PENJER7SlDPRXj0yo9mpHvAHW4uuzhq4ak,115
|
145
151
|
janito/tools/__init__.py,sha256=W1B39PztC2UF7PS2WyLH6el32MFOETMlN1-LurOROCg,1171
|
@@ -201,9 +207,9 @@ janito/tools/adapters/local/validate_file_syntax/ps1_validator.py,sha256=TeIkPt0
|
|
201
207
|
janito/tools/adapters/local/validate_file_syntax/python_validator.py,sha256=BfCO_K18qy92m-2ZVvHsbEU5e11OPo1pO9Vz4G4616E,130
|
202
208
|
janito/tools/adapters/local/validate_file_syntax/xml_validator.py,sha256=AijlsP_PgNuC8ZbGsC5vOTt3Jur76otQzkd_7qR0QFY,284
|
203
209
|
janito/tools/adapters/local/validate_file_syntax/yaml_validator.py,sha256=TgyI0HRL6ug_gBcWEm5TGJJuA4E34ZXcIzMpAbv3oJs,155
|
204
|
-
janito-2.
|
205
|
-
janito-2.
|
206
|
-
janito-2.
|
207
|
-
janito-2.
|
208
|
-
janito-2.
|
209
|
-
janito-2.
|
210
|
+
janito-2.12.0.dist-info/licenses/LICENSE,sha256=GSAKapQH5ZIGWlpQTA7v5YrfECyaxaohUb1vJX-qepw,1090
|
211
|
+
janito-2.12.0.dist-info/METADATA,sha256=28P5foSbPIT1QiWcLpVgWVSKLmzgG2JpwEJY8aEbsPE,16365
|
212
|
+
janito-2.12.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
213
|
+
janito-2.12.0.dist-info/entry_points.txt,sha256=wIo5zZxbmu4fC-ZMrsKD0T0vq7IqkOOLYhrqRGypkx4,48
|
214
|
+
janito-2.12.0.dist-info/top_level.txt,sha256=m0NaVCq0-ivxbazE2-ND0EA9Hmuijj_OGkmCbnBcCig,7
|
215
|
+
janito-2.12.0.dist-info/RECORD,,
|
janito/docs/PROVIDERS.md
DELETED
@@ -1,224 +0,0 @@
|
|
1
|
-
# Provider Configuration Guide
|
2
|
-
|
3
|
-
This guide covers how to configure and use different LLM providers with Janito.
|
4
|
-
|
5
|
-
## MoonshotAI (Recommended)
|
6
|
-
|
7
|
-
**MoonshotAI** is the recommended default provider for Janito, offering excellent performance and competitive pricing.
|
8
|
-
|
9
|
-
### Setup
|
10
|
-
```bash
|
11
|
-
# Set API key
|
12
|
-
janito --set-api-key YOUR_API_KEY -p moonshotai
|
13
|
-
|
14
|
-
# Set as default provider
|
15
|
-
janito --set provider=moonshotai
|
16
|
-
janito --set model=kimi-k1-8k
|
17
|
-
```
|
18
|
-
|
19
|
-
### Available Models
|
20
|
-
|
21
|
-
- **kimi-k1-8k**: Fast, general-purpose model (8k context)
|
22
|
-
- **kimi-k1-32k**: Extended context model (32k context)
|
23
|
-
- **kimi-k1-128k**: Long context model (128k context)
|
24
|
-
- **kimi-k2-turbo-preview**: Latest enhanced model
|
25
|
-
|
26
|
-
### Environment Variables
|
27
|
-
```bash
|
28
|
-
export MOONSHOTAI_API_KEY=your_key_here
|
29
|
-
```
|
30
|
-
|
31
|
-
## OpenAI
|
32
|
-
|
33
|
-
### Setup
|
34
|
-
```bash
|
35
|
-
# Set API key
|
36
|
-
janito --set-api-key YOUR_API_KEY -p openai
|
37
|
-
|
38
|
-
# Use specific model
|
39
|
-
janito -p openai -m gpt-4 "Your prompt"
|
40
|
-
```
|
41
|
-
|
42
|
-
### Available Models
|
43
|
-
|
44
|
-
- **gpt-4**: Most capable model
|
45
|
-
- **gpt-4-turbo**: Faster, more efficient
|
46
|
-
- **gpt-3.5-turbo**: Cost-effective option
|
47
|
-
|
48
|
-
### Environment Variables
|
49
|
-
```bash
|
50
|
-
export OPENAI_API_KEY=your_key_here
|
51
|
-
```
|
52
|
-
|
53
|
-
## Anthropic
|
54
|
-
|
55
|
-
### Setup
|
56
|
-
```bash
|
57
|
-
# Set API key
|
58
|
-
janito --set-api-key YOUR_API_KEY -p anthropic
|
59
|
-
|
60
|
-
# Use Claude models
|
61
|
-
janito -p anthropic -m claude-3-5-sonnet-20241022 "Your prompt"
|
62
|
-
```
|
63
|
-
|
64
|
-
### Available Models
|
65
|
-
|
66
|
-
- **claude-3-5-sonnet-20241022**: Most capable
|
67
|
-
- **claude-3-opus-20240229**: High performance
|
68
|
-
- **claude-3-haiku-20240307**: Fast and cost-effective
|
69
|
-
|
70
|
-
### Environment Variables
|
71
|
-
```bash
|
72
|
-
export ANTHROPIC_API_KEY=your_key_here
|
73
|
-
```
|
74
|
-
|
75
|
-
## Google
|
76
|
-
|
77
|
-
### Setup
|
78
|
-
```bash
|
79
|
-
# Set API key
|
80
|
-
janito --set-api-key YOUR_API_KEY -p google
|
81
|
-
|
82
|
-
# Use Gemini models
|
83
|
-
janito -p google -m gemini-2.0-flash-exp "Your prompt"
|
84
|
-
```
|
85
|
-
|
86
|
-
### Available Models
|
87
|
-
|
88
|
-
- **gemini-2.0-flash-exp**: Latest experimental model
|
89
|
-
- **gemini-1.5-pro**: Production-ready
|
90
|
-
- **gemini-1.5-flash**: Fast and efficient
|
91
|
-
|
92
|
-
### Environment Variables
|
93
|
-
```bash
|
94
|
-
export GOOGLE_API_KEY=your_key_here
|
95
|
-
```
|
96
|
-
|
97
|
-
## Azure OpenAI
|
98
|
-
|
99
|
-
### Setup
|
100
|
-
```bash
|
101
|
-
# Set configuration
|
102
|
-
janito --set-api-key YOUR_API_KEY -p azure-openai
|
103
|
-
janito --set azure_deployment_name=your_deployment_name -p azure-openai
|
104
|
-
```
|
105
|
-
|
106
|
-
### Configuration
|
107
|
-
|
108
|
-
Requires both API key and deployment name:
|
109
|
-
|
110
|
-
- **API Key**: Your Azure OpenAI key
|
111
|
-
- **Deployment Name**: Your Azure deployment name
|
112
|
-
- **Base URL**: Your Azure endpoint URL
|
113
|
-
|
114
|
-
### Environment Variables
|
115
|
-
```bash
|
116
|
-
export AZURE_OPENAI_API_KEY=your_key_here
|
117
|
-
export AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/
|
118
|
-
```
|
119
|
-
|
120
|
-
## Other Providers
|
121
|
-
|
122
|
-
Janito also supports these providers through OpenAI-compatible APIs:
|
123
|
-
|
124
|
-
### Alibaba Cloud
|
125
|
-
```bash
|
126
|
-
janito --set-api-key YOUR_KEY -p alibaba
|
127
|
-
```
|
128
|
-
|
129
|
-
### DeepSeek
|
130
|
-
```bash
|
131
|
-
janito --set-api-key YOUR_KEY -p deepseek
|
132
|
-
```
|
133
|
-
|
134
|
-
### Groq
|
135
|
-
```bash
|
136
|
-
janito --set-api-key YOUR_KEY -p groq
|
137
|
-
```
|
138
|
-
|
139
|
-
### Mistral
|
140
|
-
```bash
|
141
|
-
janito --set-api-key YOUR_KEY -p mistral
|
142
|
-
```
|
143
|
-
|
144
|
-
## Configuration Management
|
145
|
-
|
146
|
-
### Check Current Configuration
|
147
|
-
```bash
|
148
|
-
janito --show-config
|
149
|
-
```
|
150
|
-
|
151
|
-
### List All Providers
|
152
|
-
```bash
|
153
|
-
janito --list-providers
|
154
|
-
```
|
155
|
-
|
156
|
-
### List Models for a Provider
|
157
|
-
```bash
|
158
|
-
janito -p moonshotai --list-models
|
159
|
-
janito -p openai --list-models
|
160
|
-
```
|
161
|
-
|
162
|
-
### Switch Providers
|
163
|
-
```bash
|
164
|
-
# Temporarily for one command
|
165
|
-
janito -p openai -m gpt-4 "Your prompt"
|
166
|
-
|
167
|
-
# Permanently as default
|
168
|
-
janito --set provider=openai
|
169
|
-
janito --set model=gpt-4
|
170
|
-
```
|
171
|
-
|
172
|
-
## Advanced Configuration
|
173
|
-
|
174
|
-
### Custom Base URLs
|
175
|
-
For OpenAI-compatible providers, you can set custom base URLs:
|
176
|
-
|
177
|
-
```bash
|
178
|
-
janito --set base_url=https://your-custom-endpoint.com -p openai
|
179
|
-
```
|
180
|
-
|
181
|
-
### Provider-Specific Settings
|
182
|
-
Each provider can have custom settings:
|
183
|
-
|
184
|
-
```bash
|
185
|
-
# Set temperature for a specific provider/model
|
186
|
-
janito --set temperature=0.7 -p moonshotai -m kimi-k1-8k
|
187
|
-
|
188
|
-
# Set max tokens
|
189
|
-
janito --set max_tokens=2000 -p openai -m gpt-4
|
190
|
-
```
|
191
|
-
|
192
|
-
## Troubleshooting
|
193
|
-
|
194
|
-
### Provider Not Found
|
195
|
-
```bash
|
196
|
-
# Check if provider is registered
|
197
|
-
janito --list-providers
|
198
|
-
|
199
|
-
# Re-register provider
|
200
|
-
janito --set-api-key YOUR_KEY -p PROVIDER_NAME
|
201
|
-
```
|
202
|
-
|
203
|
-
### API Key Issues
|
204
|
-
```bash
|
205
|
-
# Check current API key
|
206
|
-
janito --show-config
|
207
|
-
|
208
|
-
# Reset API key
|
209
|
-
janito --set-api-key NEW_KEY -p PROVIDER_NAME
|
210
|
-
```
|
211
|
-
|
212
|
-
### Model Not Available
|
213
|
-
```bash
|
214
|
-
# List available models for provider
|
215
|
-
janito -p PROVIDER_NAME --list-models
|
216
|
-
```
|
217
|
-
|
218
|
-
## Best Practices
|
219
|
-
|
220
|
-
1. **Start with MoonshotAI**: It's the recommended default for good reason
|
221
|
-
2. **Use environment variables**: For CI/CD and containerized environments
|
222
|
-
3. **Test different models**: Each has different strengths and pricing
|
223
|
-
4. **Monitor usage**: Keep track of API costs and rate limits
|
224
|
-
5. **Use profiles**: Set up different configurations for different use cases
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|