ccs-llmconnector 1.1.0-py3-none-any.whl → 1.1.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
- {ccs_llmconnector-1.1.0.dist-info → ccs_llmconnector-1.1.2.dist-info}/METADATA +1 -1
- ccs_llmconnector-1.1.2.dist-info/RECORD +16 -0
- {ccs_llmconnector-1.1.0.dist-info → ccs_llmconnector-1.1.2.dist-info}/WHEEL +1 -1
- llmconnector/__init__.py +21 -21
- llmconnector/anthropic_client.py +266 -266
- llmconnector/client.py +291 -291
- llmconnector/client_cli.py +42 -42
- llmconnector/gemini_client.py +406 -391
- llmconnector/grok_client.py +270 -270
- llmconnector/openai_client.py +256 -256
- llmconnector/types.py +48 -48
- llmconnector/utils.py +77 -77
- ccs_llmconnector-1.1.0.dist-info/RECORD +0 -16
- {ccs_llmconnector-1.1.0.dist-info → ccs_llmconnector-1.1.2.dist-info}/entry_points.txt +0 -0
- {ccs_llmconnector-1.1.0.dist-info → ccs_llmconnector-1.1.2.dist-info}/licenses/LICENSE +0 -0
- {ccs_llmconnector-1.1.0.dist-info → ccs_llmconnector-1.1.2.dist-info}/top_level.txt +0 -0
llmconnector/client_cli.py
CHANGED
@@ -82,35 +82,35 @@ def _build_parser() -> argparse.ArgumentParser:
         default=32000,
         help="Maximum output tokens (provider-specific meaning)",
     )
-    p_respond.add_argument(
-        "--reasoning-effort",
-        choices=["low", "medium", "high"],
-        default=None,
-        help="Optional reasoning effort hint if supported",
-    )
-    p_respond.add_argument(
-        "--request-id",
-        default=None,
-        help="Optional request identifier for tracing/logging",
-    )
-    p_respond.add_argument(
-        "--timeout-s",
-        type=float,
-        default=None,
-        help="Optional timeout in seconds",
-    )
-    p_respond.add_argument(
-        "--max-retries",
-        type=int,
-        default=0,
-        help="Number of retries for transient failures",
-    )
-    p_respond.add_argument(
-        "--retry-backoff-s",
-        type=float,
-        default=0.5,
-        help="Base delay in seconds for exponential backoff",
-    )
+    p_respond.add_argument(
+        "--reasoning-effort",
+        choices=["low", "medium", "high"],
+        default=None,
+        help="Optional reasoning effort hint if supported",
+    )
+    p_respond.add_argument(
+        "--request-id",
+        default=None,
+        help="Optional request identifier for tracing/logging",
+    )
+    p_respond.add_argument(
+        "--timeout-s",
+        type=float,
+        default=None,
+        help="Optional timeout in seconds",
+    )
+    p_respond.add_argument(
+        "--max-retries",
+        type=int,
+        default=0,
+        help="Number of retries for transient failures",
+    )
+    p_respond.add_argument(
+        "--retry-backoff-s",
+        type=float,
+        default=0.5,
+        help="Base delay in seconds for exponential backoff",
+    )
 
     # models: list available models
     p_models = subparsers.add_parser(
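The `--max-retries` and `--retry-backoff-s` help texts above describe retries for transient failures with an exponential backoff built on a base delay. As a rough illustration of how those two values could work together (a hypothetical sketch only; the package's actual retry loop is not part of this diff):

```python
import time
from typing import Callable, TypeVar

T = TypeVar("T")

def call_with_retries(fn: Callable[[], T], max_retries: int = 0, retry_backoff_s: float = 0.5) -> T:
    """Hypothetical helper mirroring the CLI flag semantics: retry a transient
    failure up to max_retries times, waiting retry_backoff_s * 2**attempt
    seconds before each retry (exponential backoff on the base delay)."""
    for attempt in range(max_retries + 1):
        try:
            return fn()
        except Exception:
            if attempt == max_retries:
                raise
            time.sleep(retry_backoff_s * (2 ** attempt))
```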
@@ -172,19 +172,19 @@ def _cmd_respond(args: argparse.Namespace) -> int:
         print("Error: provide a prompt or at least one image.", file=sys.stderr)
         return 2
     try:
-        output = client.generate_response(
-            provider=provider,
-            api_key=api_key,
-            prompt=prompt,
-            model=model,
-            max_tokens=args.max_tokens,
-            reasoning_effort=args.reasoning_effort,
-            images=images,
-            request_id=args.request_id,
-            timeout_s=args.timeout_s,
-            max_retries=args.max_retries,
-            retry_backoff_s=args.retry_backoff_s,
-        )
+        output = client.generate_response(
+            provider=provider,
+            api_key=api_key,
+            prompt=prompt,
+            model=model,
+            max_tokens=args.max_tokens,
+            reasoning_effort=args.reasoning_effort,
+            images=images,
+            request_id=args.request_id,
+            timeout_s=args.timeout_s,
+            max_retries=args.max_retries,
+            retry_backoff_s=args.retry_backoff_s,
+        )
     except Exception as exc:  # pragma: no cover - CLI surface
         print(f"Error: {exc}", file=sys.stderr)
         return 2