ccs-llmconnector 1.0.4__py3-none-any.whl → 1.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,325 +1,325 @@
1
- """Command-line client for llmconnector.
2
-
3
- Reads the API key from an environment variable derived from the provider name
4
- ("{PROVIDER}_API_KEY", e.g. OPENAI_API_KEY) and exposes
5
- simple commands to generate a response or list available models for a
6
- provider.
7
- """
8
-
9
- from __future__ import annotations
10
-
11
- import argparse
12
- import json
13
- import os
14
- import sys
15
- from typing import Sequence
16
-
17
- # Support both package execution and direct file execution.
18
- # When run as a script (no package), add the parent of this file's directory to sys.path
19
- # so that `import llmconnector` resolves.
20
- try:
21
- if __package__ in (None, ""):
22
- # Running as a script: add repo/src to path
23
- _here = os.path.dirname(os.path.abspath(__file__))
24
- _pkg_root = os.path.dirname(_here)
25
- if _pkg_root not in sys.path:
26
- sys.path.insert(0, _pkg_root)
27
- from llmconnector.client import LLMClient # type: ignore
28
- else:
29
- from .client import LLMClient
30
- except Exception: # pragma: no cover - defensive import fallback
31
- # Last-resort fallback if the above logic fails in unusual environments
32
- from llmconnector.client import LLMClient # type: ignore
33
-
34
-
35
- def _env_api_key(provider: str) -> str:
36
- env_name = f"{provider.upper()}_API_KEY"
37
- api_key = os.environ.get(env_name, "")
38
- if not api_key:
39
- raise SystemExit(
40
- f"Missing {env_name} environment variable. Set it before running."
41
- )
42
- return api_key
43
-
44
-
45
- def _build_parser() -> argparse.ArgumentParser:
46
- parser = argparse.ArgumentParser(
47
- prog="client_cli",
48
- description="CLI for provider-agnostic LLM requests",
49
- )
50
-
51
- subparsers = parser.add_subparsers(dest="command", required=True)
52
-
53
- # respond: generate a model response
54
- p_respond = subparsers.add_parser(
55
- "respond", help="Generate a response from a provider model"
56
- )
57
- p_respond.add_argument(
58
- "--provider",
59
- required=False,
60
- help="Provider to use (e.g. openai, gemini, anthropic, grok). If omitted, you will be prompted.",
61
- )
62
- p_respond.add_argument(
63
- "--model",
64
- required=False,
65
- help="Model identifier for the provider (e.g. gpt-4o). If omitted, you will be prompted.",
66
- )
67
- p_respond.add_argument(
68
- "--prompt",
69
- default="",
70
- help="Text prompt to send (omit if only using images)",
71
- )
72
- p_respond.add_argument(
73
- "--image",
74
- action="append",
75
- dest="images",
76
- default=None,
77
- help="Image path or URL; may be provided multiple times",
78
- )
79
- p_respond.add_argument(
80
- "--max-tokens",
81
- type=int,
82
- default=32000,
83
- help="Maximum output tokens (provider-specific meaning)",
84
- )
85
- p_respond.add_argument(
86
- "--reasoning-effort",
87
- choices=["low", "medium", "high"],
88
- default=None,
89
- help="Optional reasoning effort hint if supported",
90
- )
91
-
92
- # models: list available models
93
- p_models = subparsers.add_parser(
94
- "models", help="List models available to the provider"
95
- )
96
- p_models.add_argument(
97
- "--provider",
98
- required=False,
99
- help="Provider to query (e.g. openai, gemini, anthropic, grok). If omitted, you will be prompted.",
100
- )
101
- p_models.add_argument(
102
- "--json",
103
- action="store_true",
104
- help="Output as JSON (default is human-readable)",
105
- )
106
-
107
- # all-models: list models for all registered providers
108
- p_all_models = subparsers.add_parser(
109
- "all-models", help="List models for all registered providers"
110
- )
111
- p_all_models.add_argument(
112
- "--json",
113
- action="store_true",
114
- help="Output as JSON (default is human-readable)",
115
- )
116
-
117
- return parser
118
-
119
-
120
- def _cmd_respond(args: argparse.Namespace) -> int:
121
- client = LLMClient()
122
- provider = args.provider
123
- if not provider:
124
- # Try to hint known providers
125
- try:
126
- known = sorted(LLMClient._discover_default_providers().keys()) # type: ignore[attr-defined]
127
- except Exception:
128
- known = []
129
- hint = f" ({'/'.join(known)})" if known else ""
130
- provider = input(f"Provider{hint}: ").strip()
131
- if not provider:
132
- print("Error: provider is required.", file=sys.stderr)
133
- return 2
134
-
135
- api_key = _env_api_key(provider)
136
-
137
- model = args.model
138
- if not model:
139
- model = input("Model id: ").strip()
140
- if not model:
141
- print("Error: model is required.", file=sys.stderr)
142
- return 2
143
-
144
- prompt = args.prompt
145
- images: Sequence[str] | None = args.images
146
- if not prompt and not images:
147
- prompt = input("Prompt: ")
148
- if not prompt and not images:
149
- print("Error: provide a prompt or at least one image.", file=sys.stderr)
150
- return 2
151
- try:
152
- output = client.generate_response(
153
- provider=provider,
154
- api_key=api_key,
155
- prompt=prompt,
156
- model=model,
157
- max_tokens=args.max_tokens,
158
- reasoning_effort=args.reasoning_effort,
159
- images=images,
160
- )
161
- except Exception as exc: # pragma: no cover - CLI surface
162
- print(f"Error: {exc}", file=sys.stderr)
163
- return 2
164
-
165
- print(output)
166
- return 0
167
-
168
-
169
- def _cmd_models(args: argparse.Namespace) -> int:
170
- client = LLMClient()
171
- provider = args.provider
172
- if not provider:
173
- try:
174
- known = sorted(LLMClient._discover_default_providers().keys()) # type: ignore[attr-defined]
175
- except Exception:
176
- known = []
177
- hint = f" ({'/'.join(known)})" if known else ""
178
- provider = input(f"Provider{hint}: ").strip()
179
- if not provider:
180
- print("Error: provider is required.", file=sys.stderr)
181
- return 2
182
-
183
- api_key = _env_api_key(provider)
184
- try:
185
- models = client.list_models(provider=provider, api_key=api_key)
186
- except Exception as exc: # pragma: no cover - CLI surface
187
- print(f"Error: {exc}", file=sys.stderr)
188
- return 2
189
-
190
- if args.json:
191
- print(json.dumps(models, indent=2))
192
- else:
193
- if not models:
194
- print("No models found.")
195
- else:
196
- for m in models:
197
- mid = m.get("id") or "<unknown>"
198
- name = m.get("display_name") or ""
199
- if name:
200
- print(f"{mid} - {name}")
201
- else:
202
- print(mid)
203
- return 0
204
-
205
-
206
- def _env_api_key_with_fallbacks(provider: str) -> tuple[str | None, list[str]]:
207
- """Return the API key for a provider, trying common env var fallbacks.
208
-
209
- Returns a tuple of (api_key_or_none, tried_env_names).
210
- """
211
- name = provider.upper()
212
- # Default convention first
213
- env_names: list[str] = [f"{name}_API_KEY"]
214
-
215
- # Provider-specific fallbacks commonly used by SDKs/docs
216
- if provider.lower() == "gemini":
217
- env_names.extend(["GOOGLE_API_KEY"]) # google-genai default
218
- elif provider.lower() in {"grok", "xai"}:
219
- env_names.extend(["XAI_API_KEY", "GROK_API_KEY"]) # prefer XAI
220
- elif provider.lower() == "openai":
221
- # OPENAI_API_KEY already covered by default
222
- pass
223
- elif provider.lower() == "anthropic":
224
- # ANTHROPIC_API_KEY already covered by default
225
- pass
226
-
227
- for env_name in env_names:
228
- val = os.environ.get(env_name)
229
- if val:
230
- return val, env_names
231
-
232
- return None, env_names
233
-
234
-
235
- def _cmd_all_models(args: argparse.Namespace) -> int:
236
- client = LLMClient()
237
-
238
- # Group provider names by underlying client instance to avoid duplicates
239
- by_client: dict[int, dict[str, object]] = {}
240
- for name, prov_client in getattr(client, "_providers", {}).items(): # type: ignore[attr-defined]
241
- key = id(prov_client)
242
- group = by_client.setdefault(key, {"names": [], "client": prov_client})
243
- names = group["names"] # type: ignore[assignment]
244
- assert isinstance(names, list)
245
- names.append(name)
246
-
247
- results: list[dict[str, object]] = []
248
- for entry in by_client.values():
249
- names = sorted(entry["names"]) # type: ignore[index]
250
- primary = names[0]
251
- display_name = "/".join(names)
252
-
253
- api_key, tried = _env_api_key_with_fallbacks(primary)
254
- if not api_key:
255
- results.append(
256
- {
257
- "provider": display_name,
258
- "error": f"missing API key (tried: {', '.join(tried)})",
259
- "models": [],
260
- }
261
- )
262
- continue
263
-
264
- try:
265
- models = client.list_models(provider=primary, api_key=api_key)
266
- except Exception as exc: # pragma: no cover - CLI surface
267
- results.append(
268
- {
269
- "provider": display_name,
270
- "error": str(exc),
271
- "models": [],
272
- }
273
- )
274
- continue
275
-
276
- results.append({"provider": display_name, "models": models})
277
-
278
- if args.json:
279
- print(json.dumps(results, indent=2))
280
- else:
281
- if not results:
282
- print("No providers registered.")
283
- return 0
284
-
285
- for item in results:
286
- provider_label = str(item.get("provider", "<unknown>"))
287
- print(f"== {provider_label} ==")
288
- if item.get("error"):
289
- print(f" Skipped: {item['error']}")
290
- continue
291
- models = item.get("models") or []
292
- if not models:
293
- print(" No models found.")
294
- continue
295
- for m in models: # type: ignore[assignment]
296
- if not isinstance(m, dict):
297
- print(f" {m}")
298
- continue
299
- mid = m.get("id") or "<unknown>"
300
- name = m.get("display_name") or ""
301
- if name:
302
- print(f" {mid} - {name}")
303
- else:
304
- print(f" {mid}")
305
-
306
- return 0
307
-
308
-
309
- def main(argv: Sequence[str] | None = None) -> int:
310
- parser = _build_parser()
311
- args = parser.parse_args(argv)
312
-
313
- if args.command == "respond":
314
- return _cmd_respond(args)
315
- if args.command == "models":
316
- return _cmd_models(args)
317
- if args.command == "all-models":
318
- return _cmd_all_models(args)
319
-
320
- parser.error("unknown command")
321
- return 2
322
-
323
-
324
- if __name__ == "__main__":
325
- raise SystemExit(main())
1
+ """Command-line client for llmconnector.
2
+
3
+ Reads the API key from an environment variable derived from the provider name
4
+ ("{PROVIDER}_API_KEY", e.g. OPENAI_API_KEY) and exposes
5
+ simple commands to generate a response or list available models for a
6
+ provider.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ import argparse
12
+ import json
13
+ import os
14
+ import sys
15
+ from typing import Sequence
16
+
17
+ # Support both package execution and direct file execution.
18
+ # When run as a script (no package), add the parent of this file's directory to sys.path
19
+ # so that `import llmconnector` resolves.
20
+ try:
21
+ if __package__ in (None, ""):
22
+ # Running as a script: add repo/src to path
23
+ _here = os.path.dirname(os.path.abspath(__file__))
24
+ _pkg_root = os.path.dirname(_here)
25
+ if _pkg_root not in sys.path:
26
+ sys.path.insert(0, _pkg_root)
27
+ from llmconnector.client import LLMClient # type: ignore
28
+ else:
29
+ from .client import LLMClient
30
+ except Exception: # pragma: no cover - defensive import fallback
31
+ # Last-resort fallback if the above logic fails in unusual environments
32
+ from llmconnector.client import LLMClient # type: ignore
33
+
34
+
35
+ def _env_api_key(provider: str) -> str:
36
+ env_name = f"{provider.upper()}_API_KEY"
37
+ api_key = os.environ.get(env_name, "")
38
+ if not api_key:
39
+ raise SystemExit(
40
+ f"Missing {env_name} environment variable. Set it before running."
41
+ )
42
+ return api_key
43
+
44
+
45
+ def _build_parser() -> argparse.ArgumentParser:
46
+ parser = argparse.ArgumentParser(
47
+ prog="client_cli",
48
+ description="CLI for provider-agnostic LLM requests",
49
+ )
50
+
51
+ subparsers = parser.add_subparsers(dest="command", required=True)
52
+
53
+ # respond: generate a model response
54
+ p_respond = subparsers.add_parser(
55
+ "respond", help="Generate a response from a provider model"
56
+ )
57
+ p_respond.add_argument(
58
+ "--provider",
59
+ required=False,
60
+ help="Provider to use (e.g. openai, gemini, anthropic, grok). If omitted, you will be prompted.",
61
+ )
62
+ p_respond.add_argument(
63
+ "--model",
64
+ required=False,
65
+ help="Model identifier for the provider (e.g. gpt-4o). If omitted, you will be prompted.",
66
+ )
67
+ p_respond.add_argument(
68
+ "--prompt",
69
+ default="",
70
+ help="Text prompt to send (omit if only using images)",
71
+ )
72
+ p_respond.add_argument(
73
+ "--image",
74
+ action="append",
75
+ dest="images",
76
+ default=None,
77
+ help="Image path or URL; may be provided multiple times",
78
+ )
79
+ p_respond.add_argument(
80
+ "--max-tokens",
81
+ type=int,
82
+ default=32000,
83
+ help="Maximum output tokens (provider-specific meaning)",
84
+ )
85
+ p_respond.add_argument(
86
+ "--reasoning-effort",
87
+ choices=["low", "medium", "high"],
88
+ default=None,
89
+ help="Optional reasoning effort hint if supported",
90
+ )
91
+
92
+ # models: list available models
93
+ p_models = subparsers.add_parser(
94
+ "models", help="List models available to the provider"
95
+ )
96
+ p_models.add_argument(
97
+ "--provider",
98
+ required=False,
99
+ help="Provider to query (e.g. openai, gemini, anthropic, grok). If omitted, you will be prompted.",
100
+ )
101
+ p_models.add_argument(
102
+ "--json",
103
+ action="store_true",
104
+ help="Output as JSON (default is human-readable)",
105
+ )
106
+
107
+ # all-models: list models for all registered providers
108
+ p_all_models = subparsers.add_parser(
109
+ "all-models", help="List models for all registered providers"
110
+ )
111
+ p_all_models.add_argument(
112
+ "--json",
113
+ action="store_true",
114
+ help="Output as JSON (default is human-readable)",
115
+ )
116
+
117
+ return parser
118
+
119
+
120
def _cmd_respond(args: argparse.Namespace) -> int:
    """Handle the ``respond`` subcommand: send one request and print the reply.

    Prompts interactively for any of provider / model / prompt that was not
    supplied on the command line. Returns 0 on success, 2 on any error.
    """
    client = LLMClient()

    provider = args.provider
    if not provider:
        # Offer a hint listing the providers the client knows about.
        try:
            discovered = sorted(LLMClient._discover_default_providers().keys())  # type: ignore[attr-defined]
        except Exception:
            discovered = []
        suffix = f" ({'/'.join(discovered)})" if discovered else ""
        provider = input(f"Provider{suffix}: ").strip()
        if not provider:
            print("Error: provider is required.", file=sys.stderr)
            return 2

    api_key = _env_api_key(provider)

    model = args.model or input("Model id: ").strip()
    if not model:
        print("Error: model is required.", file=sys.stderr)
        return 2

    text = args.prompt
    sources: Sequence[str] | None = args.images
    if not text and not sources:
        text = input("Prompt: ")
    if not text and not sources:
        print("Error: provide a prompt or at least one image.", file=sys.stderr)
        return 2

    try:
        reply = client.generate_response(
            provider=provider,
            api_key=api_key,
            prompt=text,
            model=model,
            max_tokens=args.max_tokens,
            reasoning_effort=args.reasoning_effort,
            images=sources,
        )
    except Exception as exc:  # pragma: no cover - CLI surface
        print(f"Error: {exc}", file=sys.stderr)
        return 2

    print(reply)
    return 0
167
+
168
+
169
def _cmd_models(args: argparse.Namespace) -> int:
    """Handle the ``models`` subcommand: print the provider's model catalogue.

    Prompts for the provider if it was not supplied. Output is JSON when
    ``--json`` was given, otherwise one human-readable line per model.
    Returns 0 on success, 2 on any error.
    """
    client = LLMClient()

    provider = args.provider
    if not provider:
        # Offer a hint listing the providers the client knows about.
        try:
            discovered = sorted(LLMClient._discover_default_providers().keys())  # type: ignore[attr-defined]
        except Exception:
            discovered = []
        suffix = f" ({'/'.join(discovered)})" if discovered else ""
        provider = input(f"Provider{suffix}: ").strip()
        if not provider:
            print("Error: provider is required.", file=sys.stderr)
            return 2

    api_key = _env_api_key(provider)
    try:
        catalogue = client.list_models(provider=provider, api_key=api_key)
    except Exception as exc:  # pragma: no cover - CLI surface
        print(f"Error: {exc}", file=sys.stderr)
        return 2

    if args.json:
        print(json.dumps(catalogue, indent=2))
        return 0

    if not catalogue:
        print("No models found.")
        return 0

    for entry in catalogue:
        model_id = entry.get("id") or "<unknown>"
        label = entry.get("display_name") or ""
        print(f"{model_id} - {label}" if label else model_id)
    return 0
204
+
205
+
206
+ def _env_api_key_with_fallbacks(provider: str) -> tuple[str | None, list[str]]:
207
+ """Return the API key for a provider, trying common env var fallbacks.
208
+
209
+ Returns a tuple of (api_key_or_none, tried_env_names).
210
+ """
211
+ name = provider.upper()
212
+ # Default convention first
213
+ env_names: list[str] = [f"{name}_API_KEY"]
214
+
215
+ # Provider-specific fallbacks commonly used by SDKs/docs
216
+ if provider.lower() == "gemini":
217
+ env_names.extend(["GOOGLE_API_KEY"]) # google-genai default
218
+ elif provider.lower() in {"grok", "xai"}:
219
+ env_names.extend(["XAI_API_KEY", "GROK_API_KEY"]) # prefer XAI
220
+ elif provider.lower() == "openai":
221
+ # OPENAI_API_KEY already covered by default
222
+ pass
223
+ elif provider.lower() == "anthropic":
224
+ # ANTHROPIC_API_KEY already covered by default
225
+ pass
226
+
227
+ for env_name in env_names:
228
+ val = os.environ.get(env_name)
229
+ if val:
230
+ return val, env_names
231
+
232
+ return None, env_names
233
+
234
+
235
def _cmd_all_models(args: argparse.Namespace) -> int:
    """Handle ``all-models``: list models for every registered provider.

    Providers whose API key is missing, or whose listing raises, are reported
    with an error entry instead of aborting the whole command. Output is JSON
    when ``--json`` was given, otherwise a human-readable section per
    provider. Always returns 0.
    """
    client = LLMClient()

    # Group alias names that share one underlying provider client so each
    # backend is queried only once (keyed by the client object's identity).
    grouped: dict[int, dict[str, object]] = {}
    for alias, backend in getattr(client, "_providers", {}).items():  # type: ignore[attr-defined]
        bucket = grouped.setdefault(id(backend), {"names": [], "client": backend})
        aliases = bucket["names"]  # type: ignore[assignment]
        assert isinstance(aliases, list)
        aliases.append(alias)

    results: list[dict[str, object]] = []
    for bucket in grouped.values():
        aliases = sorted(bucket["names"])  # type: ignore[index]
        primary = aliases[0]
        label = "/".join(aliases)

        api_key, tried = _env_api_key_with_fallbacks(primary)
        if not api_key:
            results.append(
                {
                    "provider": label,
                    "error": f"missing API key (tried: {', '.join(tried)})",
                    "models": [],
                }
            )
            continue

        try:
            models = client.list_models(provider=primary, api_key=api_key)
        except Exception as exc:  # pragma: no cover - CLI surface
            results.append({"provider": label, "error": str(exc), "models": []})
            continue

        results.append({"provider": label, "models": models})

    if args.json:
        print(json.dumps(results, indent=2))
        return 0

    if not results:
        print("No providers registered.")
        return 0

    for item in results:
        heading = str(item.get("provider", "<unknown>"))
        print(f"== {heading} ==")
        if item.get("error"):
            print(f" Skipped: {item['error']}")
            continue
        models = item.get("models") or []
        if not models:
            print(" No models found.")
            continue
        for m in models:  # type: ignore[assignment]
            if not isinstance(m, dict):
                print(f" {m}")
                continue
            model_id = m.get("id") or "<unknown>"
            display = m.get("display_name") or ""
            if display:
                print(f" {model_id} - {display}")
            else:
                print(f" {model_id}")

    return 0
307
+
308
+
309
def main(argv: Sequence[str] | None = None) -> int:
    """Parse *argv* (or ``sys.argv``) and dispatch to the subcommand handler.

    Returns the process exit code (0 on success, 2 on error).
    """
    parser = _build_parser()
    args = parser.parse_args(argv)

    # Dispatch table keyed by subcommand name.
    handlers = {
        "respond": _cmd_respond,
        "models": _cmd_models,
        "all-models": _cmd_all_models,
    }
    handler = handlers.get(args.command)
    if handler is None:  # pragma: no cover - subparsers enforce the command set
        parser.error("unknown command")
        return 2
    return handler(args)
322
+
323
+
324
if __name__ == "__main__":  # pragma: no cover - script entry point
    # sys.exit raises SystemExit with main()'s return code, same as the
    # explicit raise form.
    sys.exit(main())