ccs-llmconnector 1.0.0 (ccs_llmconnector-1.0.0-py3-none-any.whl)
This diff shows the content of a publicly available package version as released to one of the supported registries. It is provided for informational purposes only and reflects the package as it appears in its public registry.
- ccs_llmconnector-1.0.0.dist-info/METADATA +349 -0
- ccs_llmconnector-1.0.0.dist-info/RECORD +14 -0
- ccs_llmconnector-1.0.0.dist-info/WHEEL +5 -0
- ccs_llmconnector-1.0.0.dist-info/entry_points.txt +2 -0
- ccs_llmconnector-1.0.0.dist-info/licenses/LICENSE +22 -0
- ccs_llmconnector-1.0.0.dist-info/top_level.txt +1 -0
- llmconnector/__init__.py +39 -0
- llmconnector/anthropic_client.py +190 -0
- llmconnector/client.py +148 -0
- llmconnector/client_cli.py +325 -0
- llmconnector/gemini_client.py +191 -0
- llmconnector/grok_client.py +139 -0
- llmconnector/openai_client.py +139 -0
- llmconnector/py.typed +1 -0
llmconnector/grok_client.py
ADDED
@@ -0,0 +1,139 @@
"""Thin wrapper around the xAI Grok chat API via the xai-sdk package."""

from __future__ import annotations

import base64
import mimetypes
from pathlib import Path
from typing import Optional, Sequence, Union

from xai_sdk import Client
from xai_sdk.chat import image as chat_image
from xai_sdk.chat import user

ImageInput = Union[str, Path]


class GrokClient:
    """Convenience wrapper around the xAI Grok chat API."""

    def generate_response(
        self,
        *,
        api_key: str,
        prompt: str,
        model: str,
        max_tokens: int = 32000,
        reasoning_effort: Optional[str] = None,
        images: Optional[Sequence[ImageInput]] = None,
    ) -> str:
        """Generate a response from the specified Grok model.

        Args:
            api_key: API key used to authenticate with xAI.
            prompt: Natural-language instruction or query for the model.
            model: Identifier of the Grok model to target (for example, ``"grok-3"``).
            max_tokens: Cap for tokens in the generated response, defaults to 32000.
            reasoning_effort: Optional hint for reasoning-focused models (``"low"`` or ``"high"``).
            images: Optional collection of image references (local paths, URLs, or data URLs).

        Returns:
            The text output produced by the model.

        Raises:
            ValueError: If required arguments are missing or the request payload is empty.
            RuntimeError: If the Grok response does not contain any textual content.
        """
        if not api_key:
            raise ValueError("api_key must be provided.")
        if not prompt and not images:
            raise ValueError("At least one of prompt or images must be provided.")
        if not model:
            raise ValueError("model must be provided.")

        message_parts = []
        if prompt:
            message_parts.append(prompt)

        if images:
            for image in images:
                message_parts.append(chat_image(self._to_image_url(image)))

        if not message_parts:
            raise ValueError("No content provided for response generation.")

        grok_client = Client(api_key=api_key)

        create_kwargs = {
            "model": model,
            "max_tokens": max_tokens,
            "messages": [user(*message_parts)],
        }

        normalized_effort = (reasoning_effort or "").strip().lower()
        if normalized_effort in {"low", "high"}:
            create_kwargs["reasoning_effort"] = normalized_effort

        chat = grok_client.chat.create(**create_kwargs)
        response = chat.sample()

        content = getattr(response, "content", None)
        if content:
            return content

        reasoning_content = getattr(response, "reasoning_content", None)
        if reasoning_content:
            return reasoning_content

        raise RuntimeError("xAI response did not include any text output.")

    def list_models(self, *, api_key: str) -> list[dict[str, Optional[str]]]:
        """Return the Grok language models available to the authenticated account."""
        if not api_key:
            raise ValueError("api_key must be provided.")

        grok_client = Client(api_key=api_key)
        models: list[dict[str, Optional[str]]] = []

        for model in grok_client.models.list_language_models():
            model_id = getattr(model, "name", None)
            if not model_id:
                continue

            aliases = getattr(model, "aliases", None)
            display_name: Optional[str] = None
            if aliases:
                try:
                    display_name = next(iter(aliases)) or None
                except (StopIteration, TypeError):
                    display_name = None

            models.append(
                {
                    "id": model_id,
                    "display_name": display_name,
                }
            )

        return models

    @staticmethod
    def _to_image_url(image: ImageInput) -> str:
        """Convert an image reference into a URL or data URL suitable for the xAI SDK."""
        if isinstance(image, Path):
            return _encode_image_path(image)

        if image.startswith(("http://", "https://", "data:")):
            return image

        return _encode_image_path(Path(image))


def _encode_image_path(path: Path) -> str:
    """Generate a data URL for the provided image path."""
    expanded = path.expanduser()
    data = expanded.read_bytes()
    mime_type = mimetypes.guess_type(expanded.name)[0] or "application/octet-stream"
    encoded = base64.b64encode(data).decode("utf-8")
    return f"data:{mime_type};base64,{encoded}"
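A minimal usage sketch for GrokClient (not part of the package diff above; it assumes the xai-sdk dependency is installed, an XAI_API_KEY environment variable is set, and the docstring's example model id "grok-3" is available to the account):

import os
from pathlib import Path

from llmconnector.grok_client import GrokClient

client = GrokClient()

# Text-only request; "grok-3" is the illustrative model id from the docstring.
reply = client.generate_response(
    api_key=os.environ["XAI_API_KEY"],
    prompt="Summarize what this package does in one sentence.",
    model="grok-3",
)
print(reply)

# Local paths (str or pathlib.Path) are inlined as base64 data URLs by
# _encode_image_path; http(s) and data: URLs are passed through unchanged.
caption = client.generate_response(
    api_key=os.environ["XAI_API_KEY"],
    prompt="Describe this image.",
    model="grok-3",
    images=[Path("~/Pictures/example.png")],  # hypothetical local file
)
print(caption)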
llmconnector/openai_client.py
ADDED
@@ -0,0 +1,139 @@
"""Thin wrapper around the OpenAI Responses API."""

from __future__ import annotations

import base64
import mimetypes
from pathlib import Path
from typing import Optional, Sequence, Union

from openai import OpenAI

try:
    from openai import APIError as OpenAIError  # type: ignore
except ImportError:  # pragma: no cover - fallback for older SDKs
    from openai.error import OpenAIError  # type: ignore

ImageInput = Union[str, Path]


class OpenAIResponsesClient:
    """Convenience wrapper around the OpenAI Responses API."""

    def generate_response(
        self,
        *,
        api_key: str,
        prompt: str,
        model: str,
        max_tokens: int = 32000,
        reasoning_effort: Optional[str] = None,
        images: Optional[Sequence[ImageInput]] = None,
    ) -> str:
        """Generate a response from the specified model.

        Args:
            api_key: Secret key used to authenticate with OpenAI.
            prompt: Natural-language instruction or query for the model.
            model: Identifier of the OpenAI model to target (for example, ``"gpt-4o"``).
            max_tokens: Cap for tokens across the entire exchange, defaults to 32000.
            reasoning_effort: Optional reasoning effort hint (``"low"``, ``"medium"``, or ``"high"``).
            images: Optional collection of image references (local paths or URLs).

        Returns:
            The text output produced by the model.

        Raises:
            ValueError: If required arguments are missing or the request payload is empty.
            OpenAIError: If the underlying OpenAI request fails.
        """
        if not api_key:
            raise ValueError("api_key must be provided.")
        if not prompt and not images:
            raise ValueError("At least one of prompt or images must be provided.")
        if not model:
            raise ValueError("model must be provided.")

        client = OpenAI(api_key=api_key)

        content_blocks = []
        if prompt:
            content_blocks.append({"type": "input_text", "text": prompt})

        if images:
            for image in images:
                content_blocks.append(self._to_image_block(image))

        if not content_blocks:
            raise ValueError("No content provided for response generation.")

        request_payload = {
            "model": model,
            "input": [
                {
                    "role": "user",
                    "content": content_blocks,
                }
            ],
            "max_output_tokens": max_tokens,
        }

        if reasoning_effort:
            request_payload["reasoning"] = {"effort": reasoning_effort}

        response = client.responses.create(**request_payload)

        return response.output_text

    @staticmethod
    def _to_image_block(image: ImageInput) -> dict:
        """Convert an image reference into an OpenAI Responses API block."""
        if isinstance(image, Path):
            return {
                "type": "input_image",
                "image_url": _encode_image_path(image),
            }

        if image.startswith(("http://", "https://", "data:")):
            return {
                "type": "input_image",
                "image_url": image,
            }

        return {
            "type": "input_image",
            "image_url": _encode_image_path(Path(image)),
        }

    def list_models(self, *, api_key: str) -> list[dict[str, Optional[str]]]:
        """Return the models available to the authenticated OpenAI account."""
        if not api_key:
            raise ValueError("api_key must be provided.")

        client = OpenAI(api_key=api_key)
        response = client.models.list()
        data = getattr(response, "data", []) or []

        models: list[dict[str, Optional[str]]] = []
        for model in data:
            model_id = getattr(model, "id", None)
            if model_id is None and isinstance(model, dict):
                model_id = model.get("id")
            if not model_id:
                continue

            display_name = getattr(model, "display_name", None)
            if display_name is None and isinstance(model, dict):
                display_name = model.get("display_name")

            models.append({"id": model_id, "display_name": display_name})

        return models


def _encode_image_path(path: Path) -> str:
    """Generate a data URL for the provided image path."""
    data = path.read_bytes()
    encoded = base64.b64encode(data).decode("utf-8")
    mime_type = mimetypes.guess_type(path.name)[0] or "image/png"
    return f"data:{mime_type};base64,{encoded}"
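A matching sketch for OpenAIResponsesClient (likewise not part of the package; it assumes the openai SDK is installed, OPENAI_API_KEY is set, and reuses the docstring's example model id "gpt-4o"; the image URL is hypothetical):

import os

from llmconnector.openai_client import OpenAIResponsesClient

client = OpenAIResponsesClient()

# Mixed text-and-image request; URL images are sent as-is, while local paths
# would be inlined as data URLs by _encode_image_path.
text = client.generate_response(
    api_key=os.environ["OPENAI_API_KEY"],
    prompt="Describe this image in one sentence.",
    model="gpt-4o",
    images=["https://example.com/cat.png"],  # hypothetical image URL
)
print(text)

# Enumerate the models visible to the account; display_name may be None.
for entry in client.list_models(api_key=os.environ["OPENAI_API_KEY"]):
    print(entry["id"], entry["display_name"])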
llmconnector/py.typed
ADDED
@@ -0,0 +1 @@
(single blank line; py.typed is the PEP 561 marker that tells type checkers the package ships inline type hints)