ccs-llmconnector 1.0.4__py3-none-any.whl → 1.0.5__py3-none-any.whl

This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
llmconnector/grok_client.py CHANGED
@@ -1,170 +1,186 @@
-"""Thin wrapper around the xAI Grok chat API via the xai-sdk package."""
-
-from __future__ import annotations
-
-import base64
-import mimetypes
-from pathlib import Path
-import logging
-from typing import Optional, Sequence, Union
-
-from xai_sdk import Client
-from xai_sdk.chat import image as chat_image
-from xai_sdk.chat import user
-
-ImageInput = Union[str, Path]
-logger = logging.getLogger(__name__)
-
-
-class GrokClient:
-    """Convenience wrapper around the xAI Grok chat API."""
-
-    def generate_response(
-        self,
-        *,
-        api_key: str,
-        prompt: str,
-        model: str,
-        max_tokens: int = 32000,
-        reasoning_effort: Optional[str] = None,
-        images: Optional[Sequence[ImageInput]] = None,
-    ) -> str:
-        """Generate a response from the specified Grok model.
-
-        Args:
-            api_key: API key used to authenticate with xAI.
-            prompt: Natural-language instruction or query for the model.
-            model: Identifier of the Grok model to target (for example, ``"grok-3"``).
-            max_tokens: Cap for tokens in the generated response, defaults to 32000.
-            reasoning_effort: Optional hint for reasoning-focused models (``"low"`` or ``"high"``).
-            images: Optional collection of image references (local paths, URLs, or data URLs).
-
-        Returns:
-            The text output produced by the model.
-
-        Raises:
-            ValueError: If required arguments are missing or the request payload is empty.
-            RuntimeError: If the Grok response does not contain any textual content.
-        """
-        if not api_key:
-            raise ValueError("api_key must be provided.")
-        if not prompt and not images:
-            raise ValueError("At least one of prompt or images must be provided.")
-        if not model:
-            raise ValueError("model must be provided.")
-
-        message_parts = []
-        if prompt:
-            message_parts.append(prompt)
-
-        if images:
-            for image in images:
-                message_parts.append(chat_image(self._to_image_url(image)))
-
-        if not message_parts:
-            raise ValueError("No content provided for response generation.")
-
-        grok_client = Client(api_key=api_key)
-
-        create_kwargs = {
-            "model": model,
-            "max_tokens": max_tokens,
-            "messages": [user(*message_parts)],
-        }
-
-        normalized_effort = (reasoning_effort or "").strip().lower()
-        if normalized_effort in {"low", "high"}:
-            create_kwargs["reasoning_effort"] = normalized_effort
-
-        try:
-            chat = grok_client.chat.create(**create_kwargs)
-            response = chat.sample()
-        except Exception as exc:
-            logger.exception("xAI Grok chat request failed: %s", exc)
-            raise
-
-        content = getattr(response, "content", None)
-        if content:
-            logger.info(
-                "xAI chat succeeded: model=%s images=%d text_len=%d",
-                model,
-                len(images or []),
-                len(content or ""),
-            )
-            return content
-
-        reasoning_content = getattr(response, "reasoning_content", None)
-        if reasoning_content:
-            logger.info(
-                "xAI chat succeeded (reasoning): model=%s images=%d text_len=%d",
-                model,
-                len(images or []),
-                len(reasoning_content or ""),
-            )
-            return reasoning_content
-
-        # Treat successful calls without textual content as a successful, empty response
-        # rather than raising. This aligns with callers that handle empty outputs gracefully.
-        logger.info(
-            "xAI chat succeeded with no text: model=%s images=%d",
-            model,
-            len(images or []),
-        )
-        return ""
-
-    def list_models(self, *, api_key: str) -> list[dict[str, Optional[str]]]:
-        """Return the Grok language models available to the authenticated account."""
-        if not api_key:
-            raise ValueError("api_key must be provided.")
-
-        grok_client = Client(api_key=api_key)
-        models: list[dict[str, Optional[str]]] = []
-
-        try:
-            iterator = grok_client.models.list_language_models()
-        except Exception as exc:
-            logger.exception("xAI list language models failed: %s", exc)
-            raise
-
-        for model in iterator:
-            model_id = getattr(model, "name", None)
-            if not model_id:
-                continue
-
-            aliases = getattr(model, "aliases", None)
-            display_name: Optional[str] = None
-            if aliases:
-                try:
-                    display_name = next(iter(aliases)) or None
-                except (StopIteration, TypeError):
-                    display_name = None
-
-            models.append(
-                {
-                    "id": model_id,
-                    "display_name": display_name,
-                }
-            )
-
-        logger.info("xAI list_language_models succeeded: count=%d", len(models))
-        return models
-
-    @staticmethod
-    def _to_image_url(image: ImageInput) -> str:
-        """Convert an image reference into a URL or data URL suitable for the xAI SDK."""
-        if isinstance(image, Path):
-            return _encode_image_path(image)
-
-        if image.startswith(("http://", "https://", "data:")):
-            return image
-
-        return _encode_image_path(Path(image))
-
-
-def _encode_image_path(path: Path) -> str:
-    """Generate a data URL for the provided image path."""
-    expanded = path.expanduser()
-    data = expanded.read_bytes()
-    mime_type = mimetypes.guess_type(expanded.name)[0] or "application/octet-stream"
-    encoded = base64.b64encode(data).decode("utf-8")
-    return f"data:{mime_type};base64,{encoded}"
+ """Thin wrapper around the xAI Grok chat API via the xai-sdk package."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import base64
6
+ import mimetypes
7
+ from pathlib import Path
8
+ import logging
9
+ from typing import Optional, Sequence, Union
10
+
11
+ from xai_sdk import Client
12
+ from xai_sdk.chat import image as chat_image
13
+ from xai_sdk.chat import user
14
+
15
+ ImageInput = Union[str, Path]
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
+ class GrokClient:
20
+ """Convenience wrapper around the xAI Grok chat API."""
21
+
22
+ def generate_response(
23
+ self,
24
+ *,
25
+ api_key: str,
26
+ prompt: str,
27
+ model: str,
28
+ max_tokens: int = 32000,
29
+ reasoning_effort: Optional[str] = None,
30
+ images: Optional[Sequence[ImageInput]] = None,
31
+ ) -> str:
32
+ """Generate a response from the specified Grok model.
33
+
34
+ Args:
35
+ api_key: API key used to authenticate with xAI.
36
+ prompt: Natural-language instruction or query for the model.
37
+ model: Identifier of the Grok model to target (for example, ``"grok-3"``).
38
+ max_tokens: Cap for tokens in the generated response, defaults to 32000.
39
+ reasoning_effort: Optional hint for reasoning-focused models (``"low"`` or ``"high"``).
40
+ images: Optional collection of image references (local paths, URLs, or data URLs).
41
+
42
+ Returns:
43
+ The text output produced by the model.
44
+
45
+ Raises:
46
+ ValueError: If required arguments are missing or the request payload is empty.
47
+ RuntimeError: If the Grok response does not contain any textual content.
48
+ """
49
+ if not api_key:
50
+ raise ValueError("api_key must be provided.")
51
+ if not prompt and not images:
52
+ raise ValueError("At least one of prompt or images must be provided.")
53
+ if not model:
54
+ raise ValueError("model must be provided.")
55
+
56
+ message_parts = []
57
+ if prompt:
58
+ message_parts.append(prompt)
59
+
60
+ if images:
61
+ for image in images:
62
+ message_parts.append(chat_image(self._to_image_url(image)))
63
+
64
+ if not message_parts:
65
+ raise ValueError("No content provided for response generation.")
66
+
67
+ grok_client = Client(api_key=api_key)
68
+
69
+ create_kwargs = {
70
+ "model": model,
71
+ "max_tokens": max_tokens,
72
+ "messages": [user(*message_parts)],
73
+ }
74
+
75
+ normalized_effort = (reasoning_effort or "").strip().lower()
76
+ if normalized_effort in {"low", "high"}:
77
+ create_kwargs["reasoning_effort"] = normalized_effort
78
+
79
+ try:
80
+ chat = grok_client.chat.create(**create_kwargs)
81
+ response = chat.sample()
82
+ except Exception as exc:
83
+ logger.exception("xAI Grok chat request failed: %s", exc)
84
+ raise
85
+
86
+ content = getattr(response, "content", None)
87
+ if content:
88
+ logger.info(
89
+ "xAI chat succeeded: model=%s images=%d text_len=%d",
90
+ model,
91
+ len(images or []),
92
+ len(content or ""),
93
+ )
94
+ return content
95
+
96
+ reasoning_content = getattr(response, "reasoning_content", None)
97
+ if reasoning_content:
98
+ logger.info(
99
+ "xAI chat succeeded (reasoning): model=%s images=%d text_len=%d",
100
+ model,
101
+ len(images or []),
102
+ len(reasoning_content or ""),
103
+ )
104
+ return reasoning_content
105
+
106
+ # Treat successful calls without textual content as a successful, empty response
107
+ # rather than raising. This aligns with callers that handle empty outputs gracefully.
108
+ logger.info(
109
+ "xAI chat succeeded with no text: model=%s images=%d",
110
+ model,
111
+ len(images or []),
112
+ )
113
+ return ""
114
+
115
+ def generate_image(
116
+ self,
117
+ *,
118
+ api_key: str,
119
+ prompt: str,
120
+ model: str,
121
+ image_size: str = "2K",
122
+ image: Optional[ImageInput] = None,
123
+ ) -> bytes:
124
+ """Generate an image using the Grok API.
125
+
126
+ Raises:
127
+ NotImplementedError: This method is not yet implemented for Grok.
128
+ """
129
+ raise NotImplementedError("Image generation is not implemented for Grok.")
130
+
131
+ def list_models(self, *, api_key: str) -> list[dict[str, Optional[str]]]:
132
+ """Return the Grok language models available to the authenticated account."""
133
+ if not api_key:
134
+ raise ValueError("api_key must be provided.")
135
+
136
+ grok_client = Client(api_key=api_key)
137
+ models: list[dict[str, Optional[str]]] = []
138
+
139
+ try:
140
+ iterator = grok_client.models.list_language_models()
141
+ except Exception as exc:
142
+ logger.exception("xAI list language models failed: %s", exc)
143
+ raise
144
+
145
+ for model in iterator:
146
+ model_id = getattr(model, "name", None)
147
+ if not model_id:
148
+ continue
149
+
150
+ aliases = getattr(model, "aliases", None)
151
+ display_name: Optional[str] = None
152
+ if aliases:
153
+ try:
154
+ display_name = next(iter(aliases)) or None
155
+ except (StopIteration, TypeError):
156
+ display_name = None
157
+
158
+ models.append(
159
+ {
160
+ "id": model_id,
161
+ "display_name": display_name,
162
+ }
163
+ )
164
+
165
+ logger.info("xAI list_language_models succeeded: count=%d", len(models))
166
+ return models
167
+
168
+ @staticmethod
169
+ def _to_image_url(image: ImageInput) -> str:
170
+ """Convert an image reference into a URL or data URL suitable for the xAI SDK."""
171
+ if isinstance(image, Path):
172
+ return _encode_image_path(image)
173
+
174
+ if image.startswith(("http://", "https://", "data:")):
175
+ return image
176
+
177
+ return _encode_image_path(Path(image))
178
+
179
+
180
+ def _encode_image_path(path: Path) -> str:
181
+ """Generate a data URL for the provided image path."""
182
+ expanded = path.expanduser()
183
+ data = expanded.read_bytes()
184
+ mime_type = mimetypes.guess_type(expanded.name)[0] or "application/octet-stream"
185
+ encoded = base64.b64encode(data).decode("utf-8")
186
+ return f"data:{mime_type};base64,{encoded}"
llmconnector/openai_client.py CHANGED
@@ -4,26 +4,26 @@ from __future__ import annotations
 
 import base64
 import mimetypes
-from pathlib import Path
-import logging
-from typing import Optional, Sequence, Union
-
-from openai import OpenAI
-
-try:
-    from openai import APIError as OpenAIError  # type: ignore
-except ImportError:  # pragma: no cover - fallback for older SDKs
-    from openai.error import OpenAIError  # type: ignore
-
-ImageInput = Union[str, Path]
-logger = logging.getLogger(__name__)
-
-
-class OpenAIResponsesClient:
-    """Convenience wrapper around the OpenAI Responses API."""
-
-    def generate_response(
-        self,
+from pathlib import Path
+import logging
+from typing import Optional, Sequence, Union
+
+from openai import OpenAI
+
+try:
+    from openai import APIError as OpenAIError  # type: ignore
+except ImportError:  # pragma: no cover - fallback for older SDKs
+    from openai.error import OpenAIError  # type: ignore
+
+ImageInput = Union[str, Path]
+logger = logging.getLogger(__name__)
+
+
+class OpenAIResponsesClient:
+    """Convenience wrapper around the OpenAI Responses API."""
+
+    def generate_response(
+        self,
         *,
         api_key: str,
         prompt: str,
@@ -83,24 +83,24 @@ class OpenAIResponsesClient:
         if reasoning_effort:
             request_payload["reasoning"] = {"effort": reasoning_effort}
 
-        try:
-            response = client.responses.create(**request_payload)
-        except Exception as exc:  # Log and re-raise to preserve default behavior
-            logger.exception("OpenAI Responses API request failed: %s", exc)
-            raise
-
-        output_text = response.output_text
-        logger.info(
-            "OpenAI generate_response succeeded: model=%s images=%d text_len=%d",
-            model,
-            len(images or []),
-            len(output_text or ""),
-        )
-        return output_text
-
-    @staticmethod
-    def _to_image_block(image: ImageInput) -> dict:
-        """Convert an image reference into an OpenAI Responses API block."""
+        try:
+            response = client.responses.create(**request_payload)
+        except Exception as exc:  # Log and re-raise to preserve default behavior
+            logger.exception("OpenAI Responses API request failed: %s", exc)
+            raise
+
+        output_text = response.output_text
+        logger.info(
+            "OpenAI generate_response succeeded: model=%s images=%d text_len=%d",
+            model,
+            len(images or []),
+            len(output_text or ""),
+        )
+        return output_text
+
+    @staticmethod
+    def _to_image_block(image: ImageInput) -> dict:
+        """Convert an image reference into an OpenAI Responses API block."""
         if isinstance(image, Path):
             return {
                 "type": "input_image",
@@ -113,41 +113,57 @@ class OpenAIResponsesClient:
                 "image_url": image,
             }
 
-        return {
-            "type": "input_image",
-            "image_url": _encode_image_path(Path(image)),
-        }
-
-    def list_models(self, *, api_key: str) -> list[dict[str, Optional[str]]]:
-        """Return the models available to the authenticated OpenAI account."""
-        if not api_key:
-            raise ValueError("api_key must be provided.")
-
-        client = OpenAI(api_key=api_key)
-        try:
-            response = client.models.list()
-        except Exception as exc:
-            logger.exception("OpenAI list models failed: %s", exc)
-            raise
-        data = getattr(response, "data", []) or []
-
-        models: list[dict[str, Optional[str]]] = []
-        for model in data:
-            model_id = getattr(model, "id", None)
-            if model_id is None and isinstance(model, dict):
-                model_id = model.get("id")
-            if not model_id:
-                continue
-
-            display_name = getattr(model, "display_name", None)
-            if display_name is None and isinstance(model, dict):
-                display_name = model.get("display_name")
-
-            models.append({"id": model_id, "display_name": display_name})
-
-        logger.info("OpenAI list_models succeeded: count=%d", len(models))
-        return models
-
+        return {
+            "type": "input_image",
+            "image_url": _encode_image_path(Path(image)),
+        }
+
+    def generate_image(
+        self,
+        *,
+        api_key: str,
+        prompt: str,
+        model: str,
+        image_size: str = "2K",
+        image: Optional[ImageInput] = None,
+    ) -> bytes:
+        """Generate an image using the OpenAI API.
+
+        Raises:
+            NotImplementedError: This method is not yet implemented for OpenAI.
+        """
+        raise NotImplementedError("Image generation is not implemented for OpenAI.")
+
+    def list_models(self, *, api_key: str) -> list[dict[str, Optional[str]]]:
+        """Return the models available to the authenticated OpenAI account."""
+        if not api_key:
+            raise ValueError("api_key must be provided.")
+
+        client = OpenAI(api_key=api_key)
+        try:
+            response = client.models.list()
+        except Exception as exc:
+            logger.exception("OpenAI list models failed: %s", exc)
+            raise
+        data = getattr(response, "data", []) or []
+
+        models: list[dict[str, Optional[str]]] = []
+        for model in data:
+            model_id = getattr(model, "id", None)
+            if model_id is None and isinstance(model, dict):
+                model_id = model.get("id")
+            if not model_id:
+                continue
+
+            display_name = getattr(model, "display_name", None)
+            if display_name is None and isinstance(model, dict):
+                display_name = model.get("display_name")
+
+            models.append({"id": model_id, "display_name": display_name})
+
+        logger.info("OpenAI list_models succeeded: count=%d", len(models))
+        return models
+
 
 def _encode_image_path(path: Path) -> str:
     """Generate a data URL for the provided image path."""
llmconnector/py.typed CHANGED
@@ -1 +1 @@
-
+
ccs_llmconnector-1.0.4.dist-info/RECORD DELETED
@@ -1,14 +0,0 @@
-ccs_llmconnector-1.0.4.dist-info/licenses/LICENSE,sha256=YYl_gt0O2aJW046pklgKWlVVZZpFcTIOsycrs69ltn4,1061
-llmconnector/__init__.py,sha256=SCCVGnaj8aFeE5ugvgf2bGmCLt29R__hoRu0qKhJA4c,1174
-llmconnector/anthropic_client.py,sha256=BoRoIqdgKKico_7u7T-jDybhta3dlEYMTL8FX1SfEDM,7197
-llmconnector/client.py,sha256=SVn-afiwjdnFnlDflN-WiGK1wdFyazh5wSmUTtRWQmU,4834
-llmconnector/client_cli.py,sha256=ncEkQ5xcoPnfnnDYaFKbpf6imR9uDBqlLFxS9EA1_7s,10335
-llmconnector/gemini_client.py,sha256=GDcx3KfYHWA4qlTHRqPQ_wGk_HL8wIRWB-qHT4ZHC0k,8060
-llmconnector/grok_client.py,sha256=lv-syTmqcgpLsLf6VVsPnghos3lR9YvPIuNv-Iy7SAo,5823
-llmconnector/openai_client.py,sha256=ECugkVsKNWaV_Xl3suY9XhagSAyCP1AwKP1mXtUGmDk,5402
-llmconnector/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-ccs_llmconnector-1.0.4.dist-info/METADATA,sha256=ym56hl73mswU1qhD5n9lDsaEh2SIEdpE1MvuAvdXQec,14051
-ccs_llmconnector-1.0.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-ccs_llmconnector-1.0.4.dist-info/entry_points.txt,sha256=eFvLY3nHAG_QhaKlemhhK7echfezW0KiMdSNMZOStLc,60
-ccs_llmconnector-1.0.4.dist-info/top_level.txt,sha256=Doer7TAUsN8UXQfPHPNsuBXVNCz2uV-Q0v4t4fwv_MM,13
-ccs_llmconnector-1.0.4.dist-info/RECORD,,
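
For completeness, here is a rough, standalone illustration of the data-URL image block that the helpers shown in the diff above build for a local file (_encode_image_path and _to_image_block in openai_client.py, with an equivalent _to_image_url path in grok_client.py). It is not part of the package, and the file name is a placeholder.

import base64
import mimetypes
from pathlib import Path

# Mirror of _encode_image_path: read the file, guess its MIME type, base64-encode it.
path = Path("diagram.png").expanduser()   # placeholder path
mime_type = mimetypes.guess_type(path.name)[0] or "application/octet-stream"
encoded = base64.b64encode(path.read_bytes()).decode("utf-8")

# _to_image_block returns a dict of this shape for local paths and non-URL strings.
block = {
    "type": "input_image",
    "image_url": f"data:{mime_type};base64,{encoded}",
}
print(block["type"], block["image_url"][:40], "...")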