ccs-llmconnector 1.0.2__tar.gz → 1.0.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (20)
  1. {ccs_llmconnector-1.0.2/src/ccs_llmconnector.egg-info → ccs_llmconnector-1.0.3}/PKG-INFO +1 -1
  2. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/pyproject.toml +1 -1
  3. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3/src/ccs_llmconnector.egg-info}/PKG-INFO +1 -1
  4. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/llmconnector/anthropic_client.py +31 -4
  5. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/llmconnector/gemini_client.py +42 -9
  6. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/llmconnector/grok_client.py +36 -5
  7. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/llmconnector/openai_client.py +24 -6
  8. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/LICENSE +0 -0
  9. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/MANIFEST.in +0 -0
  10. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/README.md +0 -0
  11. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/setup.cfg +0 -0
  12. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/ccs_llmconnector.egg-info/SOURCES.txt +0 -0
  13. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/ccs_llmconnector.egg-info/dependency_links.txt +0 -0
  14. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/ccs_llmconnector.egg-info/entry_points.txt +0 -0
  15. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/ccs_llmconnector.egg-info/requires.txt +0 -0
  16. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/ccs_llmconnector.egg-info/top_level.txt +0 -0
  17. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/llmconnector/__init__.py +0 -0
  18. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/llmconnector/client.py +0 -0
  19. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/llmconnector/client_cli.py +0 -0
  20. {ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/llmconnector/py.typed +0 -0
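Across the four client modules below, 1.0.3 makes the same two changes: every SDK call is wrapped in a try/except that logs the failure and re-raises, and responses containing no text now return an empty string instead of raising RuntimeError. The new messages go through the standard library's logging module under the llmconnector namespace, so they stay silent unless the consuming application configures a handler. A minimal sketch of surfacing them (plain stdlib logging, no package-specific API assumed):

import logging

# The 1.0.3 clients each create logging.getLogger(__name__), i.e. loggers
# named llmconnector.openai_client, llmconnector.gemini_client, and so on.
# A root handler at INFO level is enough to see both the new logger.info
# success lines and the logger.exception tracebacks.
logging.basicConfig(level=logging.INFO)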
{ccs_llmconnector-1.0.2/src/ccs_llmconnector.egg-info → ccs_llmconnector-1.0.3}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ccs-llmconnector
-Version: 1.0.2
+Version: 1.0.3
 Summary: Lightweight wrapper around different LLM provider Python SDK Responses APIs.
 Author: CCS
 License: MIT
{ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "ccs-llmconnector"
-version = "1.0.2"
+version = "1.0.3"
 description = "Lightweight wrapper around different LLM provider Python SDK Responses APIs."
 readme = "README.md"
 requires-python = ">=3.8"
{ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3/src/ccs_llmconnector.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ccs-llmconnector
-Version: 1.0.2
+Version: 1.0.3
 Summary: Lightweight wrapper around different LLM provider Python SDK Responses APIs.
 Author: CCS
 License: MIT
{ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/llmconnector/anthropic_client.py
@@ -5,6 +5,7 @@ from __future__ import annotations
 import base64
 import mimetypes
 from pathlib import Path
+import logging
 from typing import Optional, Sequence, Union
 from urllib.error import URLError
 from urllib.request import urlopen
@@ -12,6 +13,7 @@ from urllib.request import urlopen
 from anthropic import APIError, Anthropic
 
 ImageInput = Union[str, Path]
+logger = logging.getLogger(__name__)
 
 
 class AnthropicClient:
@@ -65,11 +67,15 @@ class AnthropicClient:
 
         client = Anthropic(api_key=api_key)
 
+        try:
             response = client.messages.create(
                 model=model,
                 max_tokens=max_tokens,
                 messages=[{"role": "user", "content": content_blocks}],
             )
+        except Exception as exc:
+            logger.exception("Anthropic messages.create failed: %s", exc)
+            raise
 
         text_blocks: list[str] = []
         for block in getattr(response, "content", []) or []:
@@ -79,9 +85,23 @@ class AnthropicClient:
                 text_blocks.append(text)
 
         if text_blocks:
-            return "".join(text_blocks)
-
-        raise RuntimeError("Anthropic response did not include any text output.")
+            result_text = "".join(text_blocks)
+            logger.info(
+                "Anthropic messages.create succeeded: model=%s images=%d text_len=%d",
+                model,
+                len(images or []),
+                len(result_text or ""),
+            )
+            return result_text
+
+        # Treat successful calls without textual content as a successful, empty response
+        # rather than raising. This aligns with callers that handle empty outputs gracefully.
+        logger.info(
+            "Anthropic messages.create succeeded with no text: model=%s images=%d",
+            model,
+            len(images or []),
+        )
+        return ""
 
     def list_models(self, *, api_key: str) -> list[dict[str, Optional[str]]]:
         """Return the models available to the authenticated Anthropic account."""
@@ -91,7 +111,13 @@ class AnthropicClient:
         client = Anthropic(api_key=api_key)
         models: list[dict[str, Optional[str]]] = []
 
-        for model in client.models.list():
+        try:
+            iterator = client.models.list()
+        except Exception as exc:
+            logger.exception("Anthropic list models failed: %s", exc)
+            raise
+
+        for model in iterator:
             model_id = getattr(model, "id", None)
             if model_id is None and isinstance(model, dict):
                 model_id = model.get("id")
@@ -104,6 +130,7 @@ class AnthropicClient:
 
             models.append({"id": model_id, "display_name": display_name})
 
+        logger.info("Anthropic list_models succeeded: count=%d", len(models))
         return models
 
     @staticmethod
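The gemini_client.py, grok_client.py, and openai_client.py hunks that follow repeat the shape introduced above for Anthropic. Distilled into a sketch (call_provider is a stand-in for the respective SDK call, not a real function):

try:
    response = call_provider()  # stand-in: messages.create / generate_content / chat.sample / responses.create
except Exception as exc:
    # logger.exception records the message and full traceback at ERROR level;
    # re-raising keeps the pre-1.0.3 failure behavior for callers.
    logger.exception("provider call failed: %s", exc)
    raise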
{ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/llmconnector/gemini_client.py
@@ -5,6 +5,7 @@ from __future__ import annotations
 import base64
 import mimetypes
 from pathlib import Path
+import logging
 from typing import Optional, Sequence, Union
 from urllib.error import URLError
 from urllib.request import urlopen
@@ -13,6 +14,7 @@ from google import genai
 from google.genai import types
 
 ImageInput = Union[str, Path]
+logger = logging.getLogger(__name__)
 
 
 class GeminiClient:
@@ -72,11 +74,15 @@ class GeminiClient:
 
         client = genai.Client(api_key=api_key)
         try:
-            response = client.models.generate_content(
-                model=model,
-                contents=[content],
-                config=config,
-            )
+            try:
+                response = client.models.generate_content(
+                    model=model,
+                    contents=[content],
+                    config=config,
+                )
+            except Exception as exc:
+                logger.exception("Gemini generate_content failed: %s", exc)
+                raise
         finally:
             closer = getattr(client, "close", None)
             if callable(closer):
@@ -86,7 +92,14 @@ class GeminiClient:
                     pass
 
         if response.text:
-            return response.text
+            result_text = response.text
+            logger.info(
+                "Gemini generate_content succeeded: model=%s images=%d text_len=%d",
+                model,
+                len(images or []),
+                len(result_text or ""),
+            )
+            return result_text
 
         candidate_texts: list[str] = []
         for candidate in getattr(response, "candidates", []) or []:
@@ -99,9 +112,23 @@ class GeminiClient:
                     candidate_texts.append(text)
 
         if candidate_texts:
-            return "\n".join(candidate_texts)
+            result_text = "\n".join(candidate_texts)
+            logger.info(
+                "Gemini generate_content succeeded (candidates): model=%s images=%d text_len=%d",
+                model,
+                len(images or []),
+                len(result_text or ""),
+            )
+            return result_text
 
-        raise RuntimeError("Gemini response did not include any text output.")
+        # Treat successful calls without textual content as a successful, empty response
+        # rather than raising. This aligns with callers that handle empty outputs gracefully.
+        logger.info(
+            "Gemini generate_content succeeded with no text: model=%s images=%d",
+            model,
+            len(images or []),
+        )
+        return ""
 
     def list_models(self, *, api_key: str) -> list[dict[str, Optional[str]]]:
         """Return the models available to the authenticated Gemini account."""
@@ -111,7 +138,12 @@ class GeminiClient:
         models: list[dict[str, Optional[str]]] = []
         client = genai.Client(api_key=api_key)
         try:
-            for model in client.models.list():
+            try:
+                iterator = client.models.list()
+            except Exception as exc:
+                logger.exception("Gemini list models failed: %s", exc)
+                raise
+            for model in iterator:
                 model_id = getattr(model, "name", None)
                 if model_id is None and isinstance(model, dict):
                     model_id = model.get("name")
@@ -135,6 +167,7 @@ class GeminiClient:
         except Exception:
             pass
 
+        logger.info("Gemini list_models succeeded: count=%d", len(models))
         return models
 
     @staticmethod
{ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/llmconnector/grok_client.py
@@ -5,6 +5,7 @@ from __future__ import annotations
 import base64
 import mimetypes
 from pathlib import Path
+import logging
 from typing import Optional, Sequence, Union
 
 from xai_sdk import Client
@@ -12,6 +13,7 @@ from xai_sdk.chat import image as chat_image
 from xai_sdk.chat import user
 
 ImageInput = Union[str, Path]
+logger = logging.getLogger(__name__)
 
 
 class GrokClient:
@@ -74,18 +76,41 @@ class GrokClient:
         if normalized_effort in {"low", "high"}:
             create_kwargs["reasoning_effort"] = normalized_effort
 
-        chat = grok_client.chat.create(**create_kwargs)
-        response = chat.sample()
+        try:
+            chat = grok_client.chat.create(**create_kwargs)
+            response = chat.sample()
+        except Exception as exc:
+            logger.exception("xAI Grok chat request failed: %s", exc)
+            raise
 
         content = getattr(response, "content", None)
         if content:
+            logger.info(
+                "xAI chat succeeded: model=%s images=%d text_len=%d",
+                model,
+                len(images or []),
+                len(content or ""),
+            )
             return content
 
         reasoning_content = getattr(response, "reasoning_content", None)
         if reasoning_content:
+            logger.info(
+                "xAI chat succeeded (reasoning): model=%s images=%d text_len=%d",
+                model,
+                len(images or []),
+                len(reasoning_content or ""),
+            )
             return reasoning_content
 
-        raise RuntimeError("xAI response did not include any text output.")
+        # Treat successful calls without textual content as a successful, empty response
+        # rather than raising. This aligns with callers that handle empty outputs gracefully.
+        logger.info(
+            "xAI chat succeeded with no text: model=%s images=%d",
+            model,
+            len(images or []),
+        )
+        return ""
 
     def list_models(self, *, api_key: str) -> list[dict[str, Optional[str]]]:
         """Return the Grok language models available to the authenticated account."""
@@ -95,7 +120,13 @@ class GrokClient:
         grok_client = Client(api_key=api_key)
         models: list[dict[str, Optional[str]]] = []
 
-        for model in grok_client.models.list_language_models():
+        try:
+            iterator = grok_client.models.list_language_models()
+        except Exception as exc:
+            logger.exception("xAI list language models failed: %s", exc)
+            raise
+
+        for model in iterator:
             model_id = getattr(model, "name", None)
             if not model_id:
                 continue
@@ -115,6 +146,7 @@ class GrokClient:
                 }
             )
 
+        logger.info("xAI list_language_models succeeded: count=%d", len(models))
         return models
 
     @staticmethod
@@ -136,4 +168,3 @@ def _encode_image_path(path: Path) -> str:
     mime_type = mimetypes.guess_type(expanded.name)[0] or "application/octet-stream"
     encoded = base64.b64encode(data).decode("utf-8")
     return f"data:{mime_type};base64,{encoded}"
-
{ccs_llmconnector-1.0.2 → ccs_llmconnector-1.0.3}/src/llmconnector/openai_client.py
@@ -4,7 +4,8 @@ from __future__ import annotations
 
 import base64
 import mimetypes
-from pathlib import Path
+from pathlib import Path
+import logging
 from typing import Optional, Sequence, Union
 
 from openai import OpenAI
@@ -14,7 +15,8 @@ try:
 except ImportError:  # pragma: no cover - fallback for older SDKs
     from openai.error import OpenAIError  # type: ignore
 
-ImageInput = Union[str, Path]
+ImageInput = Union[str, Path]
+logger = logging.getLogger(__name__)
 
 
 class OpenAIResponsesClient:
@@ -81,9 +83,20 @@ class OpenAIResponsesClient:
         if reasoning_effort:
             request_payload["reasoning"] = {"effort": reasoning_effort}
 
-        response = client.responses.create(**request_payload)
-
-        return response.output_text
+        try:
+            response = client.responses.create(**request_payload)
+        except Exception as exc:  # Log and re-raise to preserve default behavior
+            logger.exception("OpenAI Responses API request failed: %s", exc)
+            raise
+
+        output_text = response.output_text
+        logger.info(
+            "OpenAI generate_response succeeded: model=%s images=%d text_len=%d",
+            model,
+            len(images or []),
+            len(output_text or ""),
+        )
+        return output_text
 
     @staticmethod
     def _to_image_block(image: ImageInput) -> dict:
@@ -111,7 +124,11 @@ class OpenAIResponsesClient:
             raise ValueError("api_key must be provided.")
 
         client = OpenAI(api_key=api_key)
-        response = client.models.list()
+        try:
+            response = client.models.list()
+        except Exception as exc:
+            logger.exception("OpenAI list models failed: %s", exc)
+            raise
        data = getattr(response, "data", []) or []
 
         models: list[dict[str, Optional[str]]] = []
@@ -128,6 +145,7 @@ class OpenAIResponsesClient:
 
             models.append({"id": model_id, "display_name": display_name})
 
+        logger.info("OpenAI list_models succeeded: count=%d", len(models))
         return models
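The caller-visible consequence of this release is the removed RuntimeError: as of 1.0.3, a provider response with no textual content yields an empty string rather than an exception. A hypothetical caller sketch (the generate_response name is inferred from the OpenAI log message above; the full signature is not shown in this diff):

text = client.generate_response(api_key=api_key, model=model, images=images)  # signature abridged
if not text:
    # 1.0.3: an empty string means the call succeeded but produced no text;
    # 1.0.2 raised RuntimeError("... did not include any text output.") here.
    handle_empty_output()  # hypothetical handler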