lollms-client 0.24.2__py3-none-any.whl → 0.27.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- lollms_client/__init__.py +3 -2
- lollms_client/llm_bindings/azure_openai/__init__.py +364 -0
- lollms_client/llm_bindings/claude/__init__.py +549 -0
- lollms_client/llm_bindings/gemini/__init__.py +501 -0
- lollms_client/llm_bindings/grok/__init__.py +536 -0
- lollms_client/llm_bindings/groq/__init__.py +292 -0
- lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +307 -0
- lollms_client/llm_bindings/litellm/__init__.py +201 -0
- lollms_client/llm_bindings/lollms/__init__.py +2 -0
- lollms_client/llm_bindings/mistral/__init__.py +298 -0
- lollms_client/llm_bindings/open_router/__init__.py +304 -0
- lollms_client/llm_bindings/openai/__init__.py +30 -9
- lollms_client/lollms_core.py +338 -162
- lollms_client/lollms_discussion.py +135 -37
- lollms_client/lollms_llm_binding.py +4 -0
- lollms_client/lollms_types.py +9 -1
- lollms_client/lollms_utilities.py +68 -0
- lollms_client/mcp_bindings/remote_mcp/__init__.py +82 -4
- lollms_client-0.27.0.dist-info/METADATA +604 -0
- {lollms_client-0.24.2.dist-info → lollms_client-0.27.0.dist-info}/RECORD +23 -14
- lollms_client-0.24.2.dist-info/METADATA +0 -239
- {lollms_client-0.24.2.dist-info → lollms_client-0.27.0.dist-info}/WHEEL +0 -0
- {lollms_client-0.24.2.dist-info → lollms_client-0.27.0.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-0.24.2.dist-info → lollms_client-0.27.0.dist-info}/top_level.txt +0 -0
lollms_client/llm_bindings/claude/__init__.py (new file)

@@ -0,0 +1,549 @@
import base64
import os
from io import BytesIO
from pathlib import Path
from typing import Optional, Callable, List, Union, Dict, Any
import json
import requests

from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
from lollms_client.lollms_llm_binding import LollmsLLMBinding
from lollms_client.lollms_types import MSG_TYPE
from ascii_colors import ASCIIColors, trace_exception

import pipmaster as pm

# Ensure the required packages are installed
# Added 'requests' for dynamic model listing
pm.ensure_packages(["anthropic", "pillow", "tiktoken", "requests"])

import anthropic
from PIL import Image, ImageDraw
import tiktoken

BindingName = "ClaudeBinding"

# API endpoint for model listing
ANTHROPIC_API_BASE_URL = "https://api.anthropic.com/v1"

# A hardcoded list to be used as a fallback if the API call fails
_FALLBACK_MODELS = [
    {'model_name': 'claude-3-opus-20240229', 'display_name': 'Claude 3 Opus', 'description': 'Most powerful model for highly complex tasks.', 'owned_by': 'Anthropic'},
    {'model_name': 'claude-3-5-sonnet-20240620', 'display_name': 'Claude 3.5 Sonnet', 'description': 'Our most intelligent model, a new industry standard.', 'owned_by': 'Anthropic'},
    {'model_name': 'claude-3-sonnet-20240229', 'display_name': 'Claude 3 Sonnet', 'description': 'Ideal balance of intelligence and speed for enterprise workloads.', 'owned_by': 'Anthropic'},
    {'model_name': 'claude-3-haiku-20240307', 'display_name': 'Claude 3 Haiku', 'description': 'Fastest and most compact model for near-instant responsiveness.', 'owned_by': 'Anthropic'},
    {'model_name': 'claude-2.1', 'display_name': 'Claude 2.1', 'description': 'Legacy model with a 200K token context window.', 'owned_by': 'Anthropic'},
    {'model_name': 'claude-2.0', 'display_name': 'Claude 2.0', 'description': 'Legacy model.', 'owned_by': 'Anthropic'},
    {'model_name': 'claude-instant-1.2', 'display_name': 'Claude Instant 1.2', 'description': 'Legacy fast and light-weight model.', 'owned_by': 'Anthropic'},
]


# Helper to check if a string is a valid path to an image
def is_image_path(path_str: str) -> bool:
    try:
        p = Path(path_str)
        return p.is_file() and p.suffix.lower() in ['.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp']
    except Exception:
        return False

# Helper to get image media type
def get_media_type(image_path: Union[str, Path]) -> str:
    path = Path(image_path)
    ext = path.suffix.lower()
    if ext == ".jpg" or ext == ".jpeg":
        return "image/jpeg"
    elif ext == ".png":
        return "image/png"
    elif ext == ".gif":
        return "image/gif"
    elif ext == ".webp":
        return "image/webp"
    else:
        # A default or raise an error
        return "image/jpeg"

class ClaudeBinding(LollmsLLMBinding):
    """Anthropic Claude-specific binding implementation."""

    def __init__(self,
                 host_address: str = None,  # Ignored, for compatibility
                 model_name: str = "claude-3-sonnet-20240229",
                 service_key: str = None,
                 verify_ssl_certificate: bool = True,  # Ignored, for compatibility
                 **kwargs
                 ):
        """
        Initialize the Claude binding.

        Args:
            model_name (str): Name of the Claude model to use.
            service_key (str): Anthropic API key.
        """
        super().__init__(binding_name=BindingName)
        self.model_name = model_name
        self.service_key = service_key
        self._cached_models: Optional[List[Dict[str, str]]] = None

        if not self.service_key:
            self.service_key = os.getenv("ANTHROPIC_API_KEY")

        if not self.service_key:
            raise ValueError("Anthropic API key is required. Please set it via the 'service_key' parameter or the ANTHROPIC_API_KEY environment variable.")

        try:
            self.client = anthropic.Anthropic(api_key=self.service_key)
        except Exception as e:
            ASCIIColors.error(f"Failed to configure Anthropic client: {e}")
            self.client = None
            raise ConnectionError(f"Could not configure Anthropic client: {e}") from e

    def _construct_parameters(self,
                              temperature: float,
                              top_p: float,
                              top_k: int,
                              n_predict: int) -> Dict[str, Any]:
        """Builds a parameters dictionary for the Claude API."""
        params = {}
        if temperature is not None: params['temperature'] = float(temperature)
        if top_p is not None: params['top_p'] = top_p
        if top_k is not None: params['top_k'] = top_k
        if n_predict is not None: params['max_tokens'] = n_predict
        return params

    def generate_text(self,
                      prompt: str,
                      images: Optional[List[str]] = None,
                      system_prompt: str = "",
                      n_predict: Optional[int] = 2048,
                      stream: Optional[bool] = False,
                      temperature: float = 0.7,
                      top_k: int = 40,
                      top_p: float = 0.9,
                      repeat_penalty: float = 1.1,  # Not supported
                      repeat_last_n: int = 64,  # Not supported
                      seed: Optional[int] = None,  # Not supported
                      n_threads: Optional[int] = None,  # Not applicable
                      ctx_size: int | None = None,  # Determined by model
                      streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
                      split: Optional[bool] = False,  # Not used in this direct method
                      user_keyword: Optional[str] = "!@>user:",  # Not used
                      ai_keyword: Optional[str] = "!@>assistant:",  # Not used
                      ) -> Union[str, dict]:
        """
        Generate text using the Claude model.
        """
        if not self.client:
            return {"status": False, "error": "Anthropic client not initialized."}

        api_params = self._construct_parameters(temperature, top_p, top_k, n_predict)

        message_content = []
        if prompt and prompt.strip():
            message_content.append({"type": "text", "text": prompt})

        if images:
            for image_data in images:
                try:
                    # ... (image processing code is unchanged)
                    if is_image_path(image_data):
                        with open(image_data, "rb") as image_file:
                            b64_data = base64.b64encode(image_file.read()).decode('utf-8')
                        media_type = get_media_type(image_data)
                    else:
                        b64_data = image_data
                        media_type = "image/jpeg"

                    message_content.append({
                        "type": "image",
                        "source": {"type": "base64", "media_type": media_type, "data": b64_data}
                    })
                except Exception as e:
                    error_msg = f"Failed to process image: {e}"
                    ASCIIColors.error(error_msg)
                    return {"status": False, "error": error_msg}

        if not message_content:
            if stream and streaming_callback:
                streaming_callback("", MSG_TYPE.MSG_TYPE_FINISHED_MESSAGE)
            return ""

        messages = [{"role": "user", "content": message_content}]
        full_response_text = ""

        # ---- CHANGE START ----
        # Conditionally build the request arguments to avoid sending an empty `system` parameter.
        request_args = {
            "model": self.model_name,
            "messages": messages,
            **api_params
        }
        if system_prompt and system_prompt.strip():
            request_args["system"] = system_prompt
        # ---- CHANGE END ----

        try:
            if stream:
                with self.client.messages.stream(**request_args) as stream_response:
                    for chunk in stream_response.text_stream:
                        full_response_text += chunk
                        if streaming_callback:
                            if not streaming_callback(chunk, MSG_TYPE.MSG_TYPE_CHUNK):
                                break
                return full_response_text
            else:
                response = self.client.messages.create(**request_args)
                if response.stop_reason == "error":
                    return {"status": False, "error": f"API returned an error: {response.stop_reason}"}
                return response.content[0].text

        except Exception as ex:
            error_message = f"An unexpected error occurred with Claude API: {str(ex)}"
            trace_exception(ex)
            return {"status": False, "error": error_message}

    def chat(self,
             discussion: LollmsDiscussion,
             branch_tip_id: Optional[str] = None,
             n_predict: Optional[int] = 2048,
             stream: Optional[bool] = False,
             temperature: float = 0.7,
             top_k: int = 40,
             top_p: float = 0.9,
             repeat_penalty: float = 1.1,  # Not supported
             repeat_last_n: int = 64,  # Not supported
             seed: Optional[int] = None,  # Not supported
             n_threads: Optional[int] = None,  # Not supported
             ctx_size: Optional[int] = None,  # Not supported
             streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None
             ) -> Union[str, dict]:
        """
        Conduct a chat session with the Claude model using a LollmsDiscussion object.
        """
        if not self.client:
            return {"status": "error", "message": "Anthropic client not initialized."}

        system_prompt = discussion.system_prompt
        messages = discussion.get_messages(branch_tip_id)

        history = []
        # ... (history building code is unchanged)
        for msg in messages:
            role = 'user' if msg.sender_type == "user" else 'assistant'
            content_parts = []
            if msg.content and msg.content.strip():
                content_parts.append({"type": "text", "text": msg.content})

            if msg.images:
                for file_path in msg.images:
                    if is_image_path(file_path):
                        try:
                            with open(file_path, "rb") as image_file:
                                b64_data = base64.b64encode(image_file.read()).decode('utf-8')
                            media_type = get_media_type(file_path)
                            content_parts.append({
                                "type": "image",
                                "source": {"type": "base64", "media_type": media_type, "data": b64_data}
                            })
                        except Exception as e:
                            ASCIIColors.warning(f"Could not load image {file_path}: {e}")

            if content_parts:
                if history and history[-1]['role'] == role:
                    history[-1]['content'].extend(content_parts)
                else:
                    history.append({'role': role, 'content': content_parts})

        if not history:
            return {"status": "error", "message": "Cannot start chat with an empty discussion."}

        api_params = self._construct_parameters(temperature, top_p, top_k, n_predict)
        full_response_text = ""

        # ---- CHANGE START ----
        # Conditionally build the request arguments to avoid sending an empty `system` parameter.
        request_args = {
            "model": self.model_name,
            "messages": history,
            **api_params
        }
        if system_prompt and system_prompt.strip():
            request_args["system"] = system_prompt
        # ---- CHANGE END ----

        try:
            if stream:
                with self.client.messages.stream(**request_args) as stream_response:
                    for chunk in stream_response.text_stream:
                        full_response_text += chunk
                        if streaming_callback:
                            if not streaming_callback(chunk, MSG_TYPE.MSG_TYPE_CHUNK):
                                break
                return full_response_text
            else:
                response = self.client.messages.create(**request_args)
                if response.stop_reason == "error":
                    return {"status": "error", "message": f"API returned an error: {response.stop_reason}"}
                return response.content[0].text

        except Exception as ex:
            error_message = f"An unexpected error occurred with Claude API: {str(ex)}"
            trace_exception(ex)
            return {"status": "error", "message": error_message}

    # ... (Rest of the file is unchanged) ...
    def tokenize(self, text: str) -> list:
        """
        Tokenize the input text.
        Note: Claude doesn't expose a public tokenizer API.
        Using tiktoken for a rough estimate, NOT accurate for Claude.
        """
        try:
            encoding = tiktoken.get_encoding("cl100k_base")
            return encoding.encode(text)
        except Exception:
            return list(text.encode('utf-8'))

    def detokenize(self, tokens: list) -> str:
        """
        Detokenize a list of tokens.
        Note: Based on the placeholder tokenizer.
        """
        try:
            encoding = tiktoken.get_encoding("cl100k_base")
            return encoding.decode(tokens)
        except Exception:
            return bytes(tokens).decode('utf-8', errors='ignore')

    def count_tokens(self, text: str) -> int:
        """
        Count tokens from a text using the Anthropic API.
        This provides a more accurate count than a fallback tokenizer.
        """
        if not text or not text.strip():
            return 0

        if not self.client:
            ASCIIColors.warning("Cannot count tokens, Anthropic client not initialized.")
            return len(self.tokenize(text))
        try:
            # count_tokens takes no system prompt here, so only user content tokens are counted.
            response = self.client.messages.count_tokens(
                model=self.model_name,
                messages=[{"role": "user", "content": text}]
            )
            return response.input_tokens
        except Exception as e:
            ASCIIColors.error(f"Failed to count tokens with Claude API: {e}")
            return len(self.tokenize(text))

    def embed(self, text: str, **kwargs) -> List[float]:
        """
        Get embeddings for the input text.
        Note: Anthropic does not provide a dedicated embedding model API.
        """
        ASCIIColors.warning("Anthropic does not offer a public embedding API. This method is not implemented.")
        raise NotImplementedError("Claude binding does not support embeddings.")

    def get_model_info(self) -> dict:
        """Return information about the current Claude model setup."""
        return {
            "name": self.binding_name,
            "version": anthropic.__version__,
            "host_address": "https://api.anthropic.com",
            "model_name": self.model_name,
            "supports_structured_output": False,
            "supports_vision": "claude-3" in self.model_name,
        }

    def listModels(self) -> List[Dict[str, str]]:
        """
        Lists available models from the Anthropic API.
        Caches the result to avoid repeated API calls.
        Falls back to a static list if the API call fails.
        """
        if self._cached_models is not None:
            return self._cached_models

        if not self.service_key:
            ASCIIColors.warning("Cannot fetch models without an API key. Using fallback list.")
            self._cached_models = _FALLBACK_MODELS
            return self._cached_models

        headers = {
            "x-api-key": self.service_key,
            "anthropic-version": "2023-06-01",
            "accept": "application/json"
        }
        url = f"{ANTHROPIC_API_BASE_URL}/models"

        try:
            ASCIIColors.info("Fetching available models from Anthropic API...")
            response = requests.get(url, headers=headers, timeout=15)
            response.raise_for_status()

            data = response.json()

            if "data" in data and isinstance(data["data"], list):
                models_data = data["data"]
                formatted_models = []
                for model in models_data:
                    model_id = model.get("id")
                    if not model_id: continue

                    display_name = model.get("name", model_id.replace("-", " ").title())

                    desc_parts = []
                    if model.get('context_length'):
                        desc_parts.append(f"Context: {model['context_length']:,} tokens.")
                    if model.get('max_output_tokens'):
                        desc_parts.append(f"Max Output: {model['max_output_tokens']:,} tokens.")
                    description = " ".join(desc_parts) or f"Anthropic model: {model_id}"

                    formatted_models.append({
                        'model_name': model_id,
                        'display_name': display_name,
                        'description': description,
                        'owned_by': 'Anthropic'
                    })

                formatted_models.sort(key=lambda x: x['model_name'])
                self._cached_models = formatted_models
                ASCIIColors.green(f"Successfully fetched and parsed {len(self._cached_models)} models.")
                return self._cached_models
            else:
                raise ValueError("API response is malformed. 'data' field missing or not a list.")

        except Exception as e:
            ASCIIColors.error(f"Failed to fetch models from Anthropic API: {e}")
            ASCIIColors.warning("Using hardcoded fallback list of models.")
            trace_exception(e)
            self._cached_models = _FALLBACK_MODELS
            return self._cached_models

    def load_model(self, model_name: str) -> bool:
        """Set the model name for subsequent operations."""
        self.model_name = model_name
        ASCIIColors.info(f"Claude model set to: {model_name}. It will be used on the next API call.")
        return True

if __name__ == '__main__':
    # Example Usage (requires ANTHROPIC_API_KEY environment variable)
    if 'ANTHROPIC_API_KEY' not in os.environ:
        ASCIIColors.red("Error: ANTHROPIC_API_KEY environment variable not set.")
        print("Please get your key from Anthropic and set it.")
        exit(1)

    ASCIIColors.yellow("--- Testing ClaudeBinding ---")

    # --- Configuration ---
    test_model_name = "claude-3-haiku-20240307"  # Use Haiku for speed in testing
    test_vision_model_name = "claude-3-sonnet-20240229"

    full_streamed_text = ""

    try:
        # --- Initialization ---
        ASCIIColors.cyan("\n--- Initializing Binding ---")
        binding = ClaudeBinding(model_name=test_model_name)
        ASCIIColors.green("Binding initialized successfully.")
        ASCIIColors.info(f"Using anthropic version: {anthropic.__version__}")

        # --- List Models ---
        ASCIIColors.cyan("\n--- Listing Models (dynamic) ---")
        models = binding.listModels()
        if models:
            ASCIIColors.green(f"Found {len(models)} models.")
            for m in models:
                print(f"- {m['model_name']} ({m['display_name']})")
        else:
            ASCIIColors.error("Failed to list models.")

        # --- Count Tokens ---
        ASCIIColors.cyan("\n--- Counting Tokens (with valid and empty text) ---")
        sample_text = "Hello, world! This is a test."
        token_count = binding.count_tokens(sample_text)
        ASCIIColors.green(f"Token count for '{sample_text}': {token_count} (via API)")
        empty_token_count = binding.count_tokens(" ")
        ASCIIColors.green(f"Token count for empty string: {empty_token_count} (handled locally)")
        assert empty_token_count == 0

        # --- Text Generation (Non-Streaming) ---
        ASCIIColors.cyan("\n--- Text Generation (Non-Streaming) ---")
        prompt_text = "Explain the importance of bees in one paragraph."
        ASCIIColors.info(f"Prompt: {prompt_text}")
        generated_text = binding.generate_text(prompt_text, n_predict=100, stream=False, system_prompt=" ")
        if isinstance(generated_text, str):
            ASCIIColors.green(f"Generated text:\n{generated_text}")
        else:
            ASCIIColors.error(f"Generation failed: {generated_text}")

        # --- Text Generation (Streaming) ---
        ASCIIColors.cyan("\n--- Text Generation (Streaming) ---")

        captured_chunks = []
        def stream_callback(chunk: str, msg_type: int):
            ASCIIColors.green(chunk, end="", flush=True)
            captured_chunks.append(chunk)
            return True

        ASCIIColors.info(f"Prompt: {prompt_text}")
        result = binding.generate_text(prompt_text, n_predict=150, stream=True, streaming_callback=stream_callback)
        full_streamed_text = "".join(captured_chunks)
        print("\n--- End of Stream ---")
        ASCIIColors.green(f"Full streamed text (for verification): {result}")
        assert result == full_streamed_text

        # --- Embeddings ---
        ASCIIColors.cyan("\n--- Embeddings ---")
        try:
            embedding_text = "Lollms is a cool project."
            embedding_vector = binding.embed(embedding_text)
        except NotImplementedError as e:
            ASCIIColors.green(f"Successfully caught expected error for embeddings: {e}")
        except Exception as e:
            ASCIIColors.error(f"Caught an unexpected error for embeddings: {e}")

        # --- Vision Model Test ---
        dummy_image_path = "claude_dummy_test_image.png"
        try:
            available_model_names = [m['model_name'] for m in models]
            if test_vision_model_name not in available_model_names:
                ASCIIColors.warning(f"Vision test model '{test_vision_model_name}' not available. Skipping vision test.")
            else:
                img = Image.new('RGB', (200, 50), color='blue')
                d = ImageDraw.Draw(img)
                d.text((10, 10), "Test Image", fill='yellow')
                img.save(dummy_image_path)
                ASCIIColors.info(f"Created dummy image: {dummy_image_path}")

                ASCIIColors.cyan(f"\n--- Vision Generation (using {test_vision_model_name}) ---")
                binding.load_model(test_vision_model_name)
                vision_prompt = "What color is the text and what does it say?"
                ASCIIColors.info(f"Vision Prompt: {vision_prompt} with image {dummy_image_path}")

                vision_response = binding.generate_text(
                    prompt=vision_prompt,
                    images=[dummy_image_path],
                    n_predict=50,
                    stream=False
                )
                if isinstance(vision_response, str):
                    ASCIIColors.green(f"Vision model response: {vision_response}")
                else:
                    ASCIIColors.error(f"Vision generation failed: {vision_response}")
        except Exception as e:
            ASCIIColors.error(f"Error during vision test: {e}")
            trace_exception(e)
        finally:
            if os.path.exists(dummy_image_path):
                os.remove(dummy_image_path)

    except Exception as e:
        ASCIIColors.error(f"An error occurred during testing: {e}")
        trace_exception(e)

    ASCIIColors.yellow("\nClaudeBinding test finished.")
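
For reference, a minimal usage sketch of the new binding added in this release, distilled from the file's own `__main__` test. It assumes ANTHROPIC_API_KEY is exported and that the named model is still served; the callback name `on_chunk` is illustrative, not part of the package.

# Minimal sketch: drive the new ClaudeBinding directly (assumes ANTHROPIC_API_KEY is set).
from lollms_client.llm_bindings.claude import ClaudeBinding

binding = ClaudeBinding(model_name="claude-3-haiku-20240307")

# Non-streaming: returns a plain string on success, or a {"status": False, ...} dict on failure.
reply = binding.generate_text("Say hello in five words.", n_predict=32, stream=False)
print(reply)

# Streaming: the callback receives each text chunk; returning False stops the stream early.
def on_chunk(chunk, msg_type):  # hypothetical callback name
    print(chunk, end="", flush=True)
    return True

binding.generate_text("Count from 1 to 5.", n_predict=64, stream=True, streaming_callback=on_chunk)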