lollms-client 0.25.6__py3-none-any.whl → 0.26.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/azure_openai/__init__.py +364 -0
- lollms_client/llm_bindings/claude/__init__.py +549 -0
- lollms_client/llm_bindings/groq/__init__.py +292 -0
- lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +307 -0
- lollms_client/llm_bindings/lollms/__init__.py +1 -0
- lollms_client/llm_bindings/mistral/__init__.py +298 -0
- lollms_client/llm_bindings/open_router/__init__.py +304 -0
- lollms_client/lollms_discussion.py +16 -20
- {lollms_client-0.25.6.dist-info → lollms_client-0.26.0.dist-info}/METADATA +265 -1
- {lollms_client-0.25.6.dist-info → lollms_client-0.26.0.dist-info}/RECORD +14 -8
- {lollms_client-0.25.6.dist-info → lollms_client-0.26.0.dist-info}/WHEEL +0 -0
- {lollms_client-0.25.6.dist-info → lollms_client-0.26.0.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-0.25.6.dist-info → lollms_client-0.26.0.dist-info}/top_level.txt +0 -0
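
The headline change in 0.26.0 is the set of new hosted-provider LLM bindings (Azure OpenAI, Claude, Groq, Hugging Face Inference API, Mistral, OpenRouter). As a rough usage sketch based only on the AzureOpenAIBinding source shown in full below (the key, endpoint, and deployment names are placeholders, not values from this release):

import os
from lollms_client.llm_bindings.azure_openai import AzureOpenAIBinding

# Placeholders; normally these come from your environment.
os.environ.setdefault("AZURE_OPENAI_API_KEY", "<your-api-key>")
os.environ.setdefault("AZURE_OPENAI_ENDPOINT", "https://your-resource.openai.azure.com/")

# In Azure, model_name is the DEPLOYMENT name, not a model id.
binding = AzureOpenAIBinding(model_name="my-gpt4o-deployment")
print(binding.generate_text("Say hello in one sentence.", n_predict=50, stream=False))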

lollms_client/__init__.py
CHANGED

@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utilities
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager


-__version__ = "0.25.6"
+__version__ = "0.26.0" # Updated version

 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [

lollms_client/llm_bindings/azure_openai/__init__.py
ADDED

@@ -0,0 +1,364 @@
import base64
import os
from io import BytesIO
from pathlib import Path
from typing import Optional, Callable, List, Union, Dict

from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
from lollms_client.lollms_llm_binding import LollmsLLMBinding
from lollms_client.lollms_types import MSG_TYPE
from ascii_colors import ASCIIColors, trace_exception

import pipmaster as pm

# Ensure the required packages are installed
pm.ensure_packages(["openai", "pillow", "tiktoken"])

import openai
from PIL import Image, ImageDraw
import tiktoken

BindingName = "AzureOpenAIBinding"

# Helper to check if a string is a valid path to an image
def is_image_path(path_str: str) -> bool:
    try:
        p = Path(path_str)
        return p.is_file() and p.suffix.lower() in ['.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp']
    except Exception:
        return False

# Helper to get image media type
def get_media_type(image_path: Union[str, Path]) -> str:
    path = Path(image_path)
    ext = path.suffix.lower()
    # While OpenAI supports various types, it's often safest to send common ones.
    # We don't need to be as specific as Claude's API.
    return "image/jpeg" if ext in [".jpg", ".jpeg"] else "image/png"

class AzureOpenAIBinding(LollmsLLMBinding):
    """
    Microsoft Azure OpenAI-specific binding implementation.

    This binding connects to an Azure OpenAI deployment. It requires
    the Azure endpoint, API key, and the specific deployment name.
    """

    def __init__(self,
                 model_name: str, # In Azure, this is the DEPLOYMENT NAME
                 azure_api_key: str = None,
                 azure_endpoint: str = None,
                 azure_api_version: str = "2024-02-01",
                 **kwargs
                 ):
        """
        Initialize the AzureOpenAIBinding.

        Args:
            model_name (str): The name of the Azure OpenAI DEPLOYMENT to use.
            azure_api_key (str): The API key for the Azure OpenAI service.
            azure_endpoint (str): The endpoint URL for the Azure OpenAI service.
            azure_api_version (str): The API version to use.
        """
        super().__init__(binding_name=BindingName)
        self.model_name = model_name # Here, it's the deployment name
        self.azure_api_key = azure_api_key or os.getenv("AZURE_OPENAI_API_KEY")
        self.azure_endpoint = azure_endpoint or os.getenv("AZURE_OPENAI_ENDPOINT")
        self.azure_api_version = azure_api_version or os.getenv("AZURE_OPENAI_API_VERSION", "2024-02-01")

        if not self.model_name:
            raise ValueError("Azure deployment name ('model_name') is required.")
        if not self.azure_api_key:
            raise ValueError("Azure API key is required. Set it via 'azure_api_key' or AZURE_OPENAI_API_KEY env var.")
        if not self.azure_endpoint:
            raise ValueError("Azure endpoint is required. Set it via 'azure_endpoint' or AZURE_OPENAI_ENDPOINT env var.")

        try:
            self.client = openai.AzureOpenAI(
                api_key=self.azure_api_key,
                azure_endpoint=self.azure_endpoint,
                api_version=self.azure_api_version,
            )
        except Exception as e:
            ASCIIColors.error(f"Failed to configure AzureOpenAI client: {e}")
            self.client = None
            raise ConnectionError(f"Could not configure AzureOpenAI client: {e}") from e

    def _construct_parameters(self,
                              temperature: float,
                              top_p: float,
                              n_predict: int,
                              seed: Optional[int]) -> Dict[str, any]:
        """Builds a parameters dictionary for the OpenAI API."""
        params = {}
        if temperature is not None: params['temperature'] = float(temperature)
        if top_p is not None: params['top_p'] = top_p
        if n_predict is not None: params['max_tokens'] = n_predict
        if seed is not None: params['seed'] = seed
        return params
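
For illustration, with the defaults that chat() below passes in (temperature=0.7, top_p=0.9, n_predict=2048, seed=None), the helper produces:

# _construct_parameters(0.7, 0.9, 2048, None)
# -> {'temperature': 0.7, 'top_p': 0.9, 'max_tokens': 2048}  # 'seed' omitted when None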

    def _prepare_messages(self, discussion: LollmsDiscussion, branch_tip_id: Optional[str] = None) -> List[Dict[str, any]]:
        """Prepares the message list for the OpenAI API from a LollmsDiscussion."""
        history = []
        if discussion.system_prompt:
            history.append({"role": "system", "content": discussion.system_prompt})

        for msg in discussion.get_messages(branch_tip_id):
            role = 'user' if msg.sender_type == "user" else 'assistant'

            content_parts = []
            if msg.content:
                content_parts.append({"type": "text", "text": msg.content})

            if msg.images:
                for file_path in msg.images:
                    if is_image_path(file_path):
                        try:
                            with open(file_path, "rb") as image_file:
                                b64_data = base64.b64encode(image_file.read()).decode('utf-8')
                            content_parts.append({
                                "type": "image_url",
                                "image_url": {"url": f"data:{get_media_type(file_path)};base64,{b64_data}"}
                            })
                        except Exception as e:
                            ASCIIColors.warning(f"Could not load image {file_path}: {e}")

            if content_parts:
                history.append({'role': role, 'content': content_parts})
        return history
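
For a one-turn discussion with a system prompt and a single image, the history built above has this shape (values illustrative):

[
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": [
        {"type": "text", "text": "Describe this image."},
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBORw0KG..."}},
    ]},
]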

    def generate_text(self, prompt: str, **kwargs) -> Union[str, dict]:
        """
        Generate text using an Azure OpenAI deployment. This is a wrapper around the chat method.
        Note: The 'chat' method is preferred for multi-turn conversations.
        """
        # Create a temporary discussion to leverage the `chat` method's logic
        temp_discussion = LollmsDiscussion.from_messages([
            LollmsMessage.new_message(sender_type="user", content=prompt, images=kwargs.get("images"))
        ])
        if kwargs.get("system_prompt"):
            temp_discussion.system_prompt = kwargs.get("system_prompt")

        # Pass all relevant kwargs to the chat method
        return self.chat(temp_discussion, **kwargs)

    def chat(self,
             discussion: LollmsDiscussion,
             branch_tip_id: Optional[str] = None,
             n_predict: Optional[int] = 2048,
             stream: Optional[bool] = False,
             temperature: float = 0.7,
             top_p: float = 0.9,
             seed: Optional[int] = None,
             streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
             **kwargs
             ) -> Union[str, dict]:
        """
        Conduct a chat session with the Azure OpenAI deployment.
        """
        if not self.client:
            return {"status": "error", "message": "AzureOpenAI client not initialized."}

        messages = self._prepare_messages(discussion, branch_tip_id)
        api_params = self._construct_parameters(temperature, top_p, n_predict, seed)
        full_response_text = ""

        try:
            response = self.client.chat.completions.create(
                model=self.model_name, # This must be the DEPLOYMENT NAME
                messages=messages,
                stream=stream,
                **api_params
            )

            if stream:
                for chunk in response:
                    delta = chunk.choices[0].delta.content
                    if delta:
                        full_response_text += delta
                        if streaming_callback:
                            if not streaming_callback(delta, MSG_TYPE.MSG_TYPE_CHUNK):
                                break
                return full_response_text
            else:
                return response.choices[0].message.content

        except Exception as ex:
            error_message = f"An unexpected error occurred with Azure OpenAI API: {str(ex)}"
            trace_exception(ex)
            return {"status": "error", "message": error_message}

    def tokenize(self, text: str) -> list:
        """Tokenize text using tiktoken, the tokenizer used by OpenAI models."""
        try:
            encoding = tiktoken.get_encoding("cl100k_base")
            return encoding.encode(text)
        except Exception:
            # Fallback for when tiktoken is not available
            return list(text.encode('utf-8'))

    def detokenize(self, tokens: list) -> str:
        """Detokenize tokens using tiktoken."""
        try:
            encoding = tiktoken.get_encoding("cl100k_base")
            return encoding.decode(tokens)
        except Exception:
            return bytes(tokens).decode('utf-8', errors='ignore')

    def count_tokens(self, text: str) -> int:
        """Count tokens in a text using tiktoken."""
        return len(self.tokenize(text))
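
Since tokenize() and detokenize() share the cl100k_base encoding, ordinary text round-trips. An illustrative check (not part of the package):

ids = binding.tokenize("Hello, Azure!")          # list of cl100k_base token ids
assert binding.detokenize(ids) == "Hello, Azure!"
assert binding.count_tokens("Hello, Azure!") == len(ids)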

    def embed(self, text: str, **kwargs) -> List[float]:
        """
        Get embeddings for the input text using an Azure OpenAI embedding deployment.
        """
        if not self.client:
            raise Exception("AzureOpenAI client not initialized.")

        # The embedding deployment name must be passed via kwargs
        embedding_deployment = kwargs.get("model")
        if not embedding_deployment:
            raise ValueError("An embedding deployment name must be provided via the 'model' kwarg for the embed method.")

        try:
            response = self.client.embeddings.create(
                model=embedding_deployment,
                input=text
            )
            return response.data[0].embedding
        except Exception as ex:
            trace_exception(ex)
            raise Exception(f"Azure OpenAI embedding failed: {str(ex)}") from ex

    def get_model_info(self) -> dict:
        """Return information about the current Azure OpenAI setup."""
        return {
            "name": self.binding_name,
            "version": openai.__version__,
            "host_address": self.azure_endpoint,
            "model_name": self.model_name, # This is the deployment name
            "supports_structured_output": False,
            "supports_vision": True, # Assume modern deployments support vision
        }

    def listModels(self) -> List[Dict[str, str]]:
        """
        Listing models is not supported via the Azure OpenAI API.
        Deployments are managed in the Azure Portal. This method returns an empty list.
        """
        ASCIIColors.warning("Listing models is not supported for Azure OpenAI. Manage deployments in the Azure Portal.")
        return []

    def load_model(self, model_name: str) -> bool:
        """Sets the deployment name for subsequent operations."""
        self.model_name = model_name
        ASCIIColors.info(f"Azure OpenAI deployment set to: {model_name}. It will be used on the next API call.")
        return True

if __name__ == '__main__':
    # Environment variables to set for testing:
    # AZURE_OPENAI_API_KEY: Your Azure OpenAI API key
    # AZURE_OPENAI_ENDPOINT: Your Azure OpenAI endpoint URL (e.g., https://your-resource.openai.azure.com/)
    # AZURE_DEPLOYMENT_NAME: The name of your chat deployment (e.g., gpt-4o)
    # AZURE_VISION_DEPLOYMENT_NAME: The name of a vision-capable deployment (can be the same as above)
    # AZURE_EMBEDDING_DEPLOYMENT_NAME: The name of your embedding deployment (e.g., text-embedding-ada-002)

    if not all(k in os.environ for k in ["AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_DEPLOYMENT_NAME"]):
        ASCIIColors.red("Error: Required environment variables not set.")
        print("Please set AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, and AZURE_DEPLOYMENT_NAME.")
        exit(1)

    ASCIIColors.yellow("--- Testing AzureOpenAIBinding ---")

    test_deployment_name = os.environ["AZURE_DEPLOYMENT_NAME"]

    try:
        # --- Initialization ---
        ASCIIColors.cyan("\n--- Initializing Binding ---")
        binding = AzureOpenAIBinding(model_name=test_deployment_name)
        ASCIIColors.green("Binding initialized successfully.")
        ASCIIColors.info(f"Using openai library version: {openai.__version__}")
        ASCIIColors.info(f"Endpoint: {binding.azure_endpoint}")
        ASCIIColors.info(f"Deployment: {binding.model_name}")

        # --- List Models ---
        ASCIIColors.cyan("\n--- Listing Models ---")
        models = binding.listModels()
        if not models:
            ASCIIColors.green("Correctly returned an empty list for models, as expected for Azure.")

        # --- Count Tokens ---
        ASCIIColors.cyan("\n--- Counting Tokens ---")
        sample_text = "Hello, Azure! This is a test."
        token_count = binding.count_tokens(sample_text)
        ASCIIColors.green(f"Token count for '{sample_text}': {token_count}")

        # --- Text Generation (Non-Streaming) ---
        ASCIIColors.cyan("\n--- Text Generation (Non-Streaming) ---")
        prompt_text = "What is the Azure cloud platform known for? Answer in one sentence."
        generated_text = binding.generate_text(prompt_text, n_predict=100, stream=False)
        if isinstance(generated_text, str):
            ASCIIColors.green(f"Generated text:\n{generated_text}")
        else:
            ASCIIColors.error(f"Generation failed: {generated_text}")

        # --- Text Generation (Streaming) ---
        ASCIIColors.cyan("\n--- Text Generation (Streaming) ---")
        full_streamed_text = ""
        def stream_callback(chunk: str, msg_type: int):
            global full_streamed_text  # this script runs at module scope, so `global` (not `nonlocal`) is required
            ASCIIColors.green(chunk, end="", flush=True)
            full_streamed_text += chunk
            return True

        result = binding.generate_text(prompt_text, n_predict=150, stream=True, streaming_callback=stream_callback)
        print("\n--- End of Stream ---")
        ASCIIColors.green(f"Full streamed text (for verification): {result}")

        # --- Embeddings ---
        if "AZURE_EMBEDDING_DEPLOYMENT_NAME" in os.environ:
            ASCIIColors.cyan("\n--- Embeddings ---")
            embedding_deployment = os.environ["AZURE_EMBEDDING_DEPLOYMENT_NAME"]
            embedding_text = "LoLLMs and Azure make a great team."
            embedding_vector = binding.embed(embedding_text, model=embedding_deployment)
            ASCIIColors.green(f"Embedding for '{embedding_text}' (first 5 dims): {embedding_vector[:5]}...")
            ASCIIColors.info(f"Embedding vector dimension: {len(embedding_vector)}")
        else:
            ASCIIColors.yellow("\nSkipping Embeddings test: AZURE_EMBEDDING_DEPLOYMENT_NAME not set.")

        # --- Vision Model Test ---
        if "AZURE_VISION_DEPLOYMENT_NAME" in os.environ:
            vision_deployment = os.environ["AZURE_VISION_DEPLOYMENT_NAME"]
            dummy_image_path = "azure_dummy_test_image.png"
            try:
                img = Image.new('RGB', (250, 60), color='#0078D4') # Azure blue
                d = ImageDraw.Draw(img)
                d.text((10, 10), "Azure Test Image", fill='white')
                img.save(dummy_image_path)

                ASCIIColors.cyan(f"\n--- Vision Generation (deployment: {vision_deployment}) ---")
                binding.load_model(vision_deployment)
                vision_prompt = "Describe the image. What color is the background and what does the text say?"

                vision_response = binding.generate_text(
                    prompt=vision_prompt,
                    images=[dummy_image_path],
                    n_predict=50,
                    stream=False
                )
                if isinstance(vision_response, str):
                    ASCIIColors.green(f"Vision model response: {vision_response}")
                else:
                    ASCIIColors.error(f"Vision generation failed: {vision_response}")
            finally:
                if os.path.exists(dummy_image_path):
                    os.remove(dummy_image_path)
        else:
            ASCIIColors.yellow("\nSkipping Vision test: AZURE_VISION_DEPLOYMENT_NAME not set.")

    except Exception as e:
        ASCIIColors.error(f"An error occurred during testing: {e}")
        trace_exception(e)

    ASCIIColors.yellow("\nAzureOpenAIBinding test finished.")