lollms-client 0.17.0__py3-none-any.whl → 0.17.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lollms-client might be problematic.

examples/text_2_image.py CHANGED
@@ -14,8 +14,13 @@ LOLLMS_CLIENT_ID = "my_lollms_client_id" # Replace with your actual client ID or
 
 # Initialize LollmsClient, enabling the TTI 'lollms' binding
 # The service_key here is used as client_id by the TTI binding for lollms
+# lc = LollmsClient(
+#     tti_binding_name="lollms"
+# )
+
+# make sure you have a OPENAI_API_KEY environment variable
 lc = LollmsClient(
-    tti_binding_name="lollms"
+    tti_binding_name="dalle"
 )
 
 if not lc.tti:
@@ -79,8 +84,8 @@ def test_generate_image():
     ASCIIColors.cyan("\n--- Testing Generate Image ---")
     prompt = "A futuristic cityscape at sunset, neon lights, flying vehicles"
     negative_prompt = "blurry, low quality, ugly, text, watermark"
-    width = 512
-    height = 512
+    width = 1024
+    height = 1024
     home_dir = Path.home()
    documents_dir = home_dir / "Documents"
    output_filename = documents_dir/"generated_lollms_image.jpg"
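
The hunks above switch the example from the 'lollms' TTI binding to 'dalle' and raise the default resolution from 512x512 to 1024x1024. A minimal sketch of how the updated example is meant to be driven, assuming an OPENAI_API_KEY environment variable is set and that generate_image returns raw image bytes as the example expects:

    import os
    from pathlib import Path
    from lollms_client import LollmsClient

    # The examples skip the dalle TTI binding when OPENAI_API_KEY is absent.
    assert os.environ.get("OPENAI_API_KEY"), "export OPENAI_API_KEY before running"

    lc = LollmsClient(tti_binding_name="dalle")
    image_bytes = lc.tti.generate_image(
        prompt="A futuristic cityscape at sunset, neon lights, flying vehicles",
        negative_prompt="blurry, low quality, ugly, text, watermark",
        width=1024,
        height=1024,
    )
    # The example saves via PIL; writing the bytes directly is a simplification.
    (Path.home() / "Documents" / "generated_lollms_image.jpg").write_bytes(image_bytes)

The following hunk adds a new example script (its first line names it lollms_client/examples/test_tti_bindings.py) that exercises the DALL-E and Diffusers TTI bindings end to end.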
@@ -0,0 +1,274 @@
+# lollms_client/examples/test_tti_bindings.py
+from lollms_client import LollmsClient
+from lollms_client.lollms_types import MSG_TYPE # If using callbacks
+from ascii_colors import ASCIIColors, trace_exception
+from PIL import Image
+from pathlib import Path
+import io
+import os
+import platform # For opening image
+import subprocess # For opening image
+import shutil # For cleanup of diffusers env
+import json # For pretty printing dicts
+from typing import Optional
+try:
+    from huggingface_hub import snapshot_download
+    HUGGINGFACE_HUB_AVAILABLE = True
+except ImportError:
+    HUGGINGFACE_HUB_AVAILABLE = False
+    snapshot_download = None
+    ASCIIColors.warning("huggingface_hub library not found. Diffusers model download test will be skipped.")
+    ASCIIColors.warning("Please install with: pip install huggingface-hub")
+
+LOLLMS_CLIENT_ID = "my_lollms_test_client_id"
+
+# --- Diffusers Test Specific Configuration ---
+# Using a standard Stable Diffusion model
+DIFFUSERS_MODEL_ID = "runwayml/stable-diffusion-v1-5" # Standard SD 1.5 model
+DIFFUSERS_LOCAL_MODEL_NAME = "sd-v1-5-test-model" # Folder name for the downloaded model
+TEMP_DIFFUSERS_ENV_DIR_NAME = "temp_diffusers_lollms_env_for_test"
+BASE_TEST_PATH = Path(__file__).parent
+
+def setup_diffusers_environment(base_path: Path):
+    ASCIIColors.cyan(f"\n--- Setting up Diffusers Test Environment for {DIFFUSERS_MODEL_ID} at {base_path} ---")
+    if not HUGGINGFACE_HUB_AVAILABLE:
+        raise ImportError("huggingface_hub is not available. Cannot set up Diffusers environment.")
+
+    temp_env_root = base_path / TEMP_DIFFUSERS_ENV_DIR_NAME
+    if temp_env_root.exists():
+        shutil.rmtree(temp_env_root)
+    temp_env_root.mkdir(parents=True, exist_ok=True)
+
+    lollms_paths = {
+        "personal_models_path": temp_env_root / "personal_models",
+        "models_zoo_path": temp_env_root / "models_zoo",
+        "shared_cache_path": temp_env_root / "shared_cache",
+        "tti_bindings_path": base_path.parent / "tti_bindings"
+    }
+    for p_key, p_val in lollms_paths.items():
+        if p_key != "tti_bindings_path":
+            Path(p_val).mkdir(parents=True, exist_ok=True)
+
+    diffusers_models_dir = lollms_paths["personal_models_path"] / "diffusers_models"
+    diffusers_models_dir.mkdir(parents=True, exist_ok=True)
+
+    model_target_dir = diffusers_models_dir / DIFFUSERS_LOCAL_MODEL_NAME
+
+    ASCIIColors.info(f"Attempting to download {DIFFUSERS_MODEL_ID} to {model_target_dir}...")
+    try:
+        # SD 1.5 often has fp16 revision, which is smaller and faster if GPU is used
+        # For CPU test, main revision is fine. Safetensors are preferred.
+        snapshot_download(
+            repo_id=DIFFUSERS_MODEL_ID,
+            local_dir=str(model_target_dir),
+            local_dir_use_symlinks=False,
+            cache_dir=str(lollms_paths["shared_cache_path"] / "huggingface_hub_cache"),
+            # revision="fp16", # Optional: if you want the fp16 variant specifically
+            allow_patterns=["*.json", "*.txt", "*.safetensors"], # Prefer safetensors
+            # ignore_patterns=["*.bin", "*.ckpt"], # Ignore older formats if safetensors exist
+        )
+        ASCIIColors.green(f"Model {DIFFUSERS_MODEL_ID} downloaded successfully.")
+
+    except Exception as e:
+        trace_exception(e)
+        ASCIIColors.error(f"Failed to download model {DIFFUSERS_MODEL_ID} or assertion failed: {e}")
+        if model_target_dir.exists():
+            ASCIIColors.info(f"Contents of {model_target_dir} ({model_target_dir.resolve()}):")
+            for item_path in model_target_dir.rglob('*'):
+                if item_path.is_file(): ASCIIColors.info(f" - {item_path.relative_to(model_target_dir)}")
+                elif item_path.is_dir(): ASCIIColors.info(f" DIR: {item_path.relative_to(model_target_dir)}")
+        raise
+
+    binding_instance_config = {
+        "model_id_or_path": DIFFUSERS_LOCAL_MODEL_NAME,
+        "pipeline_class_name": None, # Let AutoPipelineForText2Image handle SD 1.5
+        "device": "cpu", # Keep CPU for stable CI/testing
+        "torch_dtype_str": "float32", # float32 for CPU
+        "num_inference_steps": 10, # Fewer steps for faster test
+        "default_width": 512, # Standard for SD 1.5
+        "default_height": 512,
+        "safety_checker_on": False, # Commonly disabled for local testing
+        "lollms_paths": lollms_paths,
+        "hf_variant": None # If using main revision, no variant needed. Use "fp16" if you downloaded that revision.
+    }
+    return binding_instance_config, lollms_paths
+
+def cleanup_diffusers_environment(base_path: Path):
+    ASCIIColors.cyan("\n--- Cleaning up Diffusers Test Environment ---")
+    temp_env_root = base_path / TEMP_DIFFUSERS_ENV_DIR_NAME
+    if temp_env_root.exists():
+        try:
+            shutil.rmtree(temp_env_root)
+            ASCIIColors.info(f"Cleaned up Diffusers temp environment: {temp_env_root}")
+        except Exception as e:
+            ASCIIColors.warning(f"Could not fully clean up {temp_env_root}: {e}")
+            trace_exception(e)
+
+def test_list_tti_services(lc: LollmsClient):
+    ASCIIColors.cyan("\n--- Testing List TTI Services ---")
+    try:
+        services = lc.tti.list_services()
+        if services:
+            ASCIIColors.green(f"Available TTI Services for binding '{lc.tti.binding_name}':")
+            for i, service in enumerate(services):
+                print(f" {i+1}. Name: {service.get('name')}, Caption: {service.get('caption')}, Help: {service.get('help')}")
+            assert len(services) > 0, "Expected at least one service to be listed."
+        else:
+            ASCIIColors.yellow("No TTI services listed or an empty list was returned.")
+    except Exception as e:
+        ASCIIColors.error(f"Error listing TTI services: {e}")
+        trace_exception(e)
+        raise
+
+def test_get_tti_settings(lc: LollmsClient):
+    ASCIIColors.cyan("\n--- Testing Get Active TTI Settings ---")
+    try:
+        settings = lc.tti.get_settings() # This should be a list of dicts
+        if settings:
+            ASCIIColors.green(f"Current Active TTI Settings/Template for binding '{lc.tti.binding_name}':")
+            for setting_item in settings:
+                # Ensure setting_item is a dictionary before trying to access .get()
+                if isinstance(setting_item, dict):
+                    print(f" - Name: {setting_item.get('name')}, Type: {setting_item.get('type')}, Value: {setting_item.get('value')}")
+                else:
+                    ASCIIColors.warning(f"Found non-dict item in settings list: {setting_item}")
+            assert isinstance(settings, list) and len(settings) > 0, "Expected settings to be a non-empty list of dicts."
+        elif isinstance(settings, dict) and not settings:
+            ASCIIColors.yellow("No active TTI service or settings configured on the server (empty dict).")
+        else:
+            ASCIIColors.yellow("Could not retrieve TTI settings or format unexpected.")
+            print(f"Received: {settings}")
+    except Exception as e:
+        ASCIIColors.error(f"Error getting TTI settings: {e}")
+        trace_exception(e)
+        raise
+
+def test_set_tti_settings(lc: LollmsClient):
+    ASCIIColors.cyan("\n--- Testing Set Active TTI Settings ---")
+    if lc.tti.binding_name == "diffusers":
+        ASCIIColors.info("Attempting to change 'num_inference_steps' for Diffusers.")
+        try:
+            original_settings = lc.tti.get_settings()
+            original_steps = None
+            for s_item in original_settings:
+                if isinstance(s_item, dict) and s_item.get('name') == 'num_inference_steps':
+                    original_steps = s_item['value']
+                    break
+            assert original_steps is not None, "Could not find 'num_inference_steps' in Diffusers settings."
+
+            new_steps = int(original_steps) + 1
+            settings_to_set = [{"name": "num_inference_steps", "value": new_steps}]
+
+            success = lc.tti.set_settings(settings_to_set)
+            if success:
+                ASCIIColors.green(f"Successfully sent request to set 'num_inference_steps' to {new_steps}.")
+                current_settings_after_set = lc.tti.get_settings()
+                current_config_steps = None
+                for s_item_after in current_settings_after_set:
+                    if isinstance(s_item_after, dict) and s_item_after.get('name') == 'num_inference_steps':
+                        current_config_steps = s_item_after['value']
+                        break
+                assert current_config_steps == new_steps, f"Verification failed: settings show {current_config_steps}, expected {new_steps}"
+                ASCIIColors.green("Setting change verified in binding's settings.")
+            else:
+                ASCIIColors.red("Failed to set TTI settings (binding indicated failure or no change).")
+                assert False, "set_settings returned False"
+        except Exception as e:
+            ASCIIColors.error(f"Error setting Diffusers TTI settings: {e}")
+            trace_exception(e)
+            raise
+    else:
+        ASCIIColors.yellow(f"Skipping actual setting change for '{lc.tti.binding_name}' in this detailed test.")
+
+def test_generate_image(lc: LollmsClient, output_filename: Path, prompt: str, negative_prompt: Optional[str], width: int, height: int):
+    ASCIIColors.cyan(f"\n--- Testing Generate Image ({lc.tti.binding_name}) ---")
+    ASCIIColors.info(f"Output to: {output_filename}")
+    ASCIIColors.info(f"Prompt: {prompt}")
+    if negative_prompt: ASCIIColors.info(f"Negative Prompt: {negative_prompt}")
+    ASCIIColors.info(f"Dimensions: {width}x{height}")
+
+    try:
+        image_bytes = lc.tti.generate_image(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            width=width,
+            height=height
+        )
+
+        if image_bytes:
+            ASCIIColors.green(f"Image generated successfully ({len(image_bytes)} bytes).")
+            try:
+                image = Image.open(io.BytesIO(image_bytes))
+                image.save(output_filename)
+                ASCIIColors.green(f"Image saved as {output_filename}")
+                if os.name == 'nt':
+                    os.startfile(str(output_filename))
+                elif os.name == 'posix':
+                    try:
+                        opener = "open" if platform.system() == "Darwin" else "xdg-open"
+                        subprocess.run([opener, str(output_filename)], check=False, timeout=5)
+                    except Exception:
+                        ASCIIColors.yellow(f"Could not auto-open image. Please find it at {output_filename}")
+            except Exception as e:
+                ASCIIColors.error(f"Error processing or saving image: {e}")
+                trace_exception(e)
+                raw_output_filename = output_filename.with_suffix(".raw_data")
+                with open(raw_output_filename, "wb") as f_raw: f_raw.write(image_bytes)
+                ASCIIColors.yellow(f"Raw image data saved as {raw_output_filename} for inspection.")
+                raise
+        else:
+            ASCIIColors.red("Image generation returned empty bytes.")
+            assert False, "Image generation returned empty bytes"
+    except Exception as e:
+        ASCIIColors.error(f"Error during image generation: {e}")
+        trace_exception(e)
+        raise
+
+if __name__ == "__main__":
+    # --- DALL-E Test ---
+    ASCIIColors.magenta("\n\n========== DALL-E Binding Test ==========")
+    if not os.environ.get("OPENAI_API_KEY"):
+        ASCIIColors.warning("OPENAI_API_KEY environment variable not set. Skipping DALL-E tests.")
+    else:
+        try:
+            lc_dalle = LollmsClient(tti_binding_name="dalle", service_key=LOLLMS_CLIENT_ID)
+            if not lc_dalle.tti: ASCIIColors.error("DALL-E TTI binding could not be initialized.")
+            else:
+                test_list_tti_services(lc_dalle)
+                test_get_tti_settings(lc_dalle)
+                test_set_tti_settings(lc_dalle)
+                test_generate_image(lc_dalle, BASE_TEST_PATH / "generated_dalle_image.png",
+                                    "A vibrant oil painting of a mythical creature in an enchanted forest",
+                                    "photorealistic, modern, ugly, deformed", 1024, 1024)
+        except Exception as e:
+            ASCIIColors.error(f"DALL-E test block failed: {e}"); trace_exception(e)
+
+    # --- Diffusers Test ---
+    ASCIIColors.magenta("\n\n========== Diffusers Binding Test ==========")
+    if not HUGGINGFACE_HUB_AVAILABLE:
+        ASCIIColors.warning("Skipping Diffusers tests as huggingface_hub is not available.")
+    else:
+        diffusers_binding_config = None
+        try:
+            diffusers_binding_config, _ = setup_diffusers_environment(BASE_TEST_PATH)
+            lc_diffusers = LollmsClient(tti_binding_name="diffusers", binding_config=diffusers_binding_config, service_key=LOLLMS_CLIENT_ID)
+            if not lc_diffusers.tti: raise RuntimeError("Diffusers TTI binding failed to initialize")
+
+            test_list_tti_services(lc_diffusers)
+            test_get_tti_settings(lc_diffusers)
+            test_set_tti_settings(lc_diffusers)
+
+            gen_width = lc_diffusers.tti.config.get("default_width", 512)
+            gen_height = lc_diffusers.tti.config.get("default_height", 512)
+            diffusers_prompt = "A majestic griffin soaring through a cloudy sky, detailed feathers, fantasy art"
+            diffusers_negative_prompt = "ugly, blurry, low quality, watermark, text, simple background"
+
+            test_generate_image(lc_diffusers, BASE_TEST_PATH / "generated_diffusers_image.png",
+                                diffusers_prompt, diffusers_negative_prompt,
+                                gen_width, gen_height)
+        except Exception as e:
+            ASCIIColors.error(f"Diffusers test block failed: {e}"); trace_exception(e)
+        finally:
+            cleanup_diffusers_environment(BASE_TEST_PATH)
+
+    ASCIIColors.magenta("\n\n========== All Tests Finished ==========")
lollms_client/__init__.py CHANGED
@@ -6,7 +6,7 @@ from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
 from lollms_client.lollms_utilities import PromptReshaper # Keep general utilities
 from lollms_client.lollms_functions import FunctionCalling_Library
 
-__version__ = "0.17.0"
+__version__ = "0.17.2"
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
@@ -24,7 +24,7 @@ pm.ensure_packages(["requests", "pillow"]) # pillow for dummy image in test
 if not pm.is_installed("llama-cpp-binaries"):
     def install_llama_cpp():
         system = platform.system()
-        python_version_simple = f"py{sys.version_info.major}{sys.version_info.minor}" # e.g. py310 for 3.10
+        python_version_simple = f"py{sys.version_info.major}" # e.g. py310 for 3.10
 
         # Determine CUDA suffix based on common recent versions. Adjust if needed.
         # For simplicity, we'll target a common recent CUDA version.
@@ -35,13 +35,13 @@ if not pm.is_installed("llama-cpp-binaries"):
 
 
         if system == "Windows":
-            # llama_cpp_binaries-0.12.0+cu124-py3-none-win_amd64.whl
-            url = f"https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.12.0/llama_cpp_binaries-0.12.0{cuda_suffix}-{python_version_simple}-none-win_amd64.whl"
-            fallback_url = "https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.12.0/llama_cpp_binaries-0.12.0+cu124-py3-none-win_amd64.whl" # Generic py3
+            # llama_cpp_binaries-0.14.0+cu124-py3-none-win_amd64.whl
+            url = f"https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.12.0/llama_cpp_binaries-0.14.0{cuda_suffix}-{python_version_simple}-none-win_amd64.whl"
+            fallback_url = "https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.14.0/llama_cpp_binaries-0.14.0+cu124-py3-none-win_amd64.whl" # Generic py3
         elif system == "Linux":
-            # llama_cpp_binaries-0.12.0+cu124-py3-none-linux_x86_64.whl
-            url = f"https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.12.0/llama_cpp_binaries-0.12.0{cuda_suffix}-{python_version_simple}-none-linux_x86_64.whl"
-            fallback_url = "https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.12.0/llama_cpp_binaries-0.12.0+cu124-py3-none-linux_x86_64.whl" # Generic py3
+            # llama_cpp_binaries-0.14.0+cu124-py3-none-linux_x86_64.whl
+            url = f"https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.14.0/llama_cpp_binaries-0.14.0{cuda_suffix}-{python_version_simple}-none-linux_x86_64.whl"
+            fallback_url = "https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.14.0/llama_cpp_binaries-0.14.0+cu124-py3-none-linux_x86_64.whl" # Generic py3
         else:
             ASCIIColors.warning(f"Unsupported OS for prebuilt llama-cpp-binaries: {system}. Please install manually.")
             return
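
A small sketch of the URL the new code builds (names taken from the hunk; the cuda_suffix value is illustrative, it is computed elsewhere in the function, and the resulting filename is not verified against the GitHub release):

    import sys

    python_version_simple = f"py{sys.version_info.major}"  # now e.g. "py3" rather than "py310"
    cuda_suffix = "+cu124"  # illustrative value, matching the fallback URLs

    url = (
        "https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.14.0/"
        f"llama_cpp_binaries-0.14.0{cuda_suffix}-{python_version_simple}-none-linux_x86_64.whl"
    )
    # -> .../v0.14.0/llama_cpp_binaries-0.14.0+cu124-py3-none-linux_x86_64.whl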
@@ -915,4 +915,4 @@ if __name__ == '__main__':
     else:
         ASCIIColors.green("All servers shut down correctly.")
 
-    ASCIIColors.yellow("\nLlamaCppServerBinding test finished.")
+    ASCIIColors.yellow("\nLlamaCppServerBinding test finished.")
@@ -61,11 +61,12 @@ class LollmsClient():
                  ctx_size: Optional[int] = 8192,
                  n_predict: Optional[int] = 4096,
                  stream: bool = False,
-                 temperature: float = 0.1,
-                 top_k: int = 50,
-                 top_p: float = 0.95,
-                 repeat_penalty: float = 0.8,
-                 repeat_last_n: int = 40,
+                 temperature: float = 0.7, # Ollama default is 0.8, common default 0.7
+                 top_k: int = 40, # Ollama default is 40
+                 top_p: float = 0.9, # Ollama default is 0.9
+                 repeat_penalty: float = 1.1, # Ollama default is 1.1
+                 repeat_last_n: int = 64, # Ollama default is 64
+
                  seed: Optional[int] = None,
                  n_threads: int = 8,
                  streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
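
These constructor defaults move to Ollama-style sampling values. A hedged sketch of pinning them explicitly when building a client (keyword names taken from the hunk; binding-related arguments are omitted and left at their defaults):

    from lollms_client import LollmsClient

    # Equivalent to the new defaults; pass different values to override them.
    lc = LollmsClient(
        temperature=0.7,
        top_k=40,
        top_p=0.9,
        repeat_penalty=1.1,
        repeat_last_n=64,
    )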
@@ -157,29 +157,25 @@ class LollmsLLMBinding(ABC):
 
 
     def split_discussion(self, lollms_prompt_string: str, system_keyword="!@>system:", user_keyword="!@>user:", ai_keyword="!@>assistant:") -> list:
-
         """
-        Abra-cadabra! Splits a combined LoLLMs prompt string into a list of
-        OpenAI chat messages.
-
-        Each segment starting with system_keyword/user_keyword/ai_keyword
-        becomes a {"role": ..., "content": ...} dict.
+        Splits a LoLLMs prompt into a list of OpenAI-style messages.
+        If the very first chunk has no prefix, it's assigned to "system".
         """
-        # Build a regex that looks ahead for any of the three markers
+        # Regex to split on any of the three prefixes (lookahead)
         pattern = r"(?={}|{}|{})".format(
             re.escape(system_keyword),
             re.escape(user_keyword),
             re.escape(ai_keyword)
         )
-        # Split the big string into little bits at each keyword
         parts = re.split(pattern, lollms_prompt_string)
         messages = []
 
         for part in parts:
             part = part.strip()
             if not part:
-                continue # Skip empty rabbitholes
+                continue
 
+            # Determine role and strip prefix if present
             if part.startswith(system_keyword):
                 role = "system"
                 content = part[len(system_keyword):].strip()
@@ -190,16 +186,22 @@ class LollmsLLMBinding(ABC):
                 role = "assistant"
                 content = part[len(ai_keyword):].strip()
             else:
-                # Unknown segment—maybe a ghost?
-                continue
+                # No prefix: if it's the first valid chunk, treat as system
+                if not messages:
+                    role = "system"
+                    content = part
+                else:
+                    # otherwise skip unrecognized segments
+                    continue
 
             messages.append({"role": role, "content": content})
-            if messages[-1]["content"]=="":
-                del messages[-1]
+            if messages[-1]["content"]=="":
+                del messages[-1]
 
         return messages
 
 
+
 class LollmsLLMBindingManager:
     """Manages binding discovery and instantiation"""
 
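
The behavioural change in split_discussion is that a leading chunk without any prefix is now kept as a "system" message instead of being dropped. A rough illustration, assuming binding is an instance of any concrete LollmsLLMBinding subclass:

    prompt = (
        "You are a helpful assistant.\n"
        "!@>user: Hello there\n"
        "!@>assistant: Hi! How can I help?"
    )
    messages = binding.split_discussion(prompt)
    # Expected with the new code:
    # [{"role": "system", "content": "You are a helpful assistant."},
    #  {"role": "user", "content": "Hello there"},
    #  {"role": "assistant", "content": "Hi! How can I help?"}]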
@@ -30,7 +30,7 @@ try:
     torch_index_url = None
     if preferred_torch_device_for_install == "cuda":
         # Specify a common CUDA version index. Pip should resolve the correct torch version.
-        # As of late 2023/early 2024, cu118 or cu121 are common. Let's use cu121.
+        # As of late 2023/early 2024, cu118 or cu121 are common. Let's use cu126.
         # Users with different CUDA setups might need to pre-install torch manually.
         torch_index_url = "https://download.pytorch.org/whl/cu126"
         ASCIIColors.info(f"Attempting to ensure PyTorch with CUDA support (target index: {torch_index_url})")