lollms-client 1.7.13__py3-none-any.whl → 1.8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -471,30 +471,156 @@ class LollmsMessage:
             if i < len(self.active_images) and self.active_images[i]
         ]
 
+    def _sync_active_images_flags(self):
+        """Re-calculates active_images boolean list based on image_groups metadata."""
+        current_images = self.images or []
+        if not current_images:
+            self.active_images = []
+            return
+
+        metadata = self.metadata or {}
+        # Support legacy key "image_generation_groups" by merging or checking both
+        groups = metadata.get("image_groups", []) + metadata.get("image_generation_groups", [])
+
+        new_active_flags = [False] * len(current_images)
+
+        grouped_indices = set()
+
+        for group in groups:
+            indices = group.get("indices", [])
+            for i in indices:
+                grouped_indices.add(i)
+
+            # Group is active by default unless explicitly False
+            is_group_active = group.get("is_active", True)
+
+            if is_group_active:
+                main_idx = group.get("main_image_index")
+                # Fallback: if main_idx invalid, use first index
+                if main_idx is None or main_idx not in indices:
+                    if indices:
+                        main_idx = indices[0]
+
+                if main_idx is not None and 0 <= main_idx < len(new_active_flags):
+                    new_active_flags[main_idx] = True
+
+        # Legacy/Ungrouped images are active by default
+        for i in range(len(current_images)):
+            if i not in grouped_indices:
+                new_active_flags[i] = True
+
+        self.active_images = new_active_flags
+
     def toggle_image_activation(self, index: int, active: Optional[bool] = None):
         """
         Toggles or sets the activation status of an image at a given index.
-        This change is committed to the database if the discussion is DB-backed.
+        This handles groups/packs: if the image belongs to a group, enabling it
+        will disable others in that group and set it as main.
 
         Args:
             index: The index of the image in the 'images' list.
-            active: If provided, sets the status to this boolean. If None, toggles the current status.
+            active: If provided, sets the status to this boolean. If None, toggles.
         """
-        if not self.images or index >= len(self.images):
-            raise IndexError("Image index out of range.")
-
-        # Initialize active_images if it's missing or mismatched
-        if self.active_images is None or not isinstance(self.active_images, list) or len(self.active_images) != len(self.images):
-            new_active_images = [True] * len(self.images)
+        metadata = (self.metadata or {}).copy()
+        groups = metadata.get("image_groups", []) + metadata.get("image_generation_groups", [])
+
+        target_group = next((g for g in groups if index in g.get("indices", [])), None)
+
+        if target_group:
+            # If explicit active state provided
+            if active is not None:
+                if active:
+                    target_group["is_active"] = True
+                    target_group["main_image_index"] = index
+                else:
+                    # If setting specific image to inactive
+                    if target_group.get("main_image_index") == index:
+                        target_group["is_active"] = False
+            else:
+                # Toggle logic
+                # If clicking the currently active main image -> toggle group active state
+                if target_group.get("main_image_index") == index:
+                    target_group["is_active"] = not target_group.get("is_active", True)
+                else:
+                    # Select new image and ensure group is active
+                    target_group["main_image_index"] = index
+                    target_group["is_active"] = True
+
+            self.metadata = metadata
+            self._sync_active_images_flags()
+
         else:
-            new_active_images = self.active_images.copy()
+            # Ungrouped image - wrap in a new single-item group to persist state
+            new_group = {
+                "id": str(uuid.uuid4()),
+                "type": "upload",
+                "indices": [index],
+                "created_at": datetime.utcnow().isoformat(),
+                "main_image_index": index,
+                "is_active": active if active is not None else not (self.active_images and self.active_images[index])
+            }
+            if "image_groups" not in metadata:
+                metadata["image_groups"] = []
+            metadata["image_groups"].append(new_group)
+
+            self.metadata = metadata
+            self._sync_active_images_flags()
 
-        if active is None:
-            new_active_images[index] = not new_active_images[index]
-        else:
-            new_active_images[index] = bool(active) # Ensure it's a boolean
+        if self._discussion._is_db_backed:
+            self._discussion.commit()
+
+    def add_image_pack(self, images: List[str], group_type: str = "generated", active_by_default: bool = True, title: str = None) -> None:
+        """
+        Adds a list of images as a new pack/group.
+
+        - The new images are appended to the existing images list.
+        - A new group entry is added to 'image_groups' metadata with 'main_image_index' and 'is_active'.
+
+        Args:
+            images: List of base64 image strings.
+            group_type: Type label for the group (e.g., 'generated', 'upload').
+            active_by_default: If True, the pack is activated.
+            title: Optional title for the image pack (e.g., the prompt).
+        """
+        if not images:
+            return
+
+        current_images = self.images or []
+        start_index = len(current_images)
+
+        # Append new images
+        current_images.extend(images)
+        self.images = current_images
+
+        # Update Metadata with Group info
+        metadata = (self.metadata or {}).copy()
+        groups = metadata.get("image_groups", [])
+
+        new_indices = list(range(start_index, start_index + len(images)))
+
+        # Default main image is the first one in the pack
+        main_image_idx = new_indices[0] if new_indices else None
+
+        group_entry = {
+            "id": str(uuid.uuid4()),
+            "type": group_type,
+            "indices": new_indices,
+            "created_at": datetime.utcnow().isoformat(),
+            "main_image_index": main_image_idx,
+            "is_active": active_by_default
+        }
+
+        if title:
+            group_entry["title"] = title
+
+        groups.append(group_entry)
+
+        metadata["image_groups"] = groups
+        self.metadata = metadata
+
+        # Sync active_images list based on new group data
+        self._sync_active_images_flags()
 
-        self.active_images = new_active_images
         if self._discussion._is_db_backed:
             self._discussion.commit()
 
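
For orientation, the flag computation these new helpers perform can be reproduced on plain dictionaries. The sketch below mirrors the _sync_active_images_flags logic; the function name active_flags and the sample group data are illustrative, not part of the package:

    from typing import Dict, List

    def active_flags(image_count: int, groups: List[Dict]) -> List[bool]:
        """Mirror of the _sync_active_images_flags rule: each active group contributes
        exactly one image (its main_image_index); ungrouped images stay active."""
        flags = [False] * image_count
        grouped = set()
        for group in groups:
            indices = group.get("indices", [])
            grouped.update(indices)
            if group.get("is_active", True):
                main_idx = group.get("main_image_index")
                if main_idx is None or main_idx not in indices:
                    main_idx = indices[0] if indices else None
                if main_idx is not None and 0 <= main_idx < image_count:
                    flags[main_idx] = True
        for i in range(image_count):
            if i not in grouped:
                flags[i] = True
        return flags

    # Hypothetical message with 4 images: indices 0-2 form a generated pack with
    # image 1 selected as main; index 3 is a legacy ungrouped upload.
    groups = [{"indices": [0, 1, 2], "main_image_index": 1, "is_active": True, "type": "generated"}]
    print(active_flags(4, groups))  # [False, True, False, True]
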
@@ -962,11 +1088,34 @@ class LollmsDiscussion:
         else:
             new_msg_orm = SimpleNamespace(**message_data)
             self._db_discussion.messages.append(new_msg_orm)
+
+        # Wrap the new message immediately to perform updates
+        wrapped_msg = LollmsMessage(self, new_msg_orm)
+
+        # Automatically group user uploaded images if present and not already grouped
+        images_list = kwargs.get('images', [])
+        if images_list and kwargs.get('sender_type') == 'user':
+            meta = wrapped_msg.metadata or {}
+            # Check if groups already exist (e.g. if loaded from JSON backup)
+            if not meta.get('image_groups'):
+                groups = []
+                for i in range(len(images_list)):
+                    groups.append({
+                        "id": str(uuid.uuid4()),
+                        "type": "upload",
+                        "indices": [i],
+                        "created_at": datetime.utcnow().isoformat(),
+                        "main_image_index": i,
+                        "is_active": True
+                    })
+                meta["image_groups"] = groups
+                wrapped_msg.metadata = meta
+                wrapped_msg._sync_active_images_flags()
 
         self.active_branch_id = msg_id # New message is always a leaf
         self._message_index[msg_id] = new_msg_orm
         self.touch()
-        return LollmsMessage(self, new_msg_orm)
+        return wrapped_msg
 
     def get_branch(self, leaf_id: Optional[str]) -> List[LollmsMessage]:
         """Traces a branch of the conversation from a leaf message back to the root.
@@ -1115,7 +1264,9 @@ class LollmsDiscussion:
         if user_msg_orm.sender_type != 'user':
             raise ValueError(f"Regeneration failed: active branch tip is a '{user_msg_orm.sender_type}' message, not 'user'.")
         user_msg = LollmsMessage(self, user_msg_orm)
-        images = user_msg.images
+        # FIX: Use get_active_images() to ensure we get a list of strings, not potentially objects/dicts.
+        # This prevents errors if the underlying 'images' field contains new-style structured data.
+        images = user_msg.get_active_images()
 
         # extract personality data
         if personality is not None:
@@ -1249,7 +1400,7 @@ class LollmsDiscussion:
             prompt_for_chat = self.export("markdown", branch_tip_id if branch_tip_id else self.active_branch_id)
             ASCIIColors.cyan("\n" + "="*50 + f"\n--- DEBUG: SIMPLE CHAT PROMPT ---\n{prompt_for_chat}\n" + "="*50 + "\n")
 
-        final_raw_response = self.lollmsClient.chat(self, images=images, **kwargs) or ""
+        final_raw_response = self.lollmsClient.chat(self, images=images, branch_tip_id=branch_tip_id, **kwargs) or ""
 
         if debug:
             ASCIIColors.cyan("\n" + "="*50 + f"\n--- DEBUG: RAW SIMPLE CHAT RESPONSE ---\n{final_raw_response}\n" + "="*50 + "\n")
@@ -1299,10 +1450,8 @@ class LollmsDiscussion:
     def regenerate_branch(self, branch_tip_id: Optional[str] = None, **kwargs) -> Dict[str, Any]:
         """Regenerates the AI response for a given message or the active branch's AI response.
 
-        If the target is an AI message, it's deleted and its children are re-parented to its parent
-        (the user message). A new AI response is then generated from that user message.
-        If the target is a user message, all its existing AI children are deleted, and their
-        descendants are re-parented to the user message. A new AI response is then generated.
+        Instead of deleting the old response, this method simply starts a new generation
+        from the parent message, creating a new branch (sibling to the original response).
 
         Args:
             branch_tip_id (Optional[str]): The ID of the message to regenerate from.
@@ -1333,23 +1482,6 @@ class LollmsDiscussion:
         else:
             raise ValueError(f"Regeneration failed: Target message '{target_id}' is of an unexpected sender type '{target_message_orm.sender_type}'.")
 
-        ai_messages_to_overwrite_ids = set()
-        if target_message_orm.sender_type == 'assistant':
-            # If target is an AI, we only remove this specific AI message.
-            ai_messages_to_overwrite_ids.add(target_message_orm.id)
-        elif target_message_orm.sender_type == 'user':
-            # If target is a user, we remove ALL AI children of this user message.
-            for msg_obj in self._db_discussion.messages:
-                if msg_obj.parent_id == user_msg_to_regenerate_from.id and msg_obj.sender_type == 'assistant':
-                    ai_messages_to_overwrite_ids.add(msg_obj.id)
-
-        if not ai_messages_to_overwrite_ids:
-            ASCIIColors.warning(f"No AI messages found to regenerate from '{target_id}'. This might be unintended.")
-            # If no AI messages to overwrite, just proceed with generation from user message.
-            # No changes to existing messages needed, so skip the cleanup phase.
-            self.active_branch_id = user_msg_to_regenerate_from.id # Ensure active branch is correct for chat
-            return self.chat(user_message="", add_user_message=False, branch_tip_id=user_msg_to_regenerate_from.id, **kwargs)
-
         # --- Phase 1: Generate new AI response ---
         # The user message for the new generation is user_msg_to_regenerate_from
         self.active_branch_id = user_msg_to_regenerate_from.id
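
The behavioral shift can be modeled with a toy message tree (a sketch only, not the LollmsDiscussion API): instead of deleting the previous AI answer, regeneration now appends a sibling assistant node under the same user message and makes it the new branch tip.

    import uuid
    from dataclasses import dataclass, field
    from typing import List, Optional

    @dataclass
    class Node:
        sender_type: str
        content: str
        parent: Optional["Node"] = None
        children: List["Node"] = field(default_factory=list)
        id: str = field(default_factory=lambda: str(uuid.uuid4()))

    def regenerate(old_ai: Node, new_content: str) -> Node:
        """Keep the old answer; add a sibling under the same user message."""
        user = old_ai.parent
        sibling = Node("assistant", new_content, parent=user)
        user.children.append(sibling)
        return sibling  # the new active branch tip

    user = Node("user", "Hello")
    first = Node("assistant", "Hi!", parent=user)
    user.children.append(first)
    second = regenerate(first, "Hello there!")
    print(len(user.children))  # 2 -> both answers remain as sibling branches
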
@@ -140,7 +140,6 @@ class LollmsMCPBindingManager:
         if binding_class_to_instantiate:
             try:
                 return binding_class_to_instantiate(
-                    binding_name=binding_name,
                     **kwargs
                 )
             except Exception as e:
@@ -88,7 +88,7 @@ class LollmsSTTBindingManager:
                     "service_key": service_key,
                     "verify_ssl_certificate": verify_ssl_certificate
                 })
-                return binding_class(binding_name=binding_name, **kwargs)
+                return binding_class(**kwargs)
             except Exception as e:
                 trace_exception(e)
                 print(f"Failed to instantiate STT binding {binding_name}: {str(e)}")
@@ -105,7 +105,7 @@ class LollmsTTIBindingManager:
         binding_class = self.available_bindings.get(binding_name)
         if binding_class:
             try:
-                return binding_class(binding_name=binding_name, **kwargs)
+                return binding_class(**kwargs)
             except Exception as e:
                 trace_exception(e)
                 print(f"Failed to instantiate TTI binding {binding_name}: {str(e)}")
@@ -77,7 +77,7 @@ class LollmsTTMBindingManager:
         binding_class = self.available_bindings.get(binding_name)
         if binding_class:
             try:
-                return binding_class(binding_name=binding_name, **kwargs)
+                return binding_class(**kwargs)
             except Exception as e:
                 trace_exception(e)
                 print(f"Failed to instantiate TTM binding {binding_name}: {str(e)}")
@@ -75,7 +75,7 @@ class LollmsTTSBindingManager:
 
         binding_class = self.available_bindings.get(binding_name)
         if binding_class:
-            return binding_class(binding_name=binding_name, **kwargs)
+            return binding_class(**kwargs)
         return None
 
 
@@ -76,7 +76,7 @@ class LollmsTTVBindingManager:
         binding_class = self.available_bindings.get(binding_name)
         if binding_class:
             try:
-                return binding_class(binding_name=binding_name, **kwargs)
+                return binding_class(**kwargs)
             except Exception as e:
                 trace_exception(e)
                 print(f"Failed to instantiate TTV binding {binding_name}: {str(e)}")
@@ -0,0 +1,341 @@
+import requests
+import json
+import base64
+from pathlib import Path
+from typing import Optional, List, Dict, Any, Union, Callable
+from lollms_client.lollms_tti_binding import LollmsTTIBinding
+from lollms_client.lollms_types import MSG_TYPE
+from ascii_colors import ASCIIColors, trace_exception
+
+BindingName = "OpenRouterTTIBinding"
+
+class OpenRouterTTIBinding(LollmsTTIBinding):
+    def __init__(self, **kwargs):
+        # Prioritize 'model_name' but accept 'model' as an alias from config files.
+        if 'model' in kwargs and 'model_name' not in kwargs:
+            kwargs['model_name'] = kwargs.pop('model')
+
+        # The manager passes binding_name in kwargs. To avoid "multiple values for keyword argument",
+        # we pop it and use the local BindingName constant or the passed value.
+        binding_name = kwargs.pop('binding_name', BindingName)
+        super().__init__(binding_name=binding_name, **kwargs)
+
+        self.service_key = kwargs.get("service_key", "")
+        # Default to a known stable image-capable model
+        self.model_name = kwargs.get("model_name", "google/gemini-2.0-flash-exp:free")
+        self.host_address = "https://openrouter.ai/api/v1"
+        self.config = kwargs
+
+    def _get_aspect_ratio(self, width: int, height: int) -> str:
+        """Helper to map width/height to OpenRouter supported aspect ratios."""
+        ratio = width / height
+        if abs(ratio - 1.0) < 0.1: return "1:1"
+        if abs(ratio - 0.66) < 0.1: return "2:3"
+        if abs(ratio - 1.5) < 0.1: return "3:2"
+        if abs(ratio - 0.75) < 0.1: return "3:4"
+        if abs(ratio - 1.33) < 0.1: return "4:3"
+        if abs(ratio - 0.8) < 0.1: return "4:5"
+        if abs(ratio - 1.25) < 0.1: return "5:4"
+        if abs(ratio - 0.56) < 0.1: return "9:16"
+        if abs(ratio - 1.77) < 0.1: return "16:9"
+        if abs(ratio - 2.33) < 0.1: return "21:9"
+        return "1:1"
+
+    def generate_image(self,
+                       prompt: str,
+                       negative_prompt: str = "",
+                       width: int = 1024,
+                       height: int = 1024,
+                       **kwargs) -> bytes:
+        """
+        Generates an image using Open Router's /chat/completions endpoint with modalities.
+        """
+        model = kwargs.get("model_name", self.model_name)
+
+        headers = {
+            "Authorization": f"Bearer {self.service_key}",
+            "Content-Type": "application/json",
+            "HTTP-Referer": "https://github.com/ParisNeo/lollms_client",
+            "X-Title": "LoLLMS Client"
+        }
+
+        # Open Router specific payload using Chat Completions
+        payload = {
+            "model": model,
+            "messages": [
+                {
+                    "role": "user",
+                    "content": prompt if not negative_prompt else f"{prompt}\n\nNegative prompt: {negative_prompt}"
+                }
+            ],
+            "modalities": ["image", "text"],
+            "stream": False
+        }
+
+        # Add image configuration if relevant
+        image_config = {}
+        aspect_ratio = kwargs.get("aspect_ratio")
+        if not aspect_ratio:
+            aspect_ratio = self._get_aspect_ratio(width, height)
+
+        image_config["aspect_ratio"] = aspect_ratio
+
+        if "image_size" in kwargs:
+            image_config["image_size"] = kwargs["image_size"]
+
+        payload["image_config"] = image_config
+
+        try:
+            ASCIIColors.info(f"Open Router TTI Request: Model={model}, Aspect Ratio={aspect_ratio}")
+            response = requests.post(
+                f"{self.host_address}/chat/completions",
+                headers=headers,
+                json=payload,
+                timeout=300
+            )
+            response.raise_for_status()
+
+            result = response.json()
+            if result.get("choices"):
+                message = result["choices"][0].get("message", {})
+                images = message.get("images", [])
+
+                if images:
+                    image_url = images[0]["image_url"]["url"]
+                    if image_url.startswith("data:image"):
+                        base64_str = image_url.split(",")[1]
+                        return base64.b64decode(base64_str)
+                    else:
+                        img_res = requests.get(image_url)
+                        img_res.raise_for_status()
+                        return img_res.content
+
+            raise ValueError(f"No image found in Open Router response: {result}")
+
+        except Exception as e:
+            ASCIIColors.error(f"Open Router TTI Error: {e}")
+            trace_exception(e)
+            return None
+
+    def list_models(self) -> List[Dict[str, Any]]:
+        """
+        Lists available models from Open Router and filters for TTI capabilities.
+        """
+        try:
+            response = requests.get(f"{self.host_address}/models")
+            response.raise_for_status()
+            models = response.json().get("data", [])
+
+            filtered_models = []
+            # Curated list of keywords that identify Image Generation models on OpenRouter
+            tti_keywords = ["flux", "dall-e", "imagen", "stable-diffusion", "midjourney", "riverflow"]
+
+            for m in models:
+                m_id = m.get("id", "").lower()
+                description = m.get("description", "").lower()
+                # OpenRouter sometimes provides output_modalities in the architecture block
+                modality = m.get("architecture", {}).get("modality", "text")
+
+                # Check if it's explicitly an image model or matches TTI keywords
+                is_tti = "image" in modality or \
+                         any(kw in m_id for kw in tti_keywords) or \
+                         "generate images" in description or \
+                         "text-to-image" in description
+
+                if is_tti:
+                    filtered_models.append({
+                        "model_name": m.get("id"),
+                        "display_name": m.get("name"),
+                        "description": m.get("description", "Image generation model")
+                    })
+
+            # Sort by name for easier navigation
+            filtered_models = sorted(filtered_models, key=lambda x: x["display_name"])
+
+            # Hardcoded fallbacks if API discovery fails or to ensure core models are present
+            if not filtered_models:
+                return [
+                    {"model_name": "google/gemini-3-pro-image-preview", "display_name": "Gemini 3.0 Pro image"},
+                    {"model_name": "black-forest-labs/flux.1-pro", "display_name": "FLUX.1 Pro"},
+                    {"model_name": "openai/gpt-5-image", "display_name": "gpt-5-image"}
+                ]
+
+            return filtered_models
+        except Exception as e:
+            ASCIIColors.error(f"Failed to list Open Router models: {e}")
+            return []
+
+    def list_services(self, **kwargs) -> List[Dict[str, str]]:
+        return [{"name": "Open Router TTI", "id": "open_router"}]
+
+    def get_settings(self, **kwargs) -> Optional[Dict[str, Any]]:
+        return self.config
+
+    def set_settings(self, settings: Dict[str, Any], **kwargs) -> bool:
+        self.config.update(settings)
+        if "service_key" in settings:
+            self.service_key = settings["service_key"]
+        if "model_name" in settings:
+            self.model_name = settings["model_name"]
+        return True
+
+    def edit_image(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], prompt: str, **kwargs) -> bytes:
+
+        model = kwargs.get("model_name", self.model_name)
+        negative_prompt = kwargs.get("negative_prompt", "")
+        width = kwargs.get("width", 1024)
+        height = kwargs.get("height", 1024)
+
+        headers = {
+            "Authorization": f"Bearer {self.service_key}",
+            "Content-Type": "application/json",
+            "HTTP-Referer": "https://github.com/ParisNeo/lollms_client",
+            "X-Title": "LoLLMS Client"
+        }
+
+        # Convert images to base64 data URLs
+        if not isinstance(images, list):
+            images = [images]
+
+        image_contents = []
+        for img in images:
+            if isinstance(img, str):
+                # Check if it's already a data URL
+                if img.startswith('data:image'):
+                    image_contents.append({
+                        "type": "image_url",
+                        "image_url": {"url": img}
+                    })
+                # Check if it's a regular URL
+                elif img.startswith(('http://', 'https://')):
+                    image_contents.append({
+                        "type": "image_url",
+                        "image_url": {"url": img}
+                    })
+                # Check if it's base64 data without the data URL prefix
+                elif len(img) > 100 and not '/' in img[:50] and not '\\' in img[:50]:
+                    # Assume it's raw base64 data
+                    image_contents.append({
+                        "type": "image_url",
+                        "image_url": {"url": f"data:image/png;base64,{img}"}
+                    })
+                else:
+                    # Assume it's a local file path
+                    try:
+                        with open(img, 'rb') as f:
+                            img_data = base64.b64encode(f.read()).decode('utf-8')
+                        image_contents.append({
+                            "type": "image_url",
+                            "image_url": {"url": f"data:image/jpeg;base64,{img_data}"}
+                        })
+                    except FileNotFoundError:
+                        ASCIIColors.warning(f"Could not find file: {img[:50]}... treating as base64 data")
+                        # If file not found, treat as base64
+                        image_contents.append({
+                            "type": "image_url",
+                            "image_url": {"url": f"data:image/png;base64,{img}"}
+                        })
+            else:
+                # PIL Image object
+                from io import BytesIO
+                buffer = BytesIO()
+                img.save(buffer, format='PNG')
+                img_data = base64.b64encode(buffer.getvalue()).decode('utf-8')
+                image_contents.append({
+                    "type": "image_url",
+                    "image_url": {"url": f"data:image/png;base64,{img_data}"}
+                })
+
+        # Build message content with images and text
+        content = image_contents + [{
+            "type": "text",
+            "text": prompt if not negative_prompt else f"{prompt}\n\nNegative prompt: {negative_prompt}"
+        }]
+
+        # Open Router specific payload using Chat Completions
+        payload = {
+            "model": model,
+            "messages": [
+                {
+                    "role": "user",
+                    "content": content
+                }
+            ],
+            "modalities": ["image", "text"],
+            "stream": False
+        }
+
+        # Add image configuration if relevant
+        image_config = {}
+        aspect_ratio = kwargs.get("aspect_ratio")
+        if not aspect_ratio:
+            aspect_ratio = self._get_aspect_ratio(width, height)
+
+        image_config["aspect_ratio"] = aspect_ratio
+
+        if "image_size" in kwargs:
+            image_config["image_size"] = kwargs["image_size"]
+
+        payload["image_config"] = image_config
+
+        try:
+            ASCIIColors.info(f"Open Router Image Edit Request: Model={model}, Aspect Ratio={aspect_ratio}, Images={len(images)}")
+            response = requests.post(
+                f"{self.host_address}/chat/completions",
+                headers=headers,
+                json=payload,
+                timeout=300
+            )
+            response.raise_for_status()
+
+            result = response.json()
+            if result.get("choices"):
+                choice = result["choices"][0]
+
+                # Check if there's an error in the choice
+                if "error" in choice:
+                    error_info = choice["error"]
+                    error_msg = error_info.get("message", "Unknown error")
+                    error_code = error_info.get("code", "unknown")
+                    raise ValueError(f"API Error ({error_code}): {error_msg}")
+
+                message = choice.get("message", {})
+
+                # Check if there are images in the response
+                response_images = message.get("images", [])
+                if response_images:
+                    image_url = response_images[0]["image_url"]["url"]
+                    if image_url.startswith("data:image"):
+                        base64_str = image_url.split(",")[1]
+                        return base64.b64decode(base64_str)
+                    else:
+                        img_res = requests.get(image_url)
+                        img_res.raise_for_status()
+                        return img_res.content
+
+                # If no images, check if there's a text response (refusal or error)
+                content = message.get("content", "")
+                if content:
+                    raise ValueError(f"Model returned text instead of image: {content}")
+
+            raise ValueError(f"No image found in Open Router response: {result}")
+
+        except ValueError as ve:
+            # Re-raise ValueError with the message for user feedback
+            ASCIIColors.error(f"Open Router Image Edit Error: {ve}")
+            raise
+        except Exception as e:
+            ASCIIColors.error(f"Open Router Image Edit Error: {e}")
+            trace_exception(e)
+            return None
+
+if __name__ == "__main__":
+    import os
+    key = os.getenv("OPENROUTER_API_KEY", "")
+    if not key:
+        print("Please set OPENROUTER_API_KEY env var.")
+    else:
+        binding = OpenRouterTTIBinding(service_key=key)
+        img = binding.generate_image("A cute robot painting a picture")
+        if img:
+            with open("output.png", "wb") as f: f.write(img)
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 1.7.13
+Version: 1.8.3
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache License