lollms-client 1.4.0__py3-none-any.whl → 1.4.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/novita_ai/__init__.py +303 -0
- lollms_client/llm_bindings/perplexity/__init__.py +326 -0
- lollms_client/lollms_core.py +678 -78
- lollms_client/lollms_discussion.py +13 -151
- lollms_client/tti_bindings/diffusers/__init__.py +34 -12
- lollms_client/tti_bindings/leonardo_ai/__init__.py +124 -0
- lollms_client/tti_bindings/novita_ai/__init__.py +102 -0
- lollms_client/tti_bindings/stability_ai/__init__.py +176 -0
- {lollms_client-1.4.0.dist-info → lollms_client-1.4.5.dist-info}/METADATA +1 -1
- {lollms_client-1.4.0.dist-info → lollms_client-1.4.5.dist-info}/RECORD +14 -9
- {lollms_client-1.4.0.dist-info → lollms_client-1.4.5.dist-info}/WHEEL +0 -0
- {lollms_client-1.4.0.dist-info → lollms_client-1.4.5.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-1.4.0.dist-info → lollms_client-1.4.5.dist-info}/top_level.txt +0 -0
lollms_client/lollms_discussion.py

@@ -1707,21 +1707,25 @@ class LollmsDiscussion:
             )
 
             if memory_json and memory_json.get("title") and memory_json.get("content"):
-                title = memory_json["title"]
-                self.add_memory(
-                    title=title,
-                    content=memory_json["content"]
-                )
-                # Automatically load the newly created memory into the context
-                self.load_memory_into_context(title)
                 print(f"[INFO] Memorize: New memory created and loaded into context: '{title}'.")
+                return memory_json
             else:
                 print("[WARNING] Memorize: Failed to generate a valid memory from the discussion.")
-
+                return None
         except Exception as e:
             trace_exception(e)
             print(f"[ERROR] Memorize: Failed to create memory. {e}")
 
+    def set_memory(self, memory_text: str):
+        """Sets the discussion's memory content.
+        This memory is included in the system context during exports and can be
+        used to provide background information or retain important details across turns.
+        Args:
+            memory_text: The text to set as the discussion's memory.
+        """
+        self.memory = memory_text.strip()
+        self.touch()
+
     def count_discussion_tokens(self, format_type: str, branch_tip_id: Optional[str] = None) -> int:
         """Counts the number of tokens in the exported discussion content.
 
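The hunk above simplifies `memorize` so it returns the generated memory dict (or `None` on failure) and adds a public `set_memory` helper. A minimal usage sketch, assuming an existing `LollmsDiscussion` instance named `discussion`; its construction and the exact `memorize` call signature are not shown in this diff:

```python
# Sketch only: `discussion` is assumed to be a LollmsDiscussion instance.
# set_memory() strips the text, stores it as the discussion memory, and touches the record.
discussion.set_memory("The user prefers concise answers and works mostly in Python.")

# memorize() now returns the generated memory dict on success, or None on failure
# (call signature assumed; only its body is visible in this hunk).
memory = discussion.memorize()
if memory is not None:
    print(f"Created memory: {memory['title']}")
```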
@@ -2363,7 +2367,7 @@ class LollmsDiscussion:
 
         return self.add_artefact(
             title, content=new_content, images=new_images,
-            audios=latest_artefact.get("audios", []),
+            audios=latest_artefact.get("audios", []),videos=latest_artefact.get("videos", []),
             zip_content=latest_artefact.get("zip"), version=latest_version + 1, **extra_data
         )
 
@@ -2489,148 +2493,6 @@ class LollmsDiscussion:
 
         return removed_count
 
-    # Memories management system
-    def list_memories(self) -> List[Dict[str, Any]]:
-        """
-        Lists all memories stored in the discussion's metadata.
-        """
-        metadata = self.metadata or {}
-        memories = metadata.get("_memories", [])
-        now = datetime.utcnow().isoformat()
-
-        upgraded = []
-        dirty = False
-        for memory in memories:
-            fixed = memory.copy()
-            if "title" not in fixed: fixed["title"] = "untitled"; dirty = True
-            if "content" not in fixed: fixed["content"] = ""; dirty = True
-            if "created_at" not in fixed: fixed["created_at"] = now; dirty = True
-
-            section_start = f"--- Memory: {fixed['title']} ---"
-            fixed["is_loaded"] = section_start in (self.memory or "")
-            upgraded.append(fixed)
-
-        if dirty:
-            metadata["_memories"] = upgraded
-            self.metadata = metadata
-            self.commit()
-
-        return upgraded
-
-    def add_memory(self, title: str, content: str, **extra_data) -> Dict[str, Any]:
-        """
-        Adds or overwrites a memory in the discussion.
-        """
-        new_metadata = (self.metadata or {}).copy()
-        memories = new_metadata.get("_memories", [])
-
-        memories = [m for m in memories if m.get('title') != title]
-
-        new_memory = {
-            "title": title, "content": content,
-            "created_at": datetime.utcnow().isoformat(),
-            **extra_data
-        }
-        memories.append(new_memory)
-
-        new_metadata["_memories"] = memories
-        self.metadata = new_metadata
-        self.commit()
-        return new_memory
-
-    def get_memory(self, title: str) -> Optional[Dict[str, Any]]:
-        """
-        Retrieves a memory by title.
-        """
-        memories = self.list_memories()
-        return next((m for m in memories if m.get('title') == title), None)
-
-    def load_memory_into_context(self, title: str):
-        """
-        Loads a memory's content into the long-term memory context.
-        """
-        memory = self.get_memory(title)
-        if not memory:
-            raise ValueError(f"Memory '{title}' not found.")
-
-        if memory.get('content'):
-            section = (
-                f"--- Memory: {memory['title']} ---\n"
-                f"{memory['content']}\n"
-                f"--- End Memory: {memory['title']} ---\n\n"
-            )
-            if section not in (self.memory or ""):
-                current_memory_zone = self.memory or ""
-                self.memory = current_memory_zone.rstrip() + "\n\n" + section
-                self.touch()
-                self.commit()
-                print(f"Loaded memory '{title}' into context.")
-
-    def unload_memory_from_context(self, title: str):
-        """
-        Removes a memory's content from the long-term memory context.
-        """
-        memory = self.get_memory(title)
-        if not memory:
-            raise ValueError(f"Memory '{title}' not found.")
-
-        if self.memory and memory.get('content'):
-            section_start = f"--- Memory: {memory['title']} ---"
-            pattern = rf"\n*\s*{re.escape(section_start)}.*?--- End Memory: {re.escape(memory['title'])} ---\s*\n*"
-            self.memory = re.sub(pattern, "", self.memory, flags=re.DOTALL).strip()
-            self.touch()
-            self.commit()
-            print(f"Unloaded memory '{title}' from context.")
-
-    def is_memory_loaded(self, title: str) -> bool:
-        """
-        Checks if a memory is currently loaded in the long-term memory context.
-        """
-        memory = self.get_memory(title)
-        if not memory:
-            return False
-
-        section_start = f"--- Memory: {memory['title']} ---"
-        return section_start in (self.memory or "")
-
-    def purge_memories(self) -> bool:
-        """
-        Removes all memories from the discussion.
-
-        Returns:
-            The number of memories removed (0 or 1).
-        """
-        new_metadata = (self.metadata or {}).copy()
-        new_metadata["_memories"] = []
-        self.metadata = new_metadata
-        self.commit()
-        print(f"Removed memory titled.")
-        return True
-
-    def remove_memory(self, title: str) -> int:
-        """
-        Removes a memory by title.
-
-        Returns:
-            The number of memories removed (0 or 1).
-        """
-        new_metadata = (self.metadata or {}).copy()
-        memories = new_metadata.get("_memories", [])
-        if not memories:
-            return 0
-
-        initial_count = len(memories)
-        kept_memories = [m for m in memories if m.get('title') != title]
-
-        if len(kept_memories) < initial_count:
-            new_metadata["_memories"] = kept_memories
-            self.metadata = new_metadata
-            self.commit()
-            print(f"Removed memory titled '{title}'.")
-            return 1
-
-        return 0
-
     def clone_without_messages(self) -> 'LollmsDiscussion':
         """
         Creates a new discussion with the same context but no message history.
lollms_client/tti_bindings/diffusers/__init__.py

@@ -131,7 +131,28 @@ CIVITAI_MODELS = {
         "filename": "papercut.safetensors",
         "description": "Paper cutout SD1.5.",
         "owned_by": "civitai"
-    }
+    },
+    "fantassifiedIcons": {
+        "display_name": "Fantassified Icons",
+        "url": "https://civitai.com/api/download/models/67584?type=Model&format=SafeTensor&size=pruned&fp=fp16",
+        "filename": "fantassifiedIcons_fantassifiedIconsV20.safetensors",
+        "description": "Flat, modern Icons.",
+        "owned_by": "civitai"
+    },
+    "game_icon_institute": {
+        "display_name": "Game icon institute",
+        "url": "https://civitai.com/api/download/models/158776?type=Model&format=SafeTensor&size=full&fp=fp16",
+        "filename": "gameIconInstituteV10_v10.safetensors",
+        "description": "Flat, modern game Icons.",
+        "owned_by": "civitai"
+    },
+    "M4RV3LS_DUNGEONS": {
+        "display_name": "M4RV3LS & DUNGEONS",
+        "url": "https://civitai.com/api/download/models/139417?type=Model&format=SafeTensor&size=pruned&fp=fp16",
+        "filename": "M4RV3LSDUNGEONSNEWV40COMICS_mD40.safetensors",
+        "description": "comics.",
+        "owned_by": "civitai"
+    },
 }
 
 TORCH_DTYPE_MAP_STR_TO_OBJ = {
@@ -453,8 +474,8 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         "safety_checker_on": True,
         "num_inference_steps": 25,
         "guidance_scale": 7.0,
-        "
-        "
+        "width": 512,
+        "height": 512,
         "seed": -1,
         "enable_cpu_offload": False,
         "enable_sequential_cpu_offload": False,
@@ -484,6 +505,7 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         self.config = self.DEFAULT_CONFIG.copy()
         self.config.update(kwargs)
         self.model_name = self.config.get("model_name", "")
+
         models_path_str = kwargs.get("models_path", str(Path(__file__).parent / "models"))
         self.models_path = Path(models_path_str)
         self.models_path.mkdir(parents=True, exist_ok=True)
@@ -600,11 +622,11 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         generator = self._prepare_seed(kwargs)
         pipeline_args = {
             "prompt": prompt,
-            "negative_prompt": negative_prompt or
-            "width": width if width is not None else self.config
-            "height": height if height is not None else self.config
-            "num_inference_steps": kwargs.pop("num_inference_steps", self.config
-            "guidance_scale": kwargs.pop("guidance_scale", self.config
+            "negative_prompt": negative_prompt or self.config.get("negative_prompt", ""),
+            "width": width if width is not None else self.config.get("width", 512),
+            "height": height if height is not None else self.config.get("height", 512),
+            "num_inference_steps": kwargs.pop("num_inference_steps", self.config.get("num_inference_steps",25)),
+            "guidance_scale": kwargs.pop("guidance_scale", self.config.get("guidance_scale",6.5)),
             "generator": generator
         }
         pipeline_args.update(kwargs)
@@ -646,8 +668,8 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         self._acquire_manager()
         imgs = [images] if isinstance(images, str) else list(images)
         pil_images = [self._decode_image_input(s) for s in imgs]
-        out_w = width if width is not None else self.config["
-        out_h = height if height is not None else self.config["
+        out_w = width if width is not None else self.config["width"]
+        out_h = height if height is not None else self.config["height"]
         generator = self._prepare_seed(kwargs)
         steps = kwargs.pop("num_inference_steps", self.config["num_inference_steps"])
         guidance = kwargs.pop("guidance_scale", self.config["guidance_scale"])
@@ -756,8 +778,8 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
             {"name": "enable_cpu_offload", "type": "bool", "value": self.config["enable_cpu_offload"], "description": "Enable model CPU offload (saves VRAM, slower)."},
             {"name": "enable_sequential_cpu_offload", "type": "bool", "value": self.config["enable_sequential_cpu_offload"], "description": "Enable sequential CPU offload."},
            {"name": "enable_xformers", "type": "bool", "value": self.config["enable_xformers"], "description": "Enable xFormers memory efficient attention."},
-            {"name": "
-            {"name": "
+            {"name": "width", "type": "int", "value": self.config["width"], "description": "Default image width."},
+            {"name": "height", "type": "int", "value": self.config["height"], "description": "Default image height."},
             {"name": "num_inference_steps", "type": "int", "value": self.config["num_inference_steps"], "description": "Default inference steps."},
             {"name": "guidance_scale", "type": "float", "value": self.config["guidance_scale"], "description": "Default guidance scale (CFG)."},
             {"name": "seed", "type": "int", "value": self.config["seed"], "description": "Default seed (-1 for random)."},
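The diffusers hunks above introduce explicit `width` and `height` defaults (512) in `DEFAULT_CONFIG`, fall back to `self.config.get(...)` when building `pipeline_args`, and expose both values in the settings list. Since `__init__` copies `DEFAULT_CONFIG` and then applies `self.config.update(kwargs)`, those defaults can be overridden at construction time. A minimal sketch, assuming the binding is instantiated directly and that its generation call accepts a prompt like the other TTI bindings in this release; the model id and call signature are assumptions, not confirmed by this diff:

```python
# Sketch only: width/height/num_inference_steps/guidance_scale are DEFAULT_CONFIG keys
# visible in the diff; the model id and generate_image signature are hypothetical.
binding = DiffusersTTIBinding_Impl(
    model_name="stabilityai/stable-diffusion-xl-base-1.0",  # hypothetical model id
    width=768,                    # overrides the new 512 default
    height=768,
    num_inference_steps=30,
    guidance_scale=7.0,
)
img_bytes = binding.generate_image("a watercolor fox in a misty forest")
```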
lollms_client/tti_bindings/leonardo_ai/__init__.py

@@ -0,0 +1,124 @@
+import os
+import requests
+import time
+import base64
+from io import BytesIO
+from pathlib import Path
+from typing import Optional, List, Dict, Any, Union
+
+from lollms_client.lollms_tti_binding import LollmsTTIBinding
+from ascii_colors import trace_exception, ASCIIColors
+import pipmaster as pm
+
+pm.ensure_packages(["requests", "Pillow"])
+from PIL import Image
+
+BindingName = "LeonardoAITTIBinding"
+
+# Sourced from https://docs.leonardo.ai/docs/models
+LEONARDO_AI_MODELS = [
+    {"model_name": "ac4f3991-8a40-42cd-b174-14a8e33738e4", "display_name": "Leonardo Phoenix", "description": "Fast, high-quality photorealism."},
+    {"model_name": "1e65d070-22c9-4aed-a5be-ce58a1b65b38", "display_name": "Leonardo Diffusion XL", "description": "The flagship general-purpose SDXL model."},
+    {"model_name": "b24e16ff-06e3-43eb-a255-db4322b0f345", "display_name": "AlbedoBase XL", "description": "Versatile model for photorealism and artistic styles."},
+    {"model_name": "6bef9f1b-29cb-40c7-b9df-32b51c1f67d3", "display_name": "Absolute Reality v1.6", "description": "Classic photorealistic model."},
+    {"model_name": "f3296a34-a868-4665-8b2f-f4313f8c8533", "display_name": "RPG v5", "description": "Specialized in RPG characters and assets."},
+    {"model_name": "2067ae58-a02e-4318-9742-2b55b2a4c813", "display_name": "DreamShaper v7", "description": "Popular versatile artistic model."},
+]
+
+class LeonardoAITTIBinding(LollmsTTIBinding):
+    """Leonardo.ai TTI binding for LoLLMS"""
+
+    def __init__(self, **kwargs):
+        super().__init__(binding_name=BindingName)
+        self.config = kwargs
+        self.api_key = self.config.get("api_key") or os.environ.get("LEONARDO_API_KEY")
+        if not self.api_key:
+            raise ValueError("Leonardo.ai API key is required.")
+        self.model_name = self.config.get("model_name", "ac4f3991-8a40-42cd-b174-14a8e33738e4")
+        self.base_url = "https://cloud.leonardo.ai/api/rest/v1"
+        self.headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
+
+    def listModels(self) -> list:
+        # You could also fetch this dynamically from /models endpoint
+        return LEONARDO_AI_MODELS
+
+    def _wait_for_generation(self, generation_id: str) -> List[bytes]:
+        while True:
+            url = f"{self.base_url}/generations/{generation_id}"
+            response = requests.get(url, headers=self.headers)
+            response.raise_for_status()
+            data = response.json().get("generations_by_pk", {})
+            status = data.get("status")
+
+            if status == "COMPLETE":
+                ASCIIColors.green("Generation complete.")
+                images_data = []
+                for img in data.get("generated_images", []):
+                    img_url = img.get("url")
+                    if img_url:
+                        img_response = requests.get(img_url)
+                        img_response.raise_for_status()
+                        images_data.append(img_response.content)
+                return images_data
+            elif status == "FAILED":
+                raise Exception("Leonardo.ai generation failed.")
+            else:
+                ASCIIColors.info(f"Generation status: {status}. Waiting...")
+                time.sleep(3)
+
+    def generate_image(self, prompt: str, negative_prompt: str = "", width: int = 1024, height: int = 1024, **kwargs) -> bytes:
+        url = f"{self.base_url}/generations"
+        payload = {
+            "prompt": prompt,
+            "negative_prompt": negative_prompt,
+            "modelId": self.model_name,
+            "width": width,
+            "height": height,
+            "num_images": 1,
+            "guidance_scale": kwargs.get("guidance_scale", 7),
+            "seed": kwargs.get("seed"),
+            "sd_version": "SDXL" # Most models are SDXL based
+        }
+
+        try:
+            ASCIIColors.info(f"Submitting generation job to Leonardo.ai ({self.model_name})...")
+            response = requests.post(url, json=payload, headers=self.headers)
+            response.raise_for_status()
+            generation_id = response.json()["sdGenerationJob"]["generationId"]
+            ASCIIColors.info(f"Job submitted with ID: {generation_id}")
+            images = self._wait_for_generation(generation_id)
+            return images[0]
+        except Exception as e:
+            trace_exception(e)
+            try:
+                error_msg = response.json()
+                raise Exception(f"Leonardo.ai API error: {error_msg}")
+            except:
+                raise Exception(f"Leonardo.ai API request failed: {e}")
+
+    def edit_image(self, **kwargs) -> bytes:
+        ASCIIColors.warning("Leonardo.ai edit_image (inpainting/img2img) is not yet implemented in this binding.")
+        raise NotImplementedError("This binding does not yet support image editing.")
+
+if __name__ == '__main__':
+    ASCIIColors.magenta("--- Leonardo.ai TTI Binding Test ---")
+    if "LEONARDO_API_KEY" not in os.environ:
+        ASCIIColors.error("LEONARDO_API_KEY environment variable not set. Cannot run test.")
+        exit(1)
+
+    try:
+        binding = LeonardoAITTIBinding()
+
+        ASCIIColors.cyan("\n--- Test: Text-to-Image ---")
+        prompt = "A majestic lion wearing a crown, hyperrealistic, 8k"
+        img_bytes = binding.generate_image(prompt, width=1024, height=1024)
+
+        assert len(img_bytes) > 1000
+        output_path = Path(__file__).parent / "tmp_leonardo_t2i.png"
+        with open(output_path, "wb") as f:
+            f.write(img_bytes)
+        ASCIIColors.green(f"Text-to-Image generation OK. Image saved to {output_path}")
+
+    except Exception as e:
+        trace_exception(e)
+        ASCIIColors.error(f"Leonardo.ai binding test failed: {e}")
lollms_client/tti_bindings/novita_ai/__init__.py

@@ -0,0 +1,102 @@
+import os
+import requests
+import base64
+from io import BytesIO
+from pathlib import Path
+from typing import Optional, List, Dict, Any, Union
+
+from lollms_client.lollms_tti_binding import LollmsTTIBinding
+from ascii_colors import trace_exception, ASCIIColors
+import pipmaster as pm
+
+pm.ensure_packages(["requests"])
+
+BindingName = "NovitaAITTIBinding"
+
+# Sourced from https://docs.novita.ai/image-generation/models
+NOVITA_AI_MODELS = [
+    {"model_name": "sd_xl_base_1.0.safetensors", "display_name": "Stable Diffusion XL 1.0", "description": "Official SDXL 1.0 Base model."},
+    {"model_name": "dreamshaper_xl_1_0.safetensors", "display_name": "DreamShaper XL 1.0", "description": "Versatile artistic SDXL model."},
+    {"model_name": "juggernaut_xl_v9_rundiffusion.safetensors", "display_name": "Juggernaut XL v9", "description": "High-quality realistic and cinematic model."},
+    {"model_name": "realistic_vision_v5.1.safetensors", "display_name": "Realistic Vision v5.1", "description": "Popular photorealistic SD1.5 model."},
+    {"model_name": "absolutereality_v1.8.1.safetensors", "display_name": "Absolute Reality v1.8.1", "description": "General-purpose realistic SD1.5 model."},
+    {"model_name": "meinamix_meina_v11.safetensors", "display_name": "MeinaMix v11", "description": "High-quality anime illustration model."},
+]
+
+class NovitaAITTIBinding(LollmsTTIBinding):
+    """Novita.ai TTI binding for LoLLMS"""
+
+    def __init__(self, **kwargs):
+        super().__init__(binding_name=BindingName)
+        self.config = kwargs
+        self.api_key = self.config.get("api_key") or os.environ.get("NOVITA_API_KEY")
+        if not self.api_key:
+            raise ValueError("Novita.ai API key is required.")
+        self.model_name = self.config.get("model_name", "juggernaut_xl_v9_rundiffusion.safetensors")
+        self.base_url = "https://api.novita.ai/v3"
+        self.headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
+
+    def listModels(self) -> list:
+        return NOVITA_AI_MODELS
+
+    def generate_image(self, prompt: str, negative_prompt: str = "", width: int = 1024, height: int = 1024, **kwargs) -> bytes:
+        url = f"{self.base_url}/text2img"
+        payload = {
+            "model_name": self.model_name,
+            "prompt": prompt,
+            "negative_prompt": negative_prompt,
+            "width": width,
+            "height": height,
+            "sampler_name": "DPM++ 2M Karras",
+            "cfg_scale": kwargs.get("guidance_scale", 7.0),
+            "steps": kwargs.get("num_inference_steps", 25),
+            "seed": kwargs.get("seed", -1),
+            "n_iter": 1,
+            "batch_size": 1
+        }
+
+        try:
+            ASCIIColors.info(f"Requesting image from Novita.ai ({self.model_name})...")
+            response = requests.post(url, json=payload, headers=self.headers)
+            response.raise_for_status()
+            data = response.json()
+            if "images" not in data or not data["images"]:
+                raise Exception(f"API returned no images. Response: {data}")
+
+            b64_image = data["images"][0]["image_base64"]
+            return base64.b64decode(b64_image)
+
+        except Exception as e:
+            trace_exception(e)
+            try:
+                error_msg = response.json()
+                raise Exception(f"Novita.ai API error: {error_msg}")
+            except:
+                raise Exception(f"Novita.ai API request failed: {e}")
+
+    def edit_image(self, **kwargs) -> bytes:
+        ASCIIColors.warning("Novita.ai edit_image (inpainting/img2img) is not yet implemented in this binding.")
+        raise NotImplementedError("This binding does not yet support image editing.")
+
+if __name__ == '__main__':
+    ASCIIColors.magenta("--- Novita.ai TTI Binding Test ---")
+    if "NOVITA_API_KEY" not in os.environ:
+        ASCIIColors.error("NOVITA_API_KEY environment variable not set. Cannot run test.")
+        exit(1)
+
+    try:
+        binding = NovitaAITTIBinding()
+
+        ASCIIColors.cyan("\n--- Test: Text-to-Image ---")
+        prompt = "A cute capybara wearing a top hat, sitting in a field of flowers, painterly style"
+        img_bytes = binding.generate_image(prompt, width=1024, height=1024, num_inference_steps=30)
+
+        assert len(img_bytes) > 1000
+        output_path = Path(__file__).parent / "tmp_novita_t2i.png"
+        with open(output_path, "wb") as f:
+            f.write(img_bytes)
+        ASCIIColors.green(f"Text-to-Image generation OK. Image saved to {output_path}")
+
+    except Exception as e:
+        trace_exception(e)
+        ASCIIColors.error(f"Novita.ai binding test failed: {e}")