lollms-client 1.3.8__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lollms_client/__init__.py +1 -1
- lollms_client/lollms_core.py +2877 -187
- lollms_client/lollms_discussion.py +91 -77
- lollms_client/tti_bindings/diffusers/__init__.py +34 -12
- {lollms_client-1.3.8.dist-info → lollms_client-1.4.1.dist-info}/METADATA +4 -4
- {lollms_client-1.3.8.dist-info → lollms_client-1.4.1.dist-info}/RECORD +9 -9
- {lollms_client-1.3.8.dist-info → lollms_client-1.4.1.dist-info}/WHEEL +0 -0
- {lollms_client-1.3.8.dist-info → lollms_client-1.4.1.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-1.3.8.dist-info → lollms_client-1.4.1.dist-info}/top_level.txt +0 -0
--- a/lollms_client/lollms_discussion.py
+++ b/lollms_client/lollms_discussion.py
@@ -134,7 +134,7 @@ def create_dynamic_models(
     user_data_zone = Column(EncryptedText, nullable=True) # Field for persistent user-specific data
     discussion_data_zone = Column(EncryptedText, nullable=True) # Field for persistent discussion-specific data
     personality_data_zone = Column(EncryptedText, nullable=True) # Field for persistent personality-specific data
-    memory = Column(EncryptedText, nullable=True) #
+    memory = Column(EncryptedText, nullable=True) # Field for long-term memory, now managed with structured memories
 
     participants = Column(JSON, nullable=True, default=dict)
     active_branch_id = Column(String, nullable=True)
@@ -1646,71 +1646,85 @@ class LollmsDiscussion:
 
     def memorize(self, branch_tip_id: Optional[str] = None):
         """
-        Analyzes the current discussion
-
-
-        This is intended to build a persistent knowledge base about user preferences,
-        facts, and context that can be useful across different future discussions.
+        Analyzes the current discussion to create a structured memory of its essence,
+        focusing on preserving detailed technical content, problems, and solutions.
+        This new memory is then automatically saved and loaded into the context for immediate use.
 
         Args:
             branch_tip_id: The ID of the message to use as the end of the context
-
+                for memory extraction. Defaults to the active branch.
         """
         try:
-            # 1. Get the current conversation context
             discussion_context = self.export("markdown", branch_tip_id=branch_tip_id)
             if not discussion_context.strip():
                 print("[INFO] Memorize: Discussion is empty, nothing to memorize.")
                 return
 
-            # 2. Formulate the prompt for the LLM
             system_prompt = (
-                "You are a
-                "
-
-                "
-                "-
-                "-
-                "-
-                "
-                "
-                "
+                "You are a Technical Knowledge Extraction AI specialized in preserving detailed information. "
+                "Your task is to extract and preserve the ACTUAL CONTENT and DETAILS from discussions, not just summaries.\n\n"
+
+                "CRITICAL INSTRUCTIONS:\n"
+                "- If equations, formulas, or code are mentioned, INCLUDE THE FULL EQUATIONS/FORMULAS/CODE in the memory\n"
+                "- If technical procedures or steps are discussed, preserve the EXACT STEPS\n"
+                "- If specific values, parameters, or constants are mentioned, include them\n"
+                "- If problems and solutions are discussed, capture BOTH the problem statement AND the detailed solution\n"
+                "- Focus on ACTIONABLE and REFERENCEABLE content that someone could use later\n"
+                "- Preserve technical terminology, variable names, and specific implementation details\n"
+                "- Do NOT create high-level summaries - capture the actual working content\n\n"
+
+                "OUTPUT FORMAT: JSON with 'title' (descriptive but specific) and 'content' (detailed technical content)"
             )
 
             prompt = (
-                "
-
-                "
+                "Extract the key technical content from this discussion. Focus on preserving:\n"
+                "1. Complete equations, formulas, or code snippets\n"
+                "2. Specific problem statements and their detailed solutions\n"
+                "3. Step-by-step procedures or algorithms\n"
+                "4. Important constants, values, or parameters\n"
+                "5. Technical concepts with their precise definitions\n"
+                "6. Any implementation details or configuration settings\n\n"
+
+                "IMPORTANT: Do not summarize what was discussed - extract the actual usable content.\n"
+                "If Maxwell's equations were shown, include the actual equations.\n"
+                "If code was provided, include the actual code.\n"
+                "If a solution method was explained, include the actual steps.\n\n"
+
+                f"--- Conversation to Extract From ---\n{discussion_context}\n\n"
+
+                "Extract the technical essence that would be valuable for future reference:"
             )
 
-
-
-            extracted_info = self.lollmsClient.generate_text(
+            print("[INFO] Memorize: Extracting detailed technical content into a new memory...")
+            memory_json = self.lollmsClient.generate_structured_content(
                 prompt,
+                schema={
+                    "title": "A descriptive title indicating the type of problem solved (e.g., 'Python Import Error Fix', 'Database Connection Issue Solution')",
+                    "content": "Structured content with PROBLEM: [detailed problem] and SOLUTION: [detailed solution] sections"
+                },
                 system_prompt=system_prompt,
-
-                temperature=0.1, # Low temperature for factual extraction
-                top_k=10,
+                temperature=0.1
             )
 
-
-
-
-
-            # Format with a timestamp for context
-            timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
-            formatted_entry = f"\n\n--- Memory entry from {timestamp} ---\n{new_memory_entry}"
-
-            current_memory = self.memory or ""
-            self.memory = (current_memory + formatted_entry).strip()
-            self.touch() # Mark as updated and save if autosave is on
-            print(f"[INFO] Memorize: New information added to long-term memory.")
+            if memory_json and memory_json.get("title") and memory_json.get("content"):
+                print(f"[INFO] Memorize: New memory created and loaded into context: '{title}'.")
+                return memory_json
             else:
-                print("[
-
+                print("[WARNING] Memorize: Failed to generate a valid memory from the discussion.")
+                return None
         except Exception as e:
             trace_exception(e)
-            print(f"[ERROR] Memorize: Failed to
+            print(f"[ERROR] Memorize: Failed to create memory. {e}")
+
+    def set_memory(self, memory_text: str):
+        """Sets the discussion's memory content.
+        This memory is included in the system context during exports and can be
+        used to provide background information or retain important details across turns.
+        Args:
+            memory_text: The text to set as the discussion's memory.
+        """
+        self.memory = memory_text.strip()
+        self.touch()
 
     def count_discussion_tokens(self, format_type: str, branch_tip_id: Optional[str] = None) -> int:
         """Counts the number of tokens in the exported discussion content.
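For orientation, here is a hedged sketch of the reworked memory workflow. It assumes `discussion` is an existing `LollmsDiscussion` bound to a working client; only the `memorize()` and `set_memory()` calls come from the hunk above.

```python
# Sketch, assuming `discussion` is an existing LollmsDiscussion instance.
# memorize() now returns a structured dict {"title": ..., "content": ...},
# or None if extraction failed, instead of appending timestamped text.
memory = discussion.memorize()
if memory:
    print(f"Stored memory: {memory['title']}")

# set_memory() replaces the memory text wholesale and persists it via touch().
discussion.set_memory("PROBLEM: missing dependency\nSOLUTION: pip install requests")
```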
@@ -2353,7 +2367,7 @@ class LollmsDiscussion:
 
         return self.add_artefact(
             title, content=new_content, images=new_images,
-            audios=latest_artefact.get("audios", []),
+            audios=latest_artefact.get("audios", []),videos=latest_artefact.get("videos", []),
             zip_content=latest_artefact.get("zip"), version=latest_version + 1, **extra_data
         )
 
@@ -2447,6 +2461,38 @@ class LollmsDiscussion:
             title=title, content=content, version=version, **extra_data
         )
 
+    def remove_artefact(self, title: str, version: Optional[int] = None) -> int:
+        """
+        Removes artefacts by title. Removes all versions if `version` is None.
+
+        Returns:
+            The number of artefact entries removed.
+        """
+        new_metadata = (self.metadata or {}).copy()
+        artefacts = new_metadata.get("_artefacts", [])
+        if not artefacts:
+            return 0
+
+        initial_count = len(artefacts)
+
+        if version is None:
+            # Remove all versions with the matching title
+            kept_artefacts = [a for a in artefacts if a.get('title') != title]
+        else:
+            # Remove only the specific title and version
+            kept_artefacts = [a for a in artefacts if not (a.get('title') == title and a.get('version') == version)]
+
+        if len(kept_artefacts) < initial_count:
+            new_metadata["_artefacts"] = kept_artefacts
+            self.metadata = new_metadata
+            self.commit()
+
+        removed_count = initial_count - len(kept_artefacts)
+        if removed_count > 0:
+            print(f"Removed {removed_count} artefact(s) titled '{title}'.")
+
+        return removed_count
+
     def clone_without_messages(self) -> 'LollmsDiscussion':
         """
         Creates a new discussion with the same context but no message history.
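Usage is straightforward; a short sketch, assuming `discussion` already holds versioned artefacts under `"_artefacts"` in its metadata as the method body above expects:

```python
# Drop every version of an artefact, then drop one specific version.
n_all = discussion.remove_artefact("design_notes")              # all versions
n_v2 = discussion.remove_artefact("design_notes", version=2)    # one version only
print(f"{n_all + n_v2} artefact entries removed")
```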
@@ -2539,36 +2585,4 @@ class LollmsDiscussion:
         if db_manager:
             new_discussion.commit()
 
-        return new_discussion
-
-    def remove_artefact(self, title: str, version: Optional[int] = None) -> int:
-        """
-        Removes artefacts by title. Removes all versions if `version` is None.
-
-        Returns:
-            The number of artefact entries removed.
-        """
-        new_metadata = (self.metadata or {}).copy()
-        artefacts = new_metadata.get("_artefacts", [])
-        if not artefacts:
-            return 0
-
-        initial_count = len(artefacts)
-
-        if version is None:
-            # Remove all versions with the matching title
-            kept_artefacts = [a for a in artefacts if a.get('title') != title]
-        else:
-            # Remove only the specific title and version
-            kept_artefacts = [a for a in artefacts if not (a.get('title') == title and a.get('version') == version)]
-
-        if len(kept_artefacts) < initial_count:
-            new_metadata["_artefacts"] = kept_artefacts
-            self.metadata = new_metadata
-            self.commit()
-
-        removed_count = initial_count - len(kept_artefacts)
-        if removed_count > 0:
-            print(f"Removed {removed_count} artefact(s) titled '{title}'.")
-
-        return removed_count
+        return new_discussion
--- a/lollms_client/tti_bindings/diffusers/__init__.py
+++ b/lollms_client/tti_bindings/diffusers/__init__.py
@@ -131,7 +131,28 @@ CIVITAI_MODELS = {
         "filename": "papercut.safetensors",
         "description": "Paper cutout SD1.5.",
         "owned_by": "civitai"
-    }
+    },
+    "fantassifiedIcons": {
+        "display_name": "Fantassified Icons",
+        "url": "https://civitai.com/api/download/models/67584?type=Model&format=SafeTensor&size=pruned&fp=fp16",
+        "filename": "fantassifiedIcons_fantassifiedIconsV20.safetensors",
+        "description": "Flat, modern Icons.",
+        "owned_by": "civitai"
+    },
+    "game_icon_institute": {
+        "display_name": "Game icon institute",
+        "url": "https://civitai.com/api/download/models/158776?type=Model&format=SafeTensor&size=full&fp=fp16",
+        "filename": "gameIconInstituteV10_v10.safetensors",
+        "description": "Flat, modern game Icons.",
+        "owned_by": "civitai"
+    },
+    "M4RV3LS_DUNGEONS": {
+        "display_name": "M4RV3LS & DUNGEONS",
+        "url": "https://civitai.com/api/download/models/139417?type=Model&format=SafeTensor&size=pruned&fp=fp16",
+        "filename": "M4RV3LSDUNGEONSNEWV40COMICS_mD40.safetensors",
+        "description": "comics.",
+        "owned_by": "civitai"
+    },
 }
 
 TORCH_DTYPE_MAP_STR_TO_OBJ = {
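As a quick illustration of how such registry entries can be consumed, here is a minimal download sketch. Only the entry shape (`url`, `filename`) comes from the `CIVITAI_MODELS` dict above; the helper function itself is an illustrative assumption, not part of the binding.

```python
# Minimal sketch: fetch one of the newly registered Civitai checkpoints.
from pathlib import Path
import requests

def download_civitai_model(entry: dict, models_path: Path) -> Path:
    """Streams entry['url'] to models_path/entry['filename'] if not present."""
    target = models_path / entry["filename"]
    if not target.exists():
        with requests.get(entry["url"], stream=True, timeout=120) as resp:
            resp.raise_for_status()
            with open(target, "wb") as f:
                for chunk in resp.iter_content(chunk_size=1 << 20):
                    f.write(chunk)  # stream to disk in 1 MiB chunks
    return target
```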
@@ -453,8 +474,8 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         "safety_checker_on": True,
         "num_inference_steps": 25,
         "guidance_scale": 7.0,
-        "
-        "
+        "width": 512,
+        "height": 512,
         "seed": -1,
         "enable_cpu_offload": False,
         "enable_sequential_cpu_offload": False,
@@ -484,6 +505,7 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         self.config = self.DEFAULT_CONFIG.copy()
         self.config.update(kwargs)
         self.model_name = self.config.get("model_name", "")
+
         models_path_str = kwargs.get("models_path", str(Path(__file__).parent / "models"))
         self.models_path = Path(models_path_str)
         self.models_path.mkdir(parents=True, exist_ok=True)
@@ -600,11 +622,11 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         generator = self._prepare_seed(kwargs)
         pipeline_args = {
             "prompt": prompt,
-            "negative_prompt": negative_prompt or
-            "width": width if width is not None else self.config
-            "height": height if height is not None else self.config
-            "num_inference_steps": kwargs.pop("num_inference_steps", self.config
-            "guidance_scale": kwargs.pop("guidance_scale", self.config
+            "negative_prompt": negative_prompt or self.config.get("negative_prompt", ""),
+            "width": width if width is not None else self.config.get("width", 512),
+            "height": height if height is not None else self.config.get("height", 512),
+            "num_inference_steps": kwargs.pop("num_inference_steps", self.config.get("num_inference_steps",25)),
+            "guidance_scale": kwargs.pop("guidance_scale", self.config.get("guidance_scale",6.5)),
             "generator": generator
         }
         pipeline_args.update(kwargs)
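The net effect of the `width`/`height` changes is a standard explicit-argument-over-config fallback. A hedged sketch of how a caller might rely on it; the `generate_image` method name is assumed from the usual TTI binding interface and does not appear in this diff:

```python
# Sketch of the fallback behaviour introduced above. The constructor kwargs
# mirror DEFAULT_CONFIG keys from this diff; `generate_image` is an assumed
# method name, illustrative only.
from lollms_client.tti_bindings.diffusers import DiffusersTTIBinding_Impl

binding = DiffusersTTIBinding_Impl(model_name="papercut")  # illustrative model

img = binding.generate_image("a papercut fox")                 # config default: 512x512
img_wide = binding.generate_image("a papercut fox", width=768) # explicit width wins; height stays 512
```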
@@ -646,8 +668,8 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         self._acquire_manager()
         imgs = [images] if isinstance(images, str) else list(images)
         pil_images = [self._decode_image_input(s) for s in imgs]
-        out_w = width if width is not None else self.config["
-        out_h = height if height is not None else self.config["
+        out_w = width if width is not None else self.config["width"]
+        out_h = height if height is not None else self.config["height"]
         generator = self._prepare_seed(kwargs)
         steps = kwargs.pop("num_inference_steps", self.config["num_inference_steps"])
         guidance = kwargs.pop("guidance_scale", self.config["guidance_scale"])
@@ -756,8 +778,8 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         {"name": "enable_cpu_offload", "type": "bool", "value": self.config["enable_cpu_offload"], "description": "Enable model CPU offload (saves VRAM, slower)."},
         {"name": "enable_sequential_cpu_offload", "type": "bool", "value": self.config["enable_sequential_cpu_offload"], "description": "Enable sequential CPU offload."},
         {"name": "enable_xformers", "type": "bool", "value": self.config["enable_xformers"], "description": "Enable xFormers memory efficient attention."},
-        {"name": "
-        {"name": "
+        {"name": "width", "type": "int", "value": self.config["width"], "description": "Default image width."},
+        {"name": "height", "type": "int", "value": self.config["height"], "description": "Default image height."},
         {"name": "num_inference_steps", "type": "int", "value": self.config["num_inference_steps"], "description": "Default inference steps."},
         {"name": "guidance_scale", "type": "float", "value": self.config["guidance_scale"], "description": "Default guidance scale (CFG)."},
         {"name": "seed", "type": "int", "value": self.config["seed"], "description": "Default seed (-1 for random)."},
--- a/lollms_client-1.3.8.dist-info/METADATA
+++ b/lollms_client-1.4.1.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 1.3.8
+Version: 1.4.1
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License
@@ -56,7 +56,7 @@ Whether you're connecting to a remote LoLLMs server, an Ollama instance, the Ope
 * 📝 **Advanced Structured Content Generation:** Reliably generate structured JSON output from natural language prompts using the `generate_structured_content` helper method, enforcing a specific schema.
 * 💬 **Advanced Discussion Management:** Robustly manage conversation histories with `LollmsDiscussion`, featuring branching, context exporting, and automatic pruning.
 * 🧠 **Persistent Memory & Data Zones:** `LollmsDiscussion` now supports multiple, distinct data zones (`user_data_zone`, `discussion_data_zone`, `personality_data_zone`) and a long-term `memory` field. This allows for sophisticated context layering and state management, enabling agents to learn and remember over time.
-* ✍️ **
+* ✍️ **Structured Memorization:** The `memorize()` method analyzes a conversation to extract its essence (e.g., a problem and its solution), creating a structured "memory" with a title and content. These memories are stored and can be explicitly loaded into the AI's context, providing a more robust and manageable long-term memory system.
 * 📊 **Detailed Context Analysis:** The `get_context_status()` method provides a rich, detailed breakdown of the prompt context, showing the content and token count for each individual component (system prompt, data zones, message history).
 * ⚙️ **Standardized Configuration Management:** A unified dictionary-based system (`llm_binding_config`) to configure any binding in a consistent manner.
 * 🧩 **Extensible:** Designed to easily incorporate new LLM backends and modality services, including custom MCP toolsets.
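Since `memorize()` is now built on `generate_structured_content`, a hedged sketch of that helper in isolation may help; the argument names (`schema`, `system_prompt`, `temperature`) mirror the memorize() hunk above, while the client construction is an assumption for illustration:

```python
# Sketch of generate_structured_content, the helper memorize() now builds on.
from lollms_client import LollmsClient

client = LollmsClient()  # constructor arguments omitted; assumes a configured default binding
memory = client.generate_structured_content(
    "Extract the problem and solution from: 'pip failed; fixed by upgrading setuptools.'",
    schema={
        "title": "A descriptive title for the memory",
        "content": "PROBLEM: [detailed problem] and SOLUTION: [detailed solution] sections",
    },
    system_prompt="You extract structured technical memories.",
    temperature=0.1,
)
print(memory["title"], memory["content"], sep="\n")
```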
@@ -384,7 +384,7 @@ with tempfile.TemporaryDirectory() as tmpdir:
     # The 'breakdown' shows the individual zones that were combined
     for name, content in sys_ctx.get('breakdown', {}).items():
         # For brevity, show only first line of content
-        print(f" -> Contains '{name}': {content.split(os.linesep)
+        print(f" -> Contains '{name}': {content.split(os.linesep)}...")
 
     # Print the message history details
     if 'message_history' in status['zones']:
@@ -424,7 +424,7 @@ with tempfile.TemporaryDirectory() as tmpdir:
         if name == 'memory':
             ASCIIColors.yellow(f" -> Full '{name}' content:\n{content}")
         else:
-            print(f" -> Contains '{name}': {content.split(os.linesep)
+            print(f" -> Contains '{name}': {content.split(os.linesep)}...")
     print("------------------------------------------")
 
 ```
--- a/lollms_client-1.3.8.dist-info/RECORD
+++ b/lollms_client-1.4.1.dist-info/RECORD
@@ -1,8 +1,8 @@
-lollms_client/__init__.py,sha256=
+lollms_client/__init__.py,sha256=G2ENRPwIlHb_nTaBEbn_AvUQvlsBYpIuQXGWYkYmyo0,1146
 lollms_client/lollms_agentic.py,sha256=pQiMEuB_XkG29-SW6u4KTaMFPr6eKqacInggcCuCW3k,13914
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=
-lollms_client/lollms_discussion.py,sha256=
+lollms_client/lollms_core.py,sha256=aCEoxmEF6ZmkBgJgZd74lKkM4A3PVVyt2IwMvLfScWw,315053
+lollms_client/lollms_discussion.py,sha256=jWw1lSq0Oz_X5pnkECf1XwdDP2Lf84im00VpwuvsXXk,123041
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
 lollms_client/lollms_llm_binding.py,sha256=Dj1PI2bQBYv_JgPxCIaIC7DMUvWdFJGwXFdsP5hdGBg,25014
 lollms_client/lollms_mcp_binding.py,sha256=psb27A23VFWDfZsR2WUbQXQxiZDW5yfOak6ZtbMfszI,10222
@@ -49,7 +49,7 @@ lollms_client/stt_bindings/lollms/__init__.py,sha256=9Vmn1sQQZKLGLe7nZnc-0LnNeSY
 lollms_client/stt_bindings/whisper/__init__.py,sha256=1Ej67GdRKBy1bba14jMaYDYHiZkxJASkWm5eF07ztDQ,15363
 lollms_client/stt_bindings/whispercpp/__init__.py,sha256=xSAQRjAhljak3vWCpkP0Vmdb6WmwTzPjXyaIB85KLGU,21439
 lollms_client/tti_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client/tti_bindings/diffusers/__init__.py,sha256=
+lollms_client/tti_bindings/diffusers/__init__.py,sha256=Pi5Zw4nHGXVc0Vcb0ib7KkoiOx__0JukWtL01BUzd7c,41692
 lollms_client/tti_bindings/gemini/__init__.py,sha256=f9fPuqnrBZ1Z-obcoP6EVvbEXNbNCSg21cd5efLCk8U,16707
 lollms_client/tti_bindings/lollms/__init__.py,sha256=5Tnsn4b17djvieQkcjtIDBm3qf0pg5ZWWov-4_2wmo0,8762
 lollms_client/tti_bindings/openai/__init__.py,sha256=YWJolJSQfIzTJvrLQVe8rQewP7rddf6z87g4rnp-lTs,4932
@@ -71,8 +71,8 @@ lollms_client/tts_bindings/xtts/server/main.py,sha256=T-Kn5NM-u1FJMygeV8rOoZKlqn
 lollms_client/tts_bindings/xtts/server/setup_voices.py,sha256=UdHaPa5aNcw8dR-aRGkZr2OfSFFejH79lXgfwT0P3ss,1964
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-1.3.8.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-lollms_client-1.3.8.dist-info/METADATA,sha256=
-lollms_client-1.3.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lollms_client-1.3.8.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
-lollms_client-1.3.8.dist-info/RECORD,,
+lollms_client-1.4.1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-1.4.1.dist-info/METADATA,sha256=eBfpms3EJ5sD7D-xBTXggnqOc1g8IE0inftnXGQmb6w,58689
+lollms_client-1.4.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-1.4.1.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+lollms_client-1.4.1.dist-info/RECORD,,
WHEEL, licenses/LICENSE, and top_level.txt: renamed from lollms_client-1.3.8.dist-info to lollms_client-1.4.1.dist-info with no content changes.
|