lollms-client 0.21.0__py3-none-any.whl → 0.22.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic. Click here for more details.
- examples/console_discussion.py +340 -99
- examples/gradio_lollms_chat.py +2 -2
- examples/lollms_discussions_test.py +3 -3
- lollms_client/__init__.py +6 -3
- lollms_client/lollms_core.py +3 -1
- lollms_client/lollms_discussion.py +444 -413
- lollms_client/lollms_personality.py +182 -0
- {lollms_client-0.21.0.dist-info → lollms_client-0.22.0.dist-info}/METADATA +1 -1
- {lollms_client-0.21.0.dist-info → lollms_client-0.22.0.dist-info}/RECORD +13 -11
- {lollms_client-0.21.0.dist-info → lollms_client-0.22.0.dist-info}/top_level.txt +1 -0
- personalities/parrot.py +10 -0
- {lollms_client-0.21.0.dist-info → lollms_client-0.22.0.dist-info}/WHEEL +0 -0
- {lollms_client-0.21.0.dist-info → lollms_client-0.22.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
import importlib.util
|
|
2
|
+
import json
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Callable, List, Optional, Union
|
|
5
|
+
|
|
6
|
+
class LollmsPersonality:
    """
    A class that encapsulates the full personality of an AI agent.

    This includes its identity, system prompts, specialized knowledge (via RAG),
    and custom execution logic (via a Python script). It is designed to be a
    portable and self-contained unit that can be loaded and used by any
    application using the lollms-client.
    """
    def __init__(
        self,
        # Metadata for identification and display
        name: str,
        author: str,
        category: str,
        description: str,
        # Core behavioral instruction
        system_prompt: str,
        icon: Optional[str] = None, # Base64 encoded image string


        # RAG - Data Files and Application-provided Callbacks
        data_files: Optional[List[Union[str, Path]]] = None,
        vectorize_chunk_callback: Optional[Callable[[str, str], None]] = None, # (chunk_text, chunk_id) -> None
        is_vectorized_callback: Optional[Callable[[str], bool]] = None, # (chunk_id) -> bool
        query_rag_callback: Optional[Callable[[str], str]] = None, # (user_query) -> rag_context_str

        # Custom Logic Override
        script: Optional[str] = None, # The Python script as a raw string

        # Internal state
        personality_id: Optional[str] = None
    ):
        """
        Initializes a LollmsPersonality instance.

        Args:
            name: The display name of the personality.
            author: The author of the personality.
            category: A category for organization (e.g., 'Code', 'Writing', 'Fun').
            description: A brief description of what the personality does.
            system_prompt: The core system prompt that defines the AI's behavior.
            icon: An optional base64 encoded string for a display icon.
            data_files: A list of file paths to be used as a knowledge base for RAG.
            vectorize_chunk_callback: A function provided by the host app to vectorize and store a text chunk.
            is_vectorized_callback: A function provided by the host app to check if a chunk is already vectorized.
            query_rag_callback: A function provided by the host app to query the vector store for relevant context.
            script: A string containing a Python script to override default chat behavior.
            personality_id: An optional unique identifier. If not provided, it's generated from the author and name.
        """
        self.name = name
        self.author = author
        self.category = category
        self.description = description
        self.icon = icon
        self.system_prompt = system_prompt
        self.data_files = [Path(f) for f in data_files] if data_files else []

        # RAG Callbacks provided by the host application
        self.vectorize_chunk_callback = vectorize_chunk_callback
        self.is_vectorized_callback = is_vectorized_callback
        self.query_rag_callback = query_rag_callback

        self.script = script
        self.script_module = None
        self.personality_id = personality_id or self._generate_id()

        # Prepare custom logic and data upon initialization
        self._prepare_script()
        self.ensure_data_vectorized()

    def _generate_id(self) -> str:
        """
        Creates a filesystem-safe, unique ID based on the author and name.

        Every non-alphanumeric character is replaced by an underscore, so the
        result is safe to embed in module names, chunk IDs, and file names.
        """
        safe_author = "".join(c if c.isalnum() else '_' for c in self.author)
        safe_name = "".join(c if c.isalnum() else '_' for c in self.name)
        return f"{safe_author}_{safe_name}"

    def _prepare_script(self) -> None:
        """
        Dynamically loads the personality's script as an in-memory Python module.

        The script is executed without ever being written to disk as a .py file,
        keeping the personality self-contained.

        SECURITY NOTE: the script is run with `exec` and therefore executes with
        the full privileges of the host process. Only load personalities that
        come from trusted sources.
        """
        if not self.script:
            return
        try:
            module_name = f"lollms_personality_script_{self.personality_id}"

            # Create a module specification and a module object from it
            spec = importlib.util.spec_from_loader(module_name, loader=None)
            self.script_module = importlib.util.module_from_spec(spec)

            # Execute the script code within the new module's namespace
            exec(self.script, self.script_module.__dict__)
            print(f"[{self.name}] Custom script loaded successfully.")
        except Exception as e:
            # A broken script must not prevent the personality from loading;
            # fall back to the default (script-less) chat behavior.
            print(f"[{self.name}] Failed to load custom script: {e}")
            self.script_module = None

    def ensure_data_vectorized(self, chunk_size: int = 1024) -> None:
        """
        Checks if the personality's data files are vectorized using the host callbacks.

        It iterates through each data file, splits it into chunks, and for each chunk,
        it checks if it's already processed. If not, it calls the vectorization callback
        provided by the host application. A no-op when there are no data files or
        when either required callback is missing.

        Args:
            chunk_size: The size of each text chunk to process for vectorization.
        """
        if not self.data_files or not self.vectorize_chunk_callback or not self.is_vectorized_callback:
            return

        print(f"[{self.name}] Checking RAG data vectorization...")
        all_vectorized = True
        for file_path in self.data_files:
            if not file_path.exists():
                print(f"  - Warning: Data file not found, skipping: {file_path}")
                continue

            try:
                content = file_path.read_text(encoding='utf-8')
                chunks = [content[i:i+chunk_size] for i in range(0, len(content), chunk_size)]

                for i, chunk in enumerate(chunks):
                    # Generate a unique and deterministic ID for each chunk.
                    # NOTE(review): the ID is keyed on the file *basename*, so two data
                    # files with the same name in different directories would collide.
                    # Left unchanged because altering the scheme would invalidate
                    # already-vectorized stores — confirm with the host-app contract.
                    chunk_id = f"{self.personality_id}_{file_path.name}_chunk_{i}"

                    if not self.is_vectorized_callback(chunk_id):
                        all_vectorized = False
                        print(f"  - Vectorizing '{file_path.name}' chunk {i+1}/{len(chunks)}...")
                        self.vectorize_chunk_callback(chunk, chunk_id)

            except Exception as e:
                # Best-effort: a single unreadable file must not abort the others.
                print(f"  - Error processing file {file_path.name}: {e}")
                continue

        if all_vectorized:
            print(f"[{self.name}] All RAG data is already vectorized.")
        else:
            print(f"[{self.name}] RAG data vectorization complete.")

    def get_rag_context(self, query: str) -> Optional[str]:
        """
        Queries the vectorized data to get relevant context for a given query.

        This method relies on the `query_rag_callback` provided by the host application
        to perform the actual search in the vector store.

        Args:
            query: The user's query string.

        Returns:
            A string containing the relevant context, or None if no callback is available.
        """
        if not self.query_rag_callback:
            return None
        return self.query_rag_callback(query)

    def to_dict(self) -> dict:
        """
        Serializes the personality's metadata to a dictionary.
        Note: Callbacks, the icon, and the script module are not serialized;
        only `has_script` records whether a custom script is present.
        """
        return {
            "personality_id": self.personality_id,
            "name": self.name,
            "author": self.author,
            "category": self.category,
            "description": self.description,
            "system_prompt": self.system_prompt,
            "data_files": [str(p) for p in self.data_files],
            "has_script": self.script is not None
        }
|
|
@@ -1,13 +1,13 @@
|
|
|
1
|
-
examples/console_discussion.py,sha256=
|
|
1
|
+
examples/console_discussion.py,sha256=JxjVaAxtt1WEXLN8vCJosu-cYNgfIX2JGO25kg1FFNY,20490
|
|
2
2
|
examples/external_mcp.py,sha256=swx1KCOz6jk8jGTAycq-xu7GXPAhRMDe1x--SKocugE,13371
|
|
3
3
|
examples/function_calling_with_local_custom_mcp.py,sha256=g6wOFRB8-p9Cv7hKmQaGzPvtMX3H77gas01QVNEOduM,12407
|
|
4
4
|
examples/generate_a_benchmark_for_safe_store.py,sha256=bkSt0mrpNsN0krZAUShm0jgVM1ukrPpjI7VwSgcNdSA,3974
|
|
5
5
|
examples/generate_text_with_multihop_rag_example.py,sha256=riEyVYo97r6ZYdySL-NJkRhE4MnpwbZku1sN8RNvbvs,11519
|
|
6
6
|
examples/gradio_chat_app.py,sha256=ZZ_D1U0wvvwE9THmAPXUvNKkFG2gi7tQq1f2pQx_2ug,15315
|
|
7
|
-
examples/gradio_lollms_chat.py,sha256=
|
|
7
|
+
examples/gradio_lollms_chat.py,sha256=z5FDE62dmPU3nb16zbZX6jkVitML1PMfPxYyWr8VLz8,10135
|
|
8
8
|
examples/internet_search_with_rag.py,sha256=ioTb_WI2M6kFeh1Dg-EGcKjccphnCsIGD_e9PZgZshw,12314
|
|
9
9
|
examples/local_mcp.py,sha256=w40dgayvHYe01yvekEE0LjcbkpwKjWwJ-9v4_wGYsUk,9113
|
|
10
|
-
examples/lollms_discussions_test.py,sha256=
|
|
10
|
+
examples/lollms_discussions_test.py,sha256=Jk1cCUDBBhTcK5glI50jAgzfB3IOiiUlnK3q7RYfMkA,6796
|
|
11
11
|
examples/openai_mcp.py,sha256=7IEnPGPXZgYZyiES_VaUbQ6viQjenpcUxGiHE-pGeFY,11060
|
|
12
12
|
examples/run_remote_mcp_example copy.py,sha256=pGT8A5iXK9oHtjGNEUCm8fnj9DQ37gcznjLYqAEI20o,10075
|
|
13
13
|
examples/run_standard_mcp_example.py,sha256=GSZpaACPf3mDPsjA8esBQVUsIi7owI39ca5avsmvCxA,9419
|
|
@@ -25,13 +25,14 @@ examples/deep_analyze/deep_analyze_multiple_files.py,sha256=fOryShA33P4IFxcxUDe-
|
|
|
25
25
|
examples/generate_and_speak/generate_and_speak.py,sha256=RAlvRwtEKXCh894l9M3iQbADe8CvF5N442jtRurK02I,13908
|
|
26
26
|
examples/generate_game_sfx/generate_game_fx.py,sha256=MgLNGi4hGBRoyr4bqYuCUdCSqd-ldDVfF0VSDUjgzsg,10467
|
|
27
27
|
examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
|
|
28
|
-
lollms_client/__init__.py,sha256=
|
|
28
|
+
lollms_client/__init__.py,sha256=Cd4G7paIm0kKNqc90yAJ7VJ8mpi2jfzoMBabmWRBZbY,1047
|
|
29
29
|
lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
|
|
30
|
-
lollms_client/lollms_core.py,sha256=
|
|
31
|
-
lollms_client/lollms_discussion.py,sha256=
|
|
30
|
+
lollms_client/lollms_core.py,sha256=y_KIVFCBsA0BYizGAbvQZrtWZCX5jB00yMuAgHlHaD4,143685
|
|
31
|
+
lollms_client/lollms_discussion.py,sha256=zdAUOhbFod65-VZYfKaldHYURR7wWnuccqv6FJa1qrM,36291
|
|
32
32
|
lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
|
|
33
33
|
lollms_client/lollms_llm_binding.py,sha256=E81g4yBlQn76WTSLicnTETJuQhf_WZUMZaxotgRnOcA,12096
|
|
34
34
|
lollms_client/lollms_mcp_binding.py,sha256=0rK9HQCBEGryNc8ApBmtOlhKE1Yfn7X7xIQssXxS2Zc,8933
|
|
35
|
+
lollms_client/lollms_personality.py,sha256=dILUI5DZdzJ3NDDQiIsK2UptVF-jZK3XYXZ2bpXP_ew,8035
|
|
35
36
|
lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
|
|
36
37
|
lollms_client/lollms_stt_binding.py,sha256=jAUhLouEhh2hmm1bK76ianfw_6B59EHfY3FmLv6DU-g,5111
|
|
37
38
|
lollms_client/lollms_tti_binding.py,sha256=afO0-d-Kqsmh8UHTijTvy6dZAt-XDB6R-IHmdbf-_fs,5928
|
|
@@ -77,8 +78,9 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
|
|
|
77
78
|
lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
|
|
78
79
|
lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
|
|
79
80
|
lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
80
|
-
lollms_client-0.
|
|
81
|
-
|
|
82
|
-
lollms_client-0.
|
|
83
|
-
lollms_client-0.
|
|
84
|
-
lollms_client-0.
|
|
81
|
+
lollms_client-0.22.0.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
|
|
82
|
+
personalities/parrot.py,sha256=-HdbK1h7Ixvp8FX69Nv92Z_El6_UVtsF8WuAvNpKbfg,478
|
|
83
|
+
lollms_client-0.22.0.dist-info/METADATA,sha256=71v2Qlp7tSyr_fL9I8Nw-9LLlncfuUkYgtJkK1EKayc,13374
|
|
84
|
+
lollms_client-0.22.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
85
|
+
lollms_client-0.22.0.dist-info/top_level.txt,sha256=vgtOcmtJbKs9gEiIIPp2scIsvtvU2y5u3tngqlme1RU,37
|
|
86
|
+
lollms_client-0.22.0.dist-info/RECORD,,
|
personalities/parrot.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
|
|
2
|
+
def run(discussion, on_chunk_callback):
    """Override the normal chat flow: parrot the latest message back.

    Takes the last message of the active discussion branch and wraps its
    content in squawks. When a chunk callback is supplied, the reply is also
    streamed through it as a single chunk.
    """
    branch = discussion.get_branch(discussion.active_branch_id)
    latest_content = branch[-1].content
    reply = f"Squawk! {latest_content}! Squawk!"
    if on_chunk_callback:
        # Deferred import: the message-type enum is only needed when streaming.
        from lollms_client import MSG_TYPE
        on_chunk_callback(reply, MSG_TYPE.MSG_TYPE_CHUNK)
    # Return the full raw response
    return reply
|
|
File without changes
|
|
File without changes
|