lollms-client 0.20.10__py3-none-any.whl → 0.22.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic. Click here for more details.
- examples/console_discussion.py +448 -0
- examples/gradio_lollms_chat.py +259 -0
- examples/lollms_discussions_test.py +155 -0
- lollms_client/__init__.py +5 -2
- lollms_client/llm_bindings/ollama/__init__.py +1 -1
- lollms_client/lollms_core.py +86 -2
- lollms_client/lollms_discussion.py +638 -386
- lollms_client/lollms_personality.py +182 -0
- lollms_client/lollms_types.py +19 -16
- lollms_client/lollms_utilities.py +71 -57
- lollms_client/mcp_bindings/remote_mcp/__init__.py +2 -1
- {lollms_client-0.20.10.dist-info → lollms_client-0.22.0.dist-info}/METADATA +1 -1
- {lollms_client-0.20.10.dist-info → lollms_client-0.22.0.dist-info}/RECORD +17 -15
- {lollms_client-0.20.10.dist-info → lollms_client-0.22.0.dist-info}/top_level.txt +1 -0
- personalities/parrot.py +10 -0
- examples/personality_test/chat_test.py +0 -37
- examples/personality_test/chat_with_aristotle.py +0 -42
- examples/personality_test/tesks_test.py +0 -62
- {lollms_client-0.20.10.dist-info → lollms_client-0.22.0.dist-info}/WHEEL +0 -0
- {lollms_client-0.20.10.dist-info → lollms_client-0.22.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
import importlib.util
|
|
2
|
+
import json
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Callable, List, Optional, Union
|
|
5
|
+
|
|
6
|
+
class LollmsPersonality:
    """
    Encapsulates the complete persona of an AI agent.

    A personality bundles identity metadata, a system prompt, optional RAG
    knowledge files (vectorized through host-supplied callbacks), and an
    optional Python script that overrides the default chat behavior. It is
    designed as a portable, self-contained unit usable by any application
    built on lollms-client.
    """
    def __init__(
        self,
        # Metadata for identification and display
        name: str,
        author: str,
        category: str,
        description: str,
        # Core behavioral instruction
        system_prompt: str,
        icon: Optional[str] = None,  # Base64 encoded image string


        # RAG - Data Files and Application-provided Callbacks
        data_files: Optional[List[Union[str, Path]]] = None,
        vectorize_chunk_callback: Optional[Callable[[str, str], None]] = None,  # (chunk_text, chunk_id) -> None
        is_vectorized_callback: Optional[Callable[[str], bool]] = None,  # (chunk_id) -> bool
        query_rag_callback: Optional[Callable[[str], str]] = None,  # (user_query) -> rag_context_str

        # Custom Logic Override
        script: Optional[str] = None,  # The Python script as a raw string

        # Internal state
        personality_id: Optional[str] = None
    ):
        """
        Initializes a LollmsPersonality instance.

        Args:
            name: The display name of the personality.
            author: The author of the personality.
            category: A category for organization (e.g., 'Code', 'Writing', 'Fun').
            description: A brief description of what the personality does.
            system_prompt: The core system prompt that defines the AI's behavior.
            icon: An optional base64 encoded string for a display icon.
            data_files: File paths used as a RAG knowledge base.
            vectorize_chunk_callback: Host-app function that vectorizes and stores one text chunk.
            is_vectorized_callback: Host-app function that reports whether a chunk is already stored.
            query_rag_callback: Host-app function that queries the vector store for context.
            script: Python source overriding the default chat behavior.
            personality_id: Optional unique identifier; derived from author and name when omitted.
        """
        # Identity / display metadata
        self.name = name
        self.author = author
        self.category = category
        self.description = description
        self.icon = icon
        self.system_prompt = system_prompt
        self.data_files = list(map(Path, data_files)) if data_files else []

        # RAG callbacks supplied by the host application
        self.vectorize_chunk_callback = vectorize_chunk_callback
        self.is_vectorized_callback = is_vectorized_callback
        self.query_rag_callback = query_rag_callback

        # Custom-logic script (kept as source text; loaded into script_module)
        self.script = script
        self.script_module = None
        self.personality_id = personality_id or self._generate_id()

        # Load custom logic and make sure the knowledge data is vectorized.
        self._prepare_script()
        self.ensure_data_vectorized()

    def _generate_id(self) -> str:
        """Build a filesystem-safe, unique ID from the author and name."""
        def slugify(text: str) -> str:
            # Every non-alphanumeric character maps to an underscore.
            return "".join(ch if ch.isalnum() else '_' for ch in text)

        return f"{slugify(self.author)}_{slugify(self.name)}"

    def _prepare_script(self):
        """
        Load the personality's script as an in-memory Python module.

        The script is executed without ever being written to disk as a .py
        file, keeping the personality self-contained.

        SECURITY NOTE(review): `exec` runs arbitrary Python from the
        personality definition with full interpreter privileges — only load
        personalities from trusted sources.
        """
        if not self.script:
            return
        module_name = f"lollms_personality_script_{self.personality_id}"
        try:
            # Build a loader-less module object to host the script's namespace.
            spec = importlib.util.spec_from_loader(module_name, loader=None)
            self.script_module = importlib.util.module_from_spec(spec)

            # Run the script's code inside the fresh module namespace.
            exec(self.script, self.script_module.__dict__)
            print(f"[{self.name}] Custom script loaded successfully.")
        except Exception as e:
            print(f"[{self.name}] Failed to load custom script: {e}")
            self.script_module = None

    def ensure_data_vectorized(self, chunk_size: int = 1024):
        """
        Make sure every data file is vectorized via the host callbacks.

        Each file is split into fixed-size character chunks; any chunk whose
        deterministic ID the host does not yet know is handed to the
        vectorization callback, so repeated runs are idempotent.

        Args:
            chunk_size: Number of characters per chunk.
        """
        # Without data files or both storage callbacks there is nothing to do.
        if not self.data_files or not self.vectorize_chunk_callback or not self.is_vectorized_callback:
            return

        print(f"[{self.name}] Checking RAG data vectorization...")
        everything_done = True
        for path in self.data_files:
            if not path.exists():
                print(f" - Warning: Data file not found, skipping: {path}")
                continue

            try:
                text = path.read_text(encoding='utf-8')
                pieces = [text[offset:offset + chunk_size]
                          for offset in range(0, len(text), chunk_size)]

                for index, piece in enumerate(pieces):
                    # Deterministic per-chunk ID: personality + file + index.
                    chunk_id = f"{self.personality_id}_{path.name}_chunk_{index}"
                    if self.is_vectorized_callback(chunk_id):
                        continue
                    everything_done = False
                    print(f" - Vectorizing '{path.name}' chunk {index+1}/{len(pieces)}...")
                    self.vectorize_chunk_callback(piece, chunk_id)

            except Exception as e:
                print(f" - Error processing file {path.name}: {e}")
                continue

        if everything_done:
            print(f"[{self.name}] All RAG data is already vectorized.")
        else:
            print(f"[{self.name}] RAG data vectorization complete.")

    def get_rag_context(self, query: str) -> Optional[str]:
        """
        Fetch RAG context for *query* through the host's callback.

        Args:
            query: The user's query string.

        Returns:
            The relevant context string, or None when no callback was provided.
        """
        callback = self.query_rag_callback
        return callback(query) if callback else None

    def to_dict(self) -> dict:
        """
        Serialize the personality's metadata to a dictionary.

        Callbacks and the loaded script module are intentionally left out.
        """
        return {
            "personality_id": self.personality_id,
            "name": self.name,
            "author": self.author,
            "category": self.category,
            "description": self.description,
            "system_prompt": self.system_prompt,
            "data_files": [str(p) for p in self.data_files],
            "has_script": self.script is not None,
        }
|
lollms_client/lollms_types.py
CHANGED
|
@@ -6,31 +6,35 @@ class MSG_TYPE(Enum):
|
|
|
6
6
|
MSG_TYPE_CONTENT_INVISIBLE_TO_AI = 2 # A full message (for some personality the answer is sent in bulk)
|
|
7
7
|
MSG_TYPE_CONTENT_INVISIBLE_TO_USER = 3 # A full message (for some personality the answer is sent in bulk)
|
|
8
8
|
|
|
9
|
+
# Thoughts
|
|
10
|
+
MSG_TYPE_THOUGHT_CHUNK = 4 # A chunk of a thought content (used for classical chat)
|
|
11
|
+
MSG_TYPE_THOUGHT_CONTENT = 5 # A full thought content (for some personality the answer is sent in bulk)
|
|
12
|
+
|
|
9
13
|
# Conditionning
|
|
10
14
|
# Informations
|
|
11
|
-
MSG_TYPE_EXCEPTION =
|
|
12
|
-
MSG_TYPE_WARNING =
|
|
13
|
-
MSG_TYPE_INFO =
|
|
15
|
+
MSG_TYPE_EXCEPTION = 6 # An exception occured
|
|
16
|
+
MSG_TYPE_WARNING = 7 # A warning occured
|
|
17
|
+
MSG_TYPE_INFO = 8 # An information to be shown to user
|
|
14
18
|
|
|
15
19
|
# Steps
|
|
16
|
-
MSG_TYPE_STEP =
|
|
17
|
-
MSG_TYPE_STEP_START =
|
|
18
|
-
MSG_TYPE_STEP_PROGRESS =
|
|
19
|
-
MSG_TYPE_STEP_END =
|
|
20
|
+
MSG_TYPE_STEP = 9 # An instant step (a step that doesn't need time to be executed)
|
|
21
|
+
MSG_TYPE_STEP_START = 10 # A step has started (the text contains an explanation of the step done by the personality)
|
|
22
|
+
MSG_TYPE_STEP_PROGRESS = 11 # The progress value (the text contains a percentage and can be parsed by the reception)
|
|
23
|
+
MSG_TYPE_STEP_END = 12# A step has been done (the text contains an explanation of the step done by he personality)
|
|
20
24
|
|
|
21
25
|
#Extra
|
|
22
|
-
MSG_TYPE_JSON_INFOS =
|
|
23
|
-
MSG_TYPE_REF =
|
|
24
|
-
MSG_TYPE_CODE =
|
|
25
|
-
MSG_TYPE_UI =
|
|
26
|
+
MSG_TYPE_JSON_INFOS = 13# A JSON output that is useful for summarizing the process of generation used by personalities like chain of thoughts and tree of thoughts
|
|
27
|
+
MSG_TYPE_REF = 14# References (in form of [text](path))
|
|
28
|
+
MSG_TYPE_CODE = 15# A javascript code to execute
|
|
29
|
+
MSG_TYPE_UI = 16# A vue.js component to show (we need to build some and parse the text to show it)
|
|
26
30
|
|
|
27
31
|
#Commands
|
|
28
|
-
MSG_TYPE_NEW_MESSAGE =
|
|
29
|
-
MSG_TYPE_FINISHED_MESSAGE =
|
|
32
|
+
MSG_TYPE_NEW_MESSAGE = 17# A new message
|
|
33
|
+
MSG_TYPE_FINISHED_MESSAGE = 18# End of current message
|
|
30
34
|
|
|
31
35
|
#Tool calling
|
|
32
|
-
MSG_TYPE_TOOL_CALL =
|
|
33
|
-
MSG_TYPE_TOOL_OUTPUT =
|
|
36
|
+
MSG_TYPE_TOOL_CALL = 19# a tool call
|
|
37
|
+
MSG_TYPE_TOOL_OUTPUT = 20# the output of the tool
|
|
34
38
|
|
|
35
39
|
|
|
36
40
|
class SENDER_TYPES(Enum):
|
|
@@ -39,7 +43,6 @@ class SENDER_TYPES(Enum):
|
|
|
39
43
|
SENDER_TYPES_SYSTEM = 2 # Sent by the system
|
|
40
44
|
|
|
41
45
|
|
|
42
|
-
|
|
43
46
|
class SUMMARY_MODE(Enum):
    """Strategy used when summarizing long content."""
    SUMMARY_MODE_SEQUENCIAL = 0
    # BUGFIX: this was also 0, which made it an *alias* of
    # SUMMARY_MODE_SEQUENCIAL (Enum deduplicates members with equal values),
    # so the hierarchical mode could never be selected distinctly.
    SUMMARY_MODE_HIERARCHICAL = 1
|
@@ -9,93 +9,108 @@ import re
|
|
|
9
9
|
import numpy as np
|
|
10
10
|
|
|
11
11
|
import json
|
|
12
|
-
import re
|
|
13
12
|
from ascii_colors import ASCIIColors, trace_exception
|
|
13
|
+
|
|
14
14
|
def robust_json_parser(json_string: str) -> dict:
    """
    Parse a possibly malformed JSON string using a series of corrective strategies.

    The input is first tried as-is; on failure the most likely JSON
    object/array substring is extracted and a pipeline of heuristic repairs
    (boolean/null normalization, comment stripping, trailing-comma removal,
    quote/newline fixes, ...) is applied before a final parse attempt.

    Args:
        json_string: A string expected to represent a JSON object or array.

    Returns:
        A dictionary parsed from the JSON string (a list if the top-level
        JSON value is an array).

    Raises:
        ValueError: If parsing fails after all correction attempts.
    """

    # STEP 0: Remove code block wrappers if present (e.g., ```json ... ```)
    json_string = re.sub(r"^```(?:json)?\s*|\s*```$", '', json_string.strip())

    # STEP 1: Attempt to parse directly
    try:
        return json.loads(json_string)
    except json.JSONDecodeError:
        pass

    # STEP 2: Extract likely JSON substring (handles "Here is the JSON: {...}")
    json_match = re.search(r'(\{[\s\S]*\}|\[[\s\S]*\])', json_string)
    cleaned_string = json_match.group(0) if json_match else json_string

    try:
        # STEP 3a: Normalize Python/JS booleans/nulls
        cleaned_string = re.sub(r'\bTrue\b', 'true', cleaned_string)
        cleaned_string = re.sub(r'\bFalse\b', 'false', cleaned_string)
        cleaned_string = re.sub(r'\bNone\b', 'null', cleaned_string)

        # STEP 3b: Remove comments (single-line and block).
        # BUGFIX: the previous pattern r'//.*' also consumed the '//' inside
        # URLs such as "http://example.com" embedded in string values; the
        # (?<!:) lookbehind keeps '://' intact. This remains a heuristic: a
        # bare '//' inside a string that is not preceded by ':' is still
        # treated as a comment.
        cleaned_string = re.sub(r'(?<!:)//.*', '', cleaned_string)
        cleaned_string = re.sub(r'/\*[\s\S]*?\*/', '', cleaned_string)

        # STEP 3c: Remove bad escape sequences like \_ or \*
        cleaned_string = re.sub(r'\\([_`*#\-])', r'\1', cleaned_string)

        # STEP 3d: Remove trailing commas
        cleaned_string = re.sub(r',\s*(\}|\])', r'\1', cleaned_string)

        # STEP 3e: Escape unescaped newlines inside string literals
        def escape_newlines_in_strings(text: str) -> str:
            in_string = False
            result = []
            i = 0
            while i < len(text):
                c = text[i]
                if c == '"' and (i == 0 or text[i - 1] != '\\'):
                    in_string = not in_string
                if in_string and c == '\n':
                    result.append('\\n')
                else:
                    result.append(c)
                i += 1
            return ''.join(result)

        cleaned_string = escape_newlines_in_strings(cleaned_string)

        # STEP 3f: Escape unescaped inner double quotes inside strings
        # (conservative: only rewrites spans that already parse as strings)
        def escape_unescaped_inner_quotes(text: str) -> str:
            def fix(match):
                s = match.group(0)
                inner = s[1:-1]
                # Escape double quotes that aren't already escaped
                inner_fixed = re.sub(r'(?<!\\)"', r'\\"', inner)
                return f'"{inner_fixed}"'
            return re.sub(r'"(?:[^"\\]|\\.)*"', fix, text)

        cleaned_string = escape_unescaped_inner_quotes(cleaned_string)

        # STEP 3g: Convert single-quoted strings to double quotes (arrays or object keys)
        cleaned_string = re.sub(
            r"(?<=[:\[,])\s*'([^']*?)'\s*(?=[,\}\]])",
            lambda m: '"' + m.group(1).replace('"', '\\"') + '"',
            cleaned_string
        )
        cleaned_string = re.sub(
            r"(?<=\{)\s*'([^']*?)'\s*:",
            lambda m: '"' + m.group(1).replace('"', '\\"') + '":',
            cleaned_string
        )

        # STEP 3h: Remove non-breaking spaces and control characters
        cleaned_string = re.sub(r'[\x00-\x1F\x7F\u00A0]', '', cleaned_string)

        # STEP 3i: Fix smart quotes
        cleaned_string = cleaned_string.replace("“", '"').replace("”", '"').replace("‘", "'").replace("’", "'")

        # STEP 3j: Remove line breaks between JSON tokens that don't belong
        cleaned_string = re.sub(r'"\s*\n\s*"', '"\\n"', cleaned_string)

        # Final parse
        return json.loads(cleaned_string)

    except json.JSONDecodeError as e:
        print("\n--- JSONDecodeError ---")
        print(e)
        print("\n--- Original String ---")
        print(json_string)
        print("\n--- Final Cleaned String Attempted ---")
        print(cleaned_string)
        raise ValueError(f"Failed to parse JSON. Final error: {e}") from e
|
|
104
119
|
|
|
105
120
|
|
|
106
|
-
|
|
107
121
|
class PromptReshaper:
|
|
108
122
|
def __init__(self, template:str):
|
|
109
123
|
self.template = template
|
|
@@ -44,7 +44,8 @@ class RemoteMCPBinding(LollmsMCPBinding):
|
|
|
44
44
|
**other_config_params (Any): Additional configuration parameters.
|
|
45
45
|
"""
|
|
46
46
|
super().__init__(binding_name="remote_mcp")
|
|
47
|
-
|
|
47
|
+
# initialization in case no servers are present
|
|
48
|
+
self.servers = None
|
|
48
49
|
if not MCP_LIBRARY_AVAILABLE:
|
|
49
50
|
ASCIIColors.error(f"{self.binding_name}: MCP library not available. This binding will be disabled.")
|
|
50
51
|
return
|
|
@@ -1,10 +1,13 @@
|
|
|
1
|
+
examples/console_discussion.py,sha256=JxjVaAxtt1WEXLN8vCJosu-cYNgfIX2JGO25kg1FFNY,20490
|
|
1
2
|
examples/external_mcp.py,sha256=swx1KCOz6jk8jGTAycq-xu7GXPAhRMDe1x--SKocugE,13371
|
|
2
3
|
examples/function_calling_with_local_custom_mcp.py,sha256=g6wOFRB8-p9Cv7hKmQaGzPvtMX3H77gas01QVNEOduM,12407
|
|
3
4
|
examples/generate_a_benchmark_for_safe_store.py,sha256=bkSt0mrpNsN0krZAUShm0jgVM1ukrPpjI7VwSgcNdSA,3974
|
|
4
5
|
examples/generate_text_with_multihop_rag_example.py,sha256=riEyVYo97r6ZYdySL-NJkRhE4MnpwbZku1sN8RNvbvs,11519
|
|
5
6
|
examples/gradio_chat_app.py,sha256=ZZ_D1U0wvvwE9THmAPXUvNKkFG2gi7tQq1f2pQx_2ug,15315
|
|
7
|
+
examples/gradio_lollms_chat.py,sha256=z5FDE62dmPU3nb16zbZX6jkVitML1PMfPxYyWr8VLz8,10135
|
|
6
8
|
examples/internet_search_with_rag.py,sha256=ioTb_WI2M6kFeh1Dg-EGcKjccphnCsIGD_e9PZgZshw,12314
|
|
7
9
|
examples/local_mcp.py,sha256=w40dgayvHYe01yvekEE0LjcbkpwKjWwJ-9v4_wGYsUk,9113
|
|
10
|
+
examples/lollms_discussions_test.py,sha256=Jk1cCUDBBhTcK5glI50jAgzfB3IOiiUlnK3q7RYfMkA,6796
|
|
8
11
|
examples/openai_mcp.py,sha256=7IEnPGPXZgYZyiES_VaUbQ6viQjenpcUxGiHE-pGeFY,11060
|
|
9
12
|
examples/run_remote_mcp_example copy.py,sha256=pGT8A5iXK9oHtjGNEUCm8fnj9DQ37gcznjLYqAEI20o,10075
|
|
10
13
|
examples/run_standard_mcp_example.py,sha256=GSZpaACPf3mDPsjA8esBQVUsIi7owI39ca5avsmvCxA,9419
|
|
@@ -21,29 +24,27 @@ examples/deep_analyze/deep_analyse.py,sha256=fZNmDrfEAuxEAfdbjAgJYIh1k6wbiuZ4Rvw
|
|
|
21
24
|
examples/deep_analyze/deep_analyze_multiple_files.py,sha256=fOryShA33P4IFxcxUDe-nJ2kW0v9w9yW8KsToS3ETl8,1032
|
|
22
25
|
examples/generate_and_speak/generate_and_speak.py,sha256=RAlvRwtEKXCh894l9M3iQbADe8CvF5N442jtRurK02I,13908
|
|
23
26
|
examples/generate_game_sfx/generate_game_fx.py,sha256=MgLNGi4hGBRoyr4bqYuCUdCSqd-ldDVfF0VSDUjgzsg,10467
|
|
24
|
-
examples/personality_test/chat_test.py,sha256=o2jlpoddFc-T592iqAiA29xk3x27KsdK5DluqxBwHqw,1417
|
|
25
|
-
examples/personality_test/chat_with_aristotle.py,sha256=4X_fwubMpd0Eq2rCReS2bgVlUoAqJprjkLXk2Jz6pXU,1774
|
|
26
|
-
examples/personality_test/tesks_test.py,sha256=7LIiwrEbva9WWZOLi34fsmCBN__RZbPpxoUOKA_AtYk,1924
|
|
27
27
|
examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
|
|
28
|
-
lollms_client/__init__.py,sha256=
|
|
28
|
+
lollms_client/__init__.py,sha256=Cd4G7paIm0kKNqc90yAJ7VJ8mpi2jfzoMBabmWRBZbY,1047
|
|
29
29
|
lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
|
|
30
|
-
lollms_client/lollms_core.py,sha256=
|
|
31
|
-
lollms_client/lollms_discussion.py,sha256=
|
|
30
|
+
lollms_client/lollms_core.py,sha256=y_KIVFCBsA0BYizGAbvQZrtWZCX5jB00yMuAgHlHaD4,143685
|
|
31
|
+
lollms_client/lollms_discussion.py,sha256=zdAUOhbFod65-VZYfKaldHYURR7wWnuccqv6FJa1qrM,36291
|
|
32
32
|
lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
|
|
33
33
|
lollms_client/lollms_llm_binding.py,sha256=E81g4yBlQn76WTSLicnTETJuQhf_WZUMZaxotgRnOcA,12096
|
|
34
34
|
lollms_client/lollms_mcp_binding.py,sha256=0rK9HQCBEGryNc8ApBmtOlhKE1Yfn7X7xIQssXxS2Zc,8933
|
|
35
|
+
lollms_client/lollms_personality.py,sha256=dILUI5DZdzJ3NDDQiIsK2UptVF-jZK3XYXZ2bpXP_ew,8035
|
|
35
36
|
lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
|
|
36
37
|
lollms_client/lollms_stt_binding.py,sha256=jAUhLouEhh2hmm1bK76ianfw_6B59EHfY3FmLv6DU-g,5111
|
|
37
38
|
lollms_client/lollms_tti_binding.py,sha256=afO0-d-Kqsmh8UHTijTvy6dZAt-XDB6R-IHmdbf-_fs,5928
|
|
38
39
|
lollms_client/lollms_ttm_binding.py,sha256=FjVVSNXOZXK1qvcKEfxdiX6l2b4XdGOSNnZ0utAsbDg,4167
|
|
39
40
|
lollms_client/lollms_tts_binding.py,sha256=5cJYECj8PYLJAyB6SEH7_fhHYK3Om-Y3arkygCnZ24o,4342
|
|
40
41
|
lollms_client/lollms_ttv_binding.py,sha256=KkTaHLBhEEdt4sSVBlbwr5i_g_TlhcrwrT-7DjOsjWQ,4131
|
|
41
|
-
lollms_client/lollms_types.py,sha256=
|
|
42
|
-
lollms_client/lollms_utilities.py,sha256=
|
|
42
|
+
lollms_client/lollms_types.py,sha256=NfvTmICzRCgfjjy5zLMFeDaiW6zyUsdnxRF69gAEyAk,3110
|
|
43
|
+
lollms_client/lollms_utilities.py,sha256=qK5iNmrFD7NGaEVW3nCWT6AtEhLIVHCXMzEpYxG_M5w,11293
|
|
43
44
|
lollms_client/llm_bindings/__init__.py,sha256=9sWGpmWSSj6KQ8H4lKGCjpLYwhnVdL_2N7gXCphPqh4,14
|
|
44
45
|
lollms_client/llm_bindings/llamacpp/__init__.py,sha256=Qj5RvsgPeHGNfb5AEwZSzFwAp4BOWjyxmm9qBNtstrc,63716
|
|
45
46
|
lollms_client/llm_bindings/lollms/__init__.py,sha256=17TwGMDJMxRPjZjZZSysR8AwjMXZeRfDBy8RqWWuaIY,17769
|
|
46
|
-
lollms_client/llm_bindings/ollama/__init__.py,sha256=
|
|
47
|
+
lollms_client/llm_bindings/ollama/__init__.py,sha256=QufsYqak2VlA2XGbzks8u55yNJFeDH2V35NGeZABkm8,32554
|
|
47
48
|
lollms_client/llm_bindings/openai/__init__.py,sha256=ay_2JJi4La258Eg3alUhnh6Y5IRyOWnHaFLXqvN_4ao,19144
|
|
48
49
|
lollms_client/llm_bindings/openllm/__init__.py,sha256=xv2XDhJNCYe6NPnWBboDs24AQ1VJBOzsTuMcmuQ6xYY,29864
|
|
49
50
|
lollms_client/llm_bindings/pythonllamacpp/__init__.py,sha256=7dM42TCGKh0eV0njNL1tc9cInhyvBRIXzN3dcy12Gl0,33551
|
|
@@ -55,7 +56,7 @@ lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py,sh
|
|
|
55
56
|
lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py,sha256=THtZsMxNnXZiBdkwoBlfbWY2C5hhDdmPtnM-8cSKN6s,9488
|
|
56
57
|
lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py,sha256=PLC31-D04QKTOTb1uuCHnrAlpysQjsk89yIJngK0VGc,4586
|
|
57
58
|
lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py,sha256=McDCBVoVrMDYgU7EYtyOY7mCk1uEeTea0PSD69QqDsQ,6228
|
|
58
|
-
lollms_client/mcp_bindings/remote_mcp/__init__.py,sha256=
|
|
59
|
+
lollms_client/mcp_bindings/remote_mcp/__init__.py,sha256=_wtrwq5_kRV3Of2lB-45G7BZQVR_fYvacHYc_s6q9fk,16615
|
|
59
60
|
lollms_client/mcp_bindings/standard_mcp/__init__.py,sha256=zpF4h8cTUxoERI-xcVjmS_V772LK0V4jegjz2k1PK98,31658
|
|
60
61
|
lollms_client/stt_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
61
62
|
lollms_client/stt_bindings/lollms/__init__.py,sha256=jBz3285atdPRqQe9ZRrb-AvjqKRB4f8tjLXjma0DLfE,6082
|
|
@@ -77,8 +78,9 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
|
|
|
77
78
|
lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
|
|
78
79
|
lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
|
|
79
80
|
lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
80
|
-
lollms_client-0.
|
|
81
|
-
|
|
82
|
-
lollms_client-0.
|
|
83
|
-
lollms_client-0.
|
|
84
|
-
lollms_client-0.
|
|
81
|
+
lollms_client-0.22.0.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
|
|
82
|
+
personalities/parrot.py,sha256=-HdbK1h7Ixvp8FX69Nv92Z_El6_UVtsF8WuAvNpKbfg,478
|
|
83
|
+
lollms_client-0.22.0.dist-info/METADATA,sha256=71v2Qlp7tSyr_fL9I8Nw-9LLlncfuUkYgtJkK1EKayc,13374
|
|
84
|
+
lollms_client-0.22.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
85
|
+
lollms_client-0.22.0.dist-info/top_level.txt,sha256=vgtOcmtJbKs9gEiIIPp2scIsvtvU2y5u3tngqlme1RU,37
|
|
86
|
+
lollms_client-0.22.0.dist-info/RECORD,,
|
personalities/parrot.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
|
|
2
|
+
def run(discussion, on_chunk_callback):
    """Override the normal chat flow: echo the last user message, parrot-style."""
    # Take the newest message on the currently active branch.
    latest = discussion.get_branch(discussion.active_branch_id)[-1]
    parroted = f"Squawk! {latest.content}! Squawk!"
    if on_chunk_callback:
        # We need to simulate the message type for the callback
        from lollms_client import MSG_TYPE
        on_chunk_callback(parroted, MSG_TYPE.MSG_TYPE_CHUNK)
    # Return the full raw response
    return parroted
|
|
@@ -1,37 +0,0 @@
|
|
|
1
|
-
from lollms_client import LollmsClient, LollmsDiscussion
|
|
2
|
-
from lollms_client import LollmsPersonality
|
|
3
|
-
from lollms_client import MSG_TYPE, ELF_GENERATION_FORMAT
|
|
4
|
-
from ascii_colors import ASCIIColors
|
|
5
|
-
# Callback send
|
|
6
|
-
def cb(chunk, type: MSG_TYPE):
|
|
7
|
-
print(chunk,end="", flush=True)
|
|
8
|
-
|
|
9
|
-
# Initialize the LollmsClient instance
|
|
10
|
-
lc = LollmsClient("http://localhost:9600",default_generation_mode=ELF_GENERATION_FORMAT.OPENAI)
|
|
11
|
-
# Bu_ild inline personality
|
|
12
|
-
p = LollmsPersonality(
|
|
13
|
-
lc,
|
|
14
|
-
"./personality/test/work_dir",
|
|
15
|
-
"./personality/test/config_dir",
|
|
16
|
-
cb,
|
|
17
|
-
None,
|
|
18
|
-
author="ParisNeo",
|
|
19
|
-
name="test_persona",
|
|
20
|
-
user_name="user",
|
|
21
|
-
category="generic",
|
|
22
|
-
category_desc="generic stuff",
|
|
23
|
-
language="English",
|
|
24
|
-
personality_conditioning="!@>system: Act as a helper to the user.",
|
|
25
|
-
welcome_message="Hi, I'm your helper. Let me help you",
|
|
26
|
-
|
|
27
|
-
)
|
|
28
|
-
d = LollmsDiscussion(lc)
|
|
29
|
-
prompt=""
|
|
30
|
-
ASCIIColors.green("To quit press q")
|
|
31
|
-
ASCIIColors.yellow(p.welcome_message)
|
|
32
|
-
while prompt!="q":
|
|
33
|
-
prompt = input("user:")
|
|
34
|
-
if prompt=="q":
|
|
35
|
-
break
|
|
36
|
-
p.generate(d,prompt,stream=True)
|
|
37
|
-
print("")
|
|
@@ -1,42 +0,0 @@
|
|
|
1
|
-
from lollms_client import LollmsClient, LollmsDiscussion
|
|
2
|
-
from lollms_client import LollmsPersonality
|
|
3
|
-
from lollms_client import MSG_TYPE, ELF_GENERATION_FORMAT
|
|
4
|
-
from ascii_colors import ASCIIColors
|
|
5
|
-
# Callback send
|
|
6
|
-
def cb(chunk, type: MSG_TYPE):
|
|
7
|
-
print(chunk,end="", flush=True)
|
|
8
|
-
|
|
9
|
-
# Initialize the LollmsClient instance
|
|
10
|
-
lc = LollmsClient("http://localhost:9600",default_generation_mode=ELF_GENERATION_FORMAT.LOLLMS)
|
|
11
|
-
# Bu_ild inline personality
|
|
12
|
-
aristotle_personality = LollmsPersonality(
|
|
13
|
-
lc,
|
|
14
|
-
"./personality/test/work_dir",
|
|
15
|
-
"./personality/test/config_dir",
|
|
16
|
-
cb,
|
|
17
|
-
None,
|
|
18
|
-
author="ParisNeo",
|
|
19
|
-
name="test_persona",
|
|
20
|
-
user_name="user",
|
|
21
|
-
category="generic",
|
|
22
|
-
category_desc="generic stuff",
|
|
23
|
-
language="English",
|
|
24
|
-
personality_conditioning="!@>system: Act as the philosopher Aristotle, sharing wisdom and engaging in logical discussions.",
|
|
25
|
-
welcome_message="Greetings, I am Aristotle, your guide in the pursuit of knowledge. How may I assist you in your philosophical inquiries?",
|
|
26
|
-
)
|
|
27
|
-
# Create a Discussion instance for Aristotle
|
|
28
|
-
aristotle_discussion = LollmsDiscussion(lc)
|
|
29
|
-
|
|
30
|
-
# Initialize user prompt
|
|
31
|
-
prompt = ""
|
|
32
|
-
|
|
33
|
-
# Print welcome message in yellow
|
|
34
|
-
ASCIIColors.yellow(aristotle_personality.welcome_message)
|
|
35
|
-
|
|
36
|
-
# Interaction loop
|
|
37
|
-
while prompt.lower() != "q":
|
|
38
|
-
prompt = input("student: ")
|
|
39
|
-
if prompt.lower() == "q":
|
|
40
|
-
break
|
|
41
|
-
aristotle_personality.generate(aristotle_discussion, prompt, stream=True)
|
|
42
|
-
print("")
|