lollms-client 1.3.3__py3-none-any.whl → 1.3.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/llamacpp/__init__.py +354 -233
- lollms_client/llm_bindings/lollms/__init__.py +152 -153
- lollms_client/lollms_core.py +163 -75
- lollms_client/lollms_discussion.py +2 -2
- lollms_client/lollms_llm_binding.py +3 -3
- lollms_client/lollms_tts_binding.py +80 -67
- lollms_client/tts_bindings/bark/__init__.py +110 -329
- lollms_client/tts_bindings/bark/server/install_bark.py +64 -0
- lollms_client/tts_bindings/bark/server/main.py +311 -0
- lollms_client/tts_bindings/piper_tts/__init__.py +115 -335
- lollms_client/tts_bindings/piper_tts/server/install_piper.py +92 -0
- lollms_client/tts_bindings/piper_tts/server/main.py +425 -0
- lollms_client/tts_bindings/piper_tts/server/setup_voices.py +67 -0
- lollms_client/tts_bindings/xtts/__init__.py +99 -305
- lollms_client/tts_bindings/xtts/server/main.py +314 -0
- lollms_client/tts_bindings/xtts/server/setup_voices.py +67 -0
- {lollms_client-1.3.3.dist-info → lollms_client-1.3.6.dist-info}/METADATA +1 -1
- {lollms_client-1.3.3.dist-info → lollms_client-1.3.6.dist-info}/RECORD +22 -15
- {lollms_client-1.3.3.dist-info → lollms_client-1.3.6.dist-info}/WHEEL +0 -0
- {lollms_client-1.3.3.dist-info → lollms_client-1.3.6.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-1.3.3.dist-info → lollms_client-1.3.6.dist-info}/top_level.txt +0 -0
lollms_client/llm_bindings/lollms/__init__.py
CHANGED
@@ -67,178 +67,177 @@ class LollmsBinding(LollmsLLMBinding):
         else:
             return {"status": False, "error": response.text}
 
-    def generate_text(self,
-                      n_threads: Optional[int] = None,
-                      ctx_size: int | None = None,
-                      streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
-                      split:Optional[bool]=False, # put to true if the prompt is a discussion
-                      user_keyword:Optional[str]="!@>user:",
-                      ai_keyword:Optional[str]="!@>assistant:",
-                      ) -> Union[str, dict]:
-        """
-        Generate text using the active LLM binding, using instance defaults if parameters are not provided.
+
+    def _build_openai_params(self, messages: list, **kwargs) -> dict:
+        model = kwargs.get("model", self.model_name)
+        if "n_predict" in kwargs:
+            kwargs["max_tokens"] = kwargs.pop("n_predict")
+
+        restricted_families = [
+            "gpt-5",
+            "gpt-4o",
+            "o1",
+            "o3",
+            "o4"
+        ]
 
+        allowed_params = {
+            "model", "messages", "temperature", "top_p", "n",
+            "stop", "max_tokens", "presence_penalty", "frequency_penalty",
+            "logit_bias", "stream", "user", "max_completion_tokens"
+        }
+
+        params = {
+            "model": model,
+            "messages": messages,
+        }
+
+        for k, v in kwargs.items():
+            if k in allowed_params and v is not None:
+                params[k] = v
+            else:
+                if v is not None:
+                    ASCIIColors.warning(f"Removed unsupported OpenAI param '{k}'")
+
+        model_lower = model.lower()
+        if any(fam in model_lower for fam in restricted_families):
+            if "temperature" in params and params["temperature"] != 1:
+                ASCIIColors.warning(f"{model} does not support temperature != 1. Overriding to 1.")
+                params["temperature"] = 1
+            if "top_p" in params:
+                ASCIIColors.warning(f"{model} does not support top_p. Removing it.")
+                params.pop("top_p")
+
+        return params
+
+    def generate_text(self,
+                      prompt: str,
+                      images: Optional[List[str]] = None,
+                      system_prompt: str = "",
+                      n_predict: Optional[int] = None,
+                      stream: Optional[bool] = None,
+                      temperature: float = 0.7,
+                      top_k: int = 40,
+                      top_p: float = 0.9,
+                      repeat_penalty: float = 1.1,
+                      repeat_last_n: int = 64,
+                      seed: Optional[int] = None,
+                      n_threads: Optional[int] = None,
+                      ctx_size: int | None = None,
+                      streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
+                      split: Optional[bool] = False,
+                      user_keyword: Optional[str] = "!@>user:",
+                      ai_keyword: Optional[str] = "!@>assistant:"
+                      ) -> Union[str, dict]:
 
-        Returns:
-            Union[str, dict]: Generated text or error dictionary if failed.
-        """
         count = 0
         output = ""
-        messages = [
-            {
-                "role": "system",
-                "content": system_prompt or "You are a helpful assistant.",
-            }
-        ]
+        messages = [{"role": "system", "content": system_prompt or "You are a helpful assistant."}]
 
-        # Prepare messages based on whether images are provided
         if images:
             if split:
-                messages += self.split_discussion(prompt,user_keyword=user_keyword, ai_keyword=ai_keyword)
-                        "text": messages[-1]["content"]
-                    }
-                ]+[
-                    {
-                        "type": "image_url",
-                        "image_url": {
-                            "url": f"data:image/jpeg;base64,{encode_image(image_path)}"
-                        }
-                    }
-                    for image_path in images
-                ]
+                messages += self.split_discussion(prompt, user_keyword=user_keyword, ai_keyword=ai_keyword)
+                messages[-1]["content"] = [{"type": "text", "text": messages[-1]["content"]}] + [
+                    {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encode_image(path)}"}}
+                    for path in images
+                ]
             else:
                 messages.append({
-                ] + [
-                    {
-                        "type": "image_url",
-                        "image_url": {
-                            "url": f"data:image/jpeg;base64,{encode_image(image_path)}"
-                        }
-                    }
-                    for image_path in images
-                ]
-                }
-                )
+                    'role': 'user',
+                    'content': [{"type": "text", "text": prompt}] + [
+                        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encode_image(path)}"}}
+                        for path in images
+                    ]
+                })
         else:
             if split:
-                messages += self.split_discussion(prompt,user_keyword=user_keyword, ai_keyword=ai_keyword)
-                if images:
-                    messages[-1]["content"] = [
-                        {
-                            "type": "text",
-                            "text": messages[-1]["content"]
-                        }
-                    ]
+                messages += self.split_discussion(prompt, user_keyword=user_keyword, ai_keyword=ai_keyword)
             else:
-                messages.append({
-                    'role': 'user',
-                    'content': [
-                        {
-                            "type": "text",
-                            "text": prompt
-                        }
-                    ]
-                }
-                )
-
-        # Generate text using the OpenAI API
-        if self.completion_format == ELF_COMPLETION_FORMAT.Chat:
-            chat_completion = self.client.chat.completions.create(
-                model=self.model_name, # Choose the engine according to your OpenAI plan
-                messages=messages,
-                max_tokens=n_predict, # Adjust the desired length of the generated response
-                n=1, # Specify the number of responses you want
-                temperature=temperature, # Adjust the temperature for more or less randomness in the output
-                stream=stream
-            )
+                messages.append({'role': 'user', 'content': [{"type": "text", "text": prompt}]})
 
+        try:
+            if self.completion_format == ELF_COMPLETION_FORMAT.Chat:
+                params = self._build_openai_params(messages=messages,
+                                                   n_predict=n_predict,
+                                                   stream=stream,
+                                                   temperature=temperature,
+                                                   top_p=top_p,
+                                                   repeat_penalty=repeat_penalty,
+                                                   seed=seed)
+                try:
+                    chat_completion = self.client.chat.completions.create(**params)
+                except Exception as ex:
+                    # exception for new openai models
+                    params["max_completion_tokens"]=params["max_tokens"]
+                    params["temperature"]=1
+                    try: del params["max_tokens"]
+                    except Exception: pass
+                    try: del params["top_p"]
+                    except Exception: pass
+                    try: del params["frequency_penalty"]
+                    except Exception: pass
+
+                    chat_completion = self.client.chat.completions.create(**params)
+
+                if stream:
+                    for resp in chat_completion:
+                        if count >= (n_predict or float('inf')):
                             break
+                        word = getattr(resp.choices[0].delta, "content", "") or ""
+                        if streaming_callback and not streaming_callback(word, MSG_TYPE.MSG_TYPE_CHUNK):
+                            break
+                        if word:
+                            output += word
+                            count += 1
+                else:
+                    output = chat_completion.choices[0].message.content
+
             else:
-
-            except Exception as ex:
-                word = ""
-                if streaming_callback is not None:
-                    if not streaming_callback(word, "MSG_TYPE_CHUNK"):
+                params = self._build_openai_params(prompt=prompt,
+                                                   n_predict=n_predict,
+                                                   stream=stream,
+                                                   temperature=temperature,
+                                                   top_p=top_p,
+                                                   repeat_penalty=repeat_penalty,
+                                                   seed=seed)
+                try:
+                    completion = self.client.completions.create(**params)
+                except Exception as ex:
+                    # exception for new openai models
+                    params["max_completion_tokens"]=params["max_tokens"]
+                    params["temperature"]=1
+                    try: del params["max_tokens"]
+                    except Exception: pass
+                    try: del params["top_p"]
+                    except Exception: pass
+                    try: del params["frequency_penalty"]
+                    except Exception: pass
+
+                    completion = self.client.completions.create(**params)
+
+                if stream:
+                    for resp in completion:
+                        if count >= (n_predict or float('inf')):
                             break
+                        word = getattr(resp.choices[0], "text", "") or ""
+                        if streaming_callback and not streaming_callback(word, MSG_TYPE.MSG_TYPE_CHUNK):
+                            break
+                        if word:
+                            output += word
+                            count += 1
+                else:
+                    output = completion.choices[0].text
+
+        except Exception as e:
+            trace_exception(e)
+            err_msg = f"An error occurred with the OpenAI API: {e}"
+            if streaming_callback:
+                streaming_callback(err_msg, MSG_TYPE.MSG_TYPE_EXCEPTION)
+            return {"status": "error", "message": err_msg}
 
         return output
+
 
     def generate_from_messages(self,
                                messages: List[Dict],
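The new `_build_openai_params` helper carries most of this change: it renames `n_predict` to `max_tokens`, drops any keyword that is not in the OpenAI allow-list, and relaxes sampling settings for model families that reject them. Below is a minimal standalone sketch of that filtering behaviour (a simplified stand-in, not the package code; the allow-list and family names are copied from the diff above, everything else is illustrative):

```python
# Standalone sketch of the allow-list filtering added in _build_openai_params.
# ALLOWED and RESTRICTED_FAMILIES mirror the diff; build_params is a simplified
# stand-in that drops unsupported keys silently instead of warning.
ALLOWED = {"model", "messages", "temperature", "top_p", "n", "stop",
           "max_tokens", "presence_penalty", "frequency_penalty",
           "logit_bias", "stream", "user", "max_completion_tokens"}
RESTRICTED_FAMILIES = ["gpt-5", "gpt-4o", "o1", "o3", "o4"]

def build_params(model: str, messages: list, **kwargs) -> dict:
    if "n_predict" in kwargs:                       # lollms name -> OpenAI name
        kwargs["max_tokens"] = kwargs.pop("n_predict")
    params = {"model": model, "messages": messages}
    params.update({k: v for k, v in kwargs.items() if k in ALLOWED and v is not None})
    if any(fam in model.lower() for fam in RESTRICTED_FAMILIES):
        params["temperature"] = 1                   # these families only accept the default
        params.pop("top_p", None)                   # and reject top_p entirely
    return params

print(build_params("gpt-4o", [{"role": "user", "content": "hi"}],
                   n_predict=128, temperature=0.7, top_p=0.9, repeat_penalty=1.1))
# repeat_penalty is dropped, temperature is forced to 1, top_p is removed,
# and n_predict arrives as max_tokens=128.
```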
lollms_client/lollms_core.py
CHANGED
@@ -1483,7 +1483,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
                         system_prompt: str|None = None,
                         reasoning_system_prompt: str = "You are a logical AI assistant. Your task is to achieve the user's goal by thinking step-by-step and using the available tools.",
                         images: Optional[List[str]] = None,
-                        max_reasoning_steps: int =
+                        max_reasoning_steps: int|None = None,
                         decision_temperature: float = 0.5,
                         final_answer_temperature: float = 0.7,
                         streaming_callback: Optional[Callable[[str, 'MSG_TYPE', Optional[Dict], Optional[List]], bool]] = None,
@@ -1499,6 +1499,14 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
 
         if not self.llm:
             return {"final_answer": "", "tool_calls": [], "sources": [], "error": "LLM binding not initialized."}
+        if max_reasoning_steps is None:
+            max_reasoning_steps=15
+        if rag_min_similarity_percent is None:
+            rag_min_similarity_percent=50.0
+        if final_answer_temperature is None:
+            final_answer_temperature=0.7
+        if rag_top_k is None:
+            rag_top_k=5
 
         def log_event(desc, event_type=MSG_TYPE.MSG_TYPE_CHUNK, meta=None, event_id=None) -> Optional[str]:
             if not streaming_callback: return None
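For reference, these are the fallbacks the added `None` checks apply; the snippet only restates the values from the diff in a runnable form (the `coalesce` helper is illustrative, not part of the package):

```python
# Effective fallbacks applied when these arguments are left as None
# (values taken verbatim from the added lines above).
EFFECTIVE_DEFAULTS = {
    "max_reasoning_steps": 15,
    "rag_min_similarity_percent": 50.0,
    "final_answer_temperature": 0.7,
    "rag_top_k": 5,
}

def coalesce(name: str, value):
    """Mirror of the None-check pattern introduced in the diff."""
    return EFFECTIVE_DEFAULTS[name] if value is None else value

print(coalesce("max_reasoning_steps", None))  # -> 15
print(coalesce("max_reasoning_steps", 8))     # -> 8
```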
@@ -1606,7 +1614,7 @@ Output ONLY the JSON for the tool's parameters: {{"tool_params": {{...}}}}"""
             else:
                 tool_result = {"status": "failure", "error": f"Tool '{tool_name}' could not be executed in single-step mode."}
 
-            if tool_result.get("status") != "success":
+            if tool_result.get("status","success") != "success" or "error" in tool_result:
                 error_detail = tool_result.get("error", "Unknown tool error in single-step mode.")
                 raise RuntimeError(error_detail)
 
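The stricter failure test changes which tool results are treated as errors: a result without a `status` key is no longer flagged, while any result carrying an `error` key now is. A quick illustration with hypothetical result dicts:

```python
# Hypothetical tool results showing what each check flags as a failure.
old_check = lambda r: r.get("status") != "success"
new_check = lambda r: r.get("status", "success") != "success" or "error" in r

samples = [
    {"status": "success", "output": 42},                # old: ok,      new: ok
    {"output": 42},                                     # old: failure, new: ok (status defaults to success)
    {"status": "failure", "error": "boom"},             # old: failure, new: failure
    {"status": "success", "error": "partial failure"},  # old: ok,      new: failure (error key wins)
]
for r in samples:
    print(r, "old:", old_check(r), "new:", new_check(r))
```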
@@ -1734,7 +1742,7 @@ Output only: {{"tool_params": {{...}}}}"""
                 if tool_name in rag_registry:
                     query = hydrated_params.get("query", "")
                     top_k, min_sim = rag_tool_specs[tool_name]["default_top_k"], rag_tool_specs[tool_name]["default_min_sim"]
-                    raw_results = rag_registry[tool_name](query=query,
+                    raw_results = rag_registry[tool_name](query=query, rag_top_k=top_k)
                     raw_iter = raw_results["results"] if isinstance(raw_results, dict) and "results" in raw_results else raw_results
                     docs = [{"text": d.get("text", str(d)), "score": d.get("score", 0)*100, "metadata": d.get("metadata", {})} for d in raw_iter or []]
                     kept = [x for x in docs if x['score'] >= min_sim]
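The surrounding lines normalize raw RAG hits (scores scaled from 0..1 to percent) and drop anything below the minimum-similarity threshold. A tiny self-contained illustration with made-up chunks:

```python
# Made-up retrieval results; normalization and filtering mirror the lines above.
raw_iter = [{"text": "chunk A", "score": 0.72, "metadata": {"doc": "a.md"}},
            {"text": "chunk B", "score": 0.31}]
min_sim = 50.0  # percent

docs = [{"text": d.get("text", str(d)),
         "score": d.get("score", 0) * 100,            # 0..1 similarity -> percent
         "metadata": d.get("metadata", {})} for d in raw_iter or []]
kept = [x for x in docs if x["score"] >= min_sim]
print([d["text"] for d in kept])                      # -> ['chunk A']
```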
@@ -1832,7 +1840,7 @@ FINAL ANSWER:"""
         if not system_prompt:
             system_prompt = f"""Act as a code generation assistant that generates code from user prompt."""
 
-        if template:
+        if template and template !="{}":
             if language in ["json","yaml","xml"]:
                 system_prompt += f"\nMake sure the generated context follows the following schema:\n```{language}\n{template}\n```\n"
             else:
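The added `template != "{}"` guard keeps an empty JSON template from injecting a pointless schema block into the code-generation system prompt. Illustrative values:

```python
# Which templates now trigger the schema instructions (values are examples only).
for template in (None, "", "{}", '{"title": "string"}'):
    appended = bool(template) and template != "{}"
    print(repr(template), "->", "schema appended" if appended else "ignored")
```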
@@ -1921,92 +1929,172 @@ Do not split the code in multiple tags.
     def generate_structured_content(
             self,
             prompt,
-            images=
-            schema=
+            images=None,
+            schema=None,
             system_prompt=None,
+            max_retries=1,
             **kwargs
     ):
-        prompt (str):
-            The user's request (e.g., "Extract the name, age, and city of the person described").
-        schema (dict or str):
-            A Python dictionary or a JSON string representing the desired output
-            structure. This will be used as a schema for the LLM.
-            Example: {"name": "string", "age": "integer", "city": "string"}
-        system_prompt (str, optional):
-            Additional instructions for the system prompt, to be appended to the
-            main instructions. Defaults to None.
-        **kwargs:
-            Additional keyword arguments to be passed directly to the
-            `generate_code` method (e.g., temperature, n_predict, top_k, debug).
+        import json
+        images = [] if images is None else images
+        schema = {} if schema is None else schema
+        try:
+            from jsonschema import validate
+            has_validator = True
+        except ImportError:
+            has_validator = False
 
-        Returns:
-            dict: The parsed JSON data as a Python dictionary, or None if
-            generation or parsing fails.
-        """
-        # 1. Validate and prepare the schema string from the schema
         if isinstance(schema, dict):
-            schema_str = json.dumps(schema, indent=2)
+            schema_obj = schema
         elif isinstance(schema, str):
+            try:
+                schema_obj = json.loads(schema)
+            except json.JSONDecodeError as e:
+                raise ValueError(f"The provided schema string is not valid JSON: {e}")
         else:
-            # It's good practice to fail early for invalid input types
             raise TypeError("schema must be a dict or a JSON string.")
+
+        # --- FIX STARTS HERE ---
+        # Heuristic to detect if the schema is a properties-only dictionary
+        # and needs to be wrapped in a root object to be a valid schema.
+        # This handles cases where the user provides `{"field1": {...}, "field2": {...}}`
+        # instead of `{"type": "object", "properties": {"field1": ...}}`.
+        if "type" not in schema_obj and "properties" not in schema_obj and all(isinstance(v, dict) for v in schema_obj.values()):
+            if kwargs.get("debug"):
+                ASCIIColors.info("Schema appears to be a properties-only dictionary; wrapping it in a root object.")
+            schema_obj = {
+                "type": "object",
+                "properties": schema_obj,
+                # Assume all top-level keys are required when wrapping
+                "required": list(schema_obj.keys())
+            }
+        # --- FIX ENDS HERE ---
+
+        def _instance_skeleton(s):
+            if not isinstance(s, dict):
+                return {}
+            if "const" in s:
+                return s["const"]
+            if "enum" in s and isinstance(s["enum"], list) and s["enum"]:
+                return s["enum"][0]
+
+            # Handle default values
+            if "default" in s:
+                return s["default"]
+
+            t = s.get("type")
+            if t == "string":
+                return ""
+            if t == "integer":
+                return 0
+            if t == "number":
+                return 0.0
+            if t == "boolean":
+                return False
+            if t == "array":
+                # Generate one minimal item if schema is provided
+                items = s.get("items", {})
+                min_items = s.get("minItems", 0)
+                # Let's generate at least one item for the example if possible
+                num_items = max(min_items, 1) if items and not min_items == 0 else min_items
+                return [_instance_skeleton(items) for _ in range(num_items)]
+            if t == "object":
+                props = s.get("properties", {})
+                # Use required fields, otherwise fall back to all properties for the skeleton
+                req = s.get("required", list(props.keys()))
+                out = {}
+                for k in req:
+                    if k in props:
+                        out[k] = _instance_skeleton(props[k])
+                    else:
+                        out[k] = None # Should not happen if schema is well-formed
+                return out
+            if "oneOf" in s and isinstance(s["oneOf"], list) and s["oneOf"]:
+                return _instance_skeleton(s["oneOf"][0])
+            if "anyOf" in s and isinstance(s["anyOf"], list) and s["anyOf"]:
+                return _instance_skeleton(s["anyOf"][0])
+            if "allOf" in s and isinstance(s["allOf"], list) and s["allOf"]:
+                merged = {}
+                for sub in s["allOf"]:
+                    val = _instance_skeleton(sub)
+                    if isinstance(val, dict):
+                        merged.update(val)
+                return merged if merged else {}
+            return {}
+
+        # Now derive strings from the (potentially corrected) schema_obj
+        schema_str = json.dumps(schema_obj, indent=2, ensure_ascii=False)
+        example_obj = _instance_skeleton(schema_obj)
+        example_str = json.dumps(example_obj, indent=2, ensure_ascii=False)
+
+        base_system = (
+            "Your objective is to generate a JSON object that satisfies the user's request and conforms to the provided schema.\n"
+            "Rules:\n"
+            "1) The schema is reference ONLY. Do not include the schema in the output.\n"
+            "2) Output exactly ONE valid JSON object.\n"
+            "3) Wrap the JSON object inside a single ```json code block.\n"
+            "4) Do not output explanations or text outside the JSON.\n"
+            "5) Use 2 spaces for indentation. Do not use tabs.\n"
+            "6) Only include fields allowed by the schema and ensure all required fields are present.\n"
+            "7) For enums, choose a valid value from the list.\n\n"
+            "Schema (reference only):\n"
+            f"```json\n{schema_str}\n```\n\n"
+            "Correct example of output format (structure only, values are illustrative):\n"
+            f"```json\n{example_str}\n```"
         )
-        if system_prompt
-            full_system_prompt = f"{system_prompt}\n\n{full_system_prompt}"
+        full_system_prompt = f"{system_prompt}\n\n{base_system}" if system_prompt else base_system
 
-        if kwargs.get('debug'):
+        if kwargs.get("debug"):
             ASCIIColors.info("Generating structured content...")
 
+        last_error = None
+        for attempt in range(max_retries + 1):
+            json_string = self.generate_code(
+                prompt=prompt,
+                images=images,
+                system_prompt=full_system_prompt if attempt == 0 else f"{full_system_prompt}\n\nPrevious attempt failed validation: {last_error}\nReturn a corrected JSON instance that strictly satisfies the schema.",
+                template=example_str,
+                language="json",
+                code_tag_format="markdown",
+                **kwargs
+            )
+            if not json_string:
+                last_error = "LLM returned an empty response."
+                if kwargs.get("debug"): ASCIIColors.warning(last_error)
+                continue
 
+            if kwargs.get("debug"):
+                ASCIIColors.info("Parsing generated JSON string...")
+                print(f"--- Raw JSON String ---\n{json_string}\n-----------------------")
 
-            return None
+            try:
+                parsed_json = robust_json_parser(json_string)
+                if parsed_json is None:
+                    last_error = "Failed to robustly parse the generated string into JSON."
+                    if kwargs.get("debug"): ASCIIColors.warning(last_error)
+                    continue
 
+                if has_validator:
+                    try:
+                        validate(instance=parsed_json, schema=schema_obj)
+                        return parsed_json
+                    except Exception as ve:
+                        last_error = f"JSON Schema Validation Error: {ve}"
+                        if kwargs.get("debug"): ASCIIColors.warning(last_error)
+                        if attempt < max_retries:
+                            continue
+                        # Return the invalid object after last retry if validation fails
+                        return parsed_json
+                return parsed_json
+            except Exception as e:
+                trace_exception(e)
+                ASCIIColors.error(f"Unexpected error during JSON processing: {e}")
+                last_error = f"An unexpected error occurred: {e}"
+                # Do not retry on unexpected errors, break the loop
+                break
+
+        ASCIIColors.error(f"Failed to generate valid structured content after {max_retries + 1} attempts. Last error: {last_error}")
+        return None
 
 
     def extract_code_blocks(self, text: str, format: str = "markdown") -> List[dict]:
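Two ideas carry the rewritten `generate_structured_content`: a properties-only dict is wrapped into a full JSON Schema, and a skeleton instance is derived from that schema to show the model the expected shape. Below is a condensed standalone sketch of both (simplified stand-ins; the real `_instance_skeleton` above also handles enums, defaults, `minItems` and `oneOf`/`anyOf`/`allOf`):

```python
import json

# Simplified stand-ins for the wrapping heuristic and skeleton builder above.
def wrap_if_properties_only(schema: dict) -> dict:
    # {"name": {...}, "age": {...}} -> {"type": "object", "properties": ..., "required": [...]}
    if "type" not in schema and "properties" not in schema \
            and all(isinstance(v, dict) for v in schema.values()):
        return {"type": "object", "properties": schema, "required": list(schema)}
    return schema

def skeleton(s: dict):
    t = s.get("type")
    if t == "object":
        return {k: skeleton(v) for k, v in s.get("properties", {}).items()}
    return {"string": "", "integer": 0, "number": 0.0, "boolean": False, "array": []}.get(t, {})

user_schema = {"name": {"type": "string"}, "age": {"type": "integer"}}
full_schema = wrap_if_properties_only(user_schema)
print(json.dumps(skeleton(full_schema), indent=2))   # -> {"name": "", "age": 0}
```

With the new `max_retries` parameter (default 1), the method re-prompts once with the jsonschema validation error appended if the first generated instance fails to validate.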
lollms_client/lollms_discussion.py
CHANGED
@@ -1944,7 +1944,7 @@ class LollmsDiscussion:
             "required": ["title"],
             "description": "JSON object as title of the discussion."
         }
-        infos = self.lollmsClient.generate_structured_content(prompt = prompt, system_prompt=system_prompt, schema = title_generation_schema)
+        infos = self.lollmsClient.generate_structured_content(prompt = prompt, system_prompt=system_prompt, schema = title_generation_schema, n_predict=512)
         if infos is None or "title" not in infos:
             raise ValueError("Title generation failed or returned invalid data.")
         discussion_title = infos["title"]
@@ -1995,7 +1995,7 @@ class LollmsDiscussion:
         (a list of base64 strings), it converts it to the new format (a list
         of dictionaries) and marks the discussion for saving.
         """
-        if not self.images:
+        if not self.images or len(self.images) == 0 or type(self.images) is not list:
             return []
 
         # Check if migration is needed (if the first element is a string).
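The hardened guard also returns early when `self.images` holds a legacy non-list value instead of failing later during migration. Illustrative payloads (hypothetical values):

```python
# Which image payloads the new guard skips (returns []) versus migrates.
for images in (None, [], "single-base64-string", [{"data": "...", "type": "base64"}]):
    skip = not images or len(images) == 0 or type(images) is not list
    print(repr(images), "->", "return [] early" if skip else "proceed to migration")
```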