lollms-client 1.4.0__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lollms_client/__init__.py +1 -1
- lollms_client/lollms_core.py +678 -78
- lollms_client/lollms_discussion.py +13 -151
- lollms_client/tti_bindings/diffusers/__init__.py +34 -12
- {lollms_client-1.4.0.dist-info → lollms_client-1.4.1.dist-info}/METADATA +1 -1
- {lollms_client-1.4.0.dist-info → lollms_client-1.4.1.dist-info}/RECORD +9 -9
- {lollms_client-1.4.0.dist-info → lollms_client-1.4.1.dist-info}/WHEEL +0 -0
- {lollms_client-1.4.0.dist-info → lollms_client-1.4.1.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-1.4.0.dist-info → lollms_client-1.4.1.dist-info}/top_level.txt +0 -0
lollms_client/__init__.py
CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utilities
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager

-__version__ = "1.4.0"
+__version__ = "1.4.1" # Updated version

 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
lollms_client/lollms_core.py
CHANGED
|
@@ -3921,23 +3921,35 @@ FINAL RESPONSE:"""
|
|
|
3921
3921
|
repeat_penalty:float|None=None,
|
|
3922
3922
|
repeat_last_n:int|None=None,
|
|
3923
3923
|
callback=None,
|
|
3924
|
-
debug:bool=False
|
|
3924
|
+
debug:bool=False,
|
|
3925
|
+
override_all_prompts:bool=False ):
|
|
3925
3926
|
"""
|
|
3926
3927
|
Generates a single code block based on a prompt.
|
|
3927
3928
|
Uses the underlying LLM binding via `generate_text`.
|
|
3928
3929
|
Handles potential continuation if the code block is incomplete.
|
|
3930
|
+
|
|
3931
|
+
Args:
|
|
3932
|
+
override_all_prompts: If True, uses only the provided prompt and system_prompt
|
|
3933
|
+
without any internal prompt engineering or modifications.
|
|
3929
3934
|
"""
|
|
3930
|
-
|
|
3931
|
-
|
|
3935
|
+
|
|
3936
|
+
# Use original prompts without modification if override is enabled
|
|
3937
|
+
if override_all_prompts:
|
|
3938
|
+
final_system_prompt = system_prompt if system_prompt else ""
|
|
3939
|
+
final_prompt = prompt
|
|
3940
|
+
else:
|
|
3941
|
+
# Original prompt engineering logic
|
|
3942
|
+
if not system_prompt:
|
|
3943
|
+
system_prompt = f"""Act as a code generation assistant that generates code from user prompt."""
|
|
3932
3944
|
|
|
3933
|
-
|
|
3934
|
-
|
|
3935
|
-
|
|
3936
|
-
|
|
3937
|
-
|
|
3938
|
-
|
|
3939
|
-
|
|
3940
|
-
|
|
3945
|
+
if template and template !="{}":
|
|
3946
|
+
if language in ["json","yaml","xml"]:
|
|
3947
|
+
system_prompt += f"\nMake sure the generated context follows the following schema:\n```{language}\n{template}\n```\n"
|
|
3948
|
+
else:
|
|
3949
|
+
system_prompt += f"\nHere is a template of the answer:\n```{language}\n{template}\n```\n"
|
|
3950
|
+
|
|
3951
|
+
if code_tag_format=="markdown":
|
|
3952
|
+
system_prompt += f"""You must answer with the code placed inside the markdown code tag:
|
|
3941
3953
|
```{language}
|
|
3942
3954
|
```
|
|
3943
3955
|
"""
|
|
@@ -3946,14 +3958,17 @@ FINAL RESPONSE:"""
 <code language="{language}">
 </code>
 """
-            …
+            system_prompt += f"""You must return a single code tag.
 Do not split the code in multiple tags.
 {self.ai_full_header}"""
+
+            final_system_prompt = system_prompt
+            final_prompt = prompt

         response = self.generate_text(
-            …
+            final_prompt,
             images=images,
-            system_prompt=…
+            system_prompt=final_system_prompt,
             n_predict=n_predict,
             temperature=temperature,
             top_k=top_k,
@@ -3964,8 +3979,8 @@ Do not split the code in multiple tags.
         )

         if isinstance(response, dict) and not response.get("status", True):
-            …
+            ASCIIColors.error(f"Code generation failed: {response.get('error')}")
+            return None

         codes = self.extract_code_blocks(response, format=code_tag_format)
         code_content = None
@@ -3975,46 +3990,425 @@ Do not split the code in multiple tags.
             code_content = last_code["content"]

             # Handle incomplete code block continuation (simple approach)
-            …
-            retries…
-            …
+            # Skip continuation logic if override_all_prompts is True to respect user's intent
+            if not override_all_prompts:
+                max_retries = 3 # Limit continuation attempts
+                retries = 0
+                while not last_code["is_complete"] and retries < max_retries:
+                    retries += 1
+                    ASCIIColors.info(f"Code block seems incomplete. Attempting continuation ({retries}/{max_retries})...")
+                    continuation_prompt = f"{prompt}\n\nAssistant:\n{code_content}\n\n{self.user_full_header}The previous code block was incomplete. Continue the code exactly from where it left off. Do not repeat the previous part. Only provide the continuation inside a single {code_tag_format} code tag.\n{self.ai_full_header}"
+
+                    continuation_response = self.generate_text(
+                        continuation_prompt,
+                        images=images, # Resend images if needed for context
+                        n_predict=n_predict, # Allow space for continuation
+                        temperature=temperature, # Use same parameters
+                        top_k=top_k,
+                        top_p=top_p,
+                        repeat_penalty=repeat_penalty,
+                        repeat_last_n=repeat_last_n,
+                        streaming_callback=callback
+                    )
+
+                    if isinstance(continuation_response, dict) and not continuation_response.get("status", True):
+                        ASCIIColors.warning(f"Continuation attempt failed: {continuation_response.get('error')}")
+                        break # Stop trying if generation fails
+
+                    continuation_codes = self.extract_code_blocks(continuation_response, format=code_tag_format)
+
+                    if continuation_codes:
+                        new_code_part = continuation_codes[0]["content"]
+                        code_content += "\n" + new_code_part # Append continuation
+                        last_code["is_complete"] = continuation_codes[0]["is_complete"] # Update completeness
+                        if last_code["is_complete"]:
+                            ASCIIColors.info("Code block continuation successful.")
+                            break # Exit loop if complete
+                    else:
+                        ASCIIColors.warning("Continuation response contained no code block.")
+                        break # Stop if no code block found in continuation
+
+                if not last_code["is_complete"]:
+                    ASCIIColors.warning("Code block remained incomplete after multiple attempts.")
+
+        return code_content # Return the (potentially completed) code content or None
+
+
+    def update_code(
+        self,
+        original_code: str,
+        modification_prompt: str,
+        language: str = "python",
+        images=[],
+        system_prompt: str | None = None,
+        patch_format: str = "unified", # "unified" or "simple"
+        n_predict: int | None = None,
+        temperature: float | None = None,
+        top_k: int | None = None,
+        top_p: float | None = None,
+        repeat_penalty: float | None = None,
+        repeat_last_n: int | None = None,
+        callback=None,
+        debug: bool = False,
+        max_retries: int = 3
+    ):
+        """
+        Updates existing code based on a modification prompt by generating and applying patches.
+        Designed to work reliably even with smaller LLMs.
+
+        Args:
+            original_code: The original code to be modified
+            modification_prompt: Description of changes to apply to the code
+            language: Programming language of the code
+            patch_format: "unified" for diff-style patches or "simple" for line-based replacements
+            max_retries: Maximum number of attempts if patch generation/application fails
+
+        Returns:
+            dict: Contains 'success' (bool), 'updated_code' (str or None), 'patch' (str or None),
+                  and 'error' (str or None)
+        """
+
+        if not original_code or not original_code.strip():
+            return {
+                "success": False,
+                "updated_code": None,
+                "patch": None,
+                "error": "Original code is empty"
+            }
+
+        # Choose patch format based on LLM capabilities
+        if patch_format == "simple":
+            # Simple format for smaller LLMs - just old/new line pairs
+            patch_system_prompt = f"""You are a code modification assistant.
+You will receive {language} code and a modification request.
+Generate a patch using this EXACT format:
+
+PATCH_START
+REPLACE_LINE: <line_number>
+OLD: <exact_old_line>
+NEW: <new_line>
+REPLACE_LINE: <another_line_number>
+OLD: <exact_old_line>
+NEW: <new_line>
+PATCH_END
+
+For adding lines:
+ADD_AFTER: <line_number>
+NEW: <line_to_add>
+
+For removing lines:
+REMOVE_LINE: <line_number>
+
+Rules:
+- Line numbers start at 1
+- Match OLD lines EXACTLY including whitespace
+- Only include lines that need changes
+- Keep changes minimal and focused"""
+
+        else: # unified diff format
+            patch_system_prompt = f"""You are a code modification assistant.
+You will receive {language} code and a modification request.
+Generate a unified diff patch showing the changes.
+
+Format your response as:
+```diff
+@@ -start_line,count +start_line,count @@
+ context_line (unchanged)
+-removed_line
++added_line
+ context_line (unchanged)
+```
+
+Rules:
+- Use standard unified diff format
+- Include 1-2 lines of context around changes
+- Be precise with line numbers
+- Keep changes minimal"""
+
+        if system_prompt:
+            patch_system_prompt = system_prompt + "\n\n" + patch_system_prompt
+
+        # Prepare the prompt with line numbers for reference
+        numbered_code = "\n".join(
+            f"{i+1:4d}: {line}"
+            for i, line in enumerate(original_code.split("\n"))
+        )
+
+        patch_prompt = f"""Original {language} code (with line numbers for reference):
+```{language}
+{numbered_code}
+```
+
+Modification request: {modification_prompt}
+
+Generate a patch to apply these changes. Follow the format specified in your instructions exactly."""
+
+        retry_count = 0
+        last_error = None
+
+        while retry_count < max_retries:
+            try:
+                if debug:
+                    ASCIIColors.info(f"Attempting to generate patch (attempt {retry_count + 1}/{max_retries})")
+
+                # Generate the patch
+                response = self.generate_text(
+                    patch_prompt,
+                    images=images,
+                    system_prompt=patch_system_prompt,
+                    n_predict=n_predict or 2000, # Usually patches are not too long
+                    temperature=temperature or 0.3, # Lower temperature for more deterministic patches
                     top_k=top_k,
                     top_p=top_p,
                     repeat_penalty=repeat_penalty,
                     repeat_last_n=repeat_last_n,
                     streaming_callback=callback
                 )
+
+                if isinstance(response, dict) and not response.get("status", True):
+                    raise Exception(f"Patch generation failed: {response.get('error')}")
+
+                # Extract and apply the patch
+                if patch_format == "simple":
+                    updated_code, patch_text = self._apply_simple_patch(original_code, response, debug)
+                else:
+                    updated_code, patch_text = self._apply_unified_patch(original_code, response, debug)
+
+                if updated_code:
+                    if debug:
+                        ASCIIColors.success("Code successfully updated")
+                    return {
+                        "success": True,
+                        "updated_code": updated_code,
+                        "patch": patch_text,
+                        "error": None
+                    }
+                else:
+                    raise Exception("Failed to apply patch - no valid changes found")
+
+            except Exception as e:
+                last_error = str(e)
+                if debug:
+                    ASCIIColors.warning(f"Attempt {retry_count + 1} failed: {last_error}")
+
+                retry_count += 1
+
+                # Try simpler approach on retry
+                if retry_count < max_retries and patch_format == "unified":
+                    patch_format = "simple"
+                    if debug:
+                        ASCIIColors.info("Switching to simple patch format for next attempt")
+
+        # All retries exhausted
+        return {
+            "success": False,
+            "updated_code": None,
+            "patch": None,
+            "error": f"Failed after {max_retries} attempts. Last error: {last_error}"
+        }

-                if isinstance(continuation_response, dict) and not continuation_response.get("status", True):
-                    ASCIIColors.warning(f"Continuation attempt failed: {continuation_response.get('error')}")
-                    break # Stop trying if generation fails

-            …
+    def _apply_simple_patch(self, original_code: str, patch_response: str, debug: bool = False):
+        """
+        Apply a simple line-based patch to the original code.
+
+        Returns:
+            tuple: (updated_code or None, patch_text or None)
+        """
+        try:
+            lines = original_code.split("\n")
+            patch_lines = []
+
+            # Extract patch content
+            if "PATCH_START" in patch_response and "PATCH_END" in patch_response:
+                start_idx = patch_response.index("PATCH_START")
+                end_idx = patch_response.index("PATCH_END")
+                patch_content = patch_response[start_idx + len("PATCH_START"):end_idx].strip()
+            else:
+                # Try to extract patch instructions even without markers
+                patch_content = patch_response
+
+            # Track modifications to apply them in reverse order (to preserve line numbers)
+            modifications = []
+
+            for line in patch_content.split("\n"):
+                line = line.strip()
+                if not line:
+                    continue
+
+                patch_lines.append(line)
+
+                if line.startswith("REPLACE_LINE:"):
+                    try:
+                        line_num = int(line.split(":")[1].strip()) - 1 # Convert to 0-based
+                        # Look for OLD and NEW in next lines
+                        idx = patch_content.index(line)
+                        remaining = patch_content[idx:].split("\n")
+
+                        old_line = None
+                        new_line = None
+
+                        for next_line in remaining[1:]:
+                            if next_line.strip().startswith("OLD:"):
+                                old_line = next_line[next_line.index("OLD:") + 4:].strip()
+                            elif next_line.strip().startswith("NEW:"):
+                                new_line = next_line[next_line.index("NEW:") + 4:].strip()
+                                break
+
+                        if old_line is not None and new_line is not None:
+                            modifications.append(("replace", line_num, old_line, new_line))
+
+                    except (ValueError, IndexError) as e:
+                        if debug:
+                            ASCIIColors.warning(f"Failed to parse REPLACE_LINE: {e}")
+
+                elif line.startswith("ADD_AFTER:"):
+                    try:
+                        line_num = int(line.split(":")[1].strip()) - 1
+                        # Look for NEW in next line
+                        idx = patch_content.index(line)
+                        remaining = patch_content[idx:].split("\n")
+
+                        for next_line in remaining[1:]:
+                            if next_line.strip().startswith("NEW:"):
+                                new_line = next_line[next_line.index("NEW:") + 4:].strip()
+                                modifications.append(("add_after", line_num, None, new_line))
+                                break
+
+                    except (ValueError, IndexError) as e:
+                        if debug:
+                            ASCIIColors.warning(f"Failed to parse ADD_AFTER: {e}")
+
+                elif line.startswith("REMOVE_LINE:"):
+                    try:
+                        line_num = int(line.split(":")[1].strip()) - 1
+                        modifications.append(("remove", line_num, None, None))
+
+                    except (ValueError, IndexError) as e:
+                        if debug:
+                            ASCIIColors.warning(f"Failed to parse REMOVE_LINE: {e}")
+
+            if not modifications:
+                return None, None
+
+            # Sort modifications by line number in reverse order
+            modifications.sort(key=lambda x: x[1], reverse=True)
+
+            # Apply modifications
+            for mod_type, line_num, old_line, new_line in modifications:
+                if line_num < 0 or line_num >= len(lines):
+                    if debug:
+                        ASCIIColors.warning(f"Line number {line_num + 1} out of range")
+                    continue
+
+                if mod_type == "replace":
+                    # Verify old line matches (with some flexibility for whitespace)
+                    if old_line and lines[line_num].strip() == old_line.strip():
+                        # Preserve original indentation
+                        indent = len(lines[line_num]) - len(lines[line_num].lstrip())
+                        lines[line_num] = " " * indent + new_line.lstrip()
+                    elif debug:
+                        ASCIIColors.warning(f"Line {line_num + 1} doesn't match expected content")
+
+                elif mod_type == "add_after":
+                    # Get indentation from the reference line
+                    indent = len(lines[line_num]) - len(lines[line_num].lstrip())
+                    lines.insert(line_num + 1, " " * indent + new_line.lstrip())
+
+                elif mod_type == "remove":
+                    del lines[line_num]
+
+            updated_code = "\n".join(lines)
+            patch_text = "\n".join(patch_lines)
+
+            return updated_code, patch_text
+
+        except Exception as e:
+            if debug:
+                ASCIIColors.error(f"Error applying simple patch: {e}")
+            return None, None

-                if continuation_codes:
-                    new_code_part = continuation_codes[0]["content"]
-                    code_content += "\n" + new_code_part # Append continuation
-                    last_code["is_complete"] = continuation_codes[0]["is_complete"] # Update completeness
-                    if last_code["is_complete"]:
-                        ASCIIColors.info("Code block continuation successful.")
-                        break # Exit loop if complete
-                else:
-                    ASCIIColors.warning("Continuation response contained no code block.")
-                    break # Stop if no code block found in continuation

-            …
+    def _apply_unified_patch(self, original_code: str, patch_response: str, debug: bool = False):
+        """
+        Apply a unified diff patch to the original code.
+
+        Returns:
+            tuple: (updated_code or None, patch_text or None)
+        """
+        try:
+            import re
+
+            lines = original_code.split("\n")
+
+            # Extract diff content
+            diff_pattern = r'```diff\n(.*?)\n```'
+            diff_match = re.search(diff_pattern, patch_response, re.DOTALL)
+
+            if diff_match:
+                patch_text = diff_match.group(1)
+            else:
+                # Try to find diff content without code blocks
+                if "@@" in patch_response:
+                    patch_text = patch_response
+                else:
+                    return None, None
+
+            # Parse and apply hunks
+            hunk_pattern = r'@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@'
+            hunks = re.finditer(hunk_pattern, patch_text)
+
+            changes = []
+            for hunk in hunks:
+                old_start = int(hunk.group(1)) - 1 # Convert to 0-based
+                old_count = int(hunk.group(2)) if hunk.group(2) else 1
+                new_start = int(hunk.group(3)) - 1
+                new_count = int(hunk.group(4)) if hunk.group(4) else 1
+
+                # Extract hunk content
+                hunk_start = hunk.end()
+                next_hunk = re.search(hunk_pattern, patch_text[hunk_start:])
+                hunk_end = hunk_start + next_hunk.start() if next_hunk else len(patch_text)
+
+                hunk_lines = patch_text[hunk_start:hunk_end].strip().split("\n")
+
+                # Process hunk lines
+                for line in hunk_lines:
+                    if not line:
+                        continue
+
+                    if line.startswith("-"):
+                        changes.append(("remove", old_start, line[1:].strip()))
+                    elif line.startswith("+"):
+                        changes.append(("add", old_start, line[1:].strip()))
+                    # Context lines (starting with space) are ignored
+
+            # Apply changes (in reverse order to maintain line numbers)
+            changes.sort(key=lambda x: x[1], reverse=True)
+
+            for change_type, line_num, content in changes:
+                if line_num < 0 or line_num > len(lines):
+                    continue
+
+                if change_type == "remove":
+                    if line_num < len(lines) and lines[line_num].strip() == content:
+                        del lines[line_num]
+                elif change_type == "add":
+                    # Preserve indentation from nearby lines
+                    indent = 0
+                    if line_num > 0:
+                        indent = len(lines[line_num - 1]) - len(lines[line_num - 1].lstrip())
+                    lines.insert(line_num, " " * indent + content)
+
+            updated_code = "\n".join(lines)
+
+            return updated_code, patch_text
+
+        except Exception as e:
+            if debug:
+                ASCIIColors.error(f"Error applying unified patch: {e}")
+            return None, None

-        return code_content # Return the (potentially completed) code content or None

     def generate_structured_content(
         self,
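For orientation, here is a minimal usage sketch of the new `override_all_prompts` flag and the `update_code` helper added above. Only the `generate_code`/`update_code` parameters come from this diff; the client construction (binding and model names) is an illustrative assumption.

```python
# A minimal sketch; the LollmsClient constructor arguments are assumptions,
# only the generate_code/update_code parameters come from the 1.4.1 diff.
from lollms_client import LollmsClient

lc = LollmsClient(binding_name="ollama", model_name="mistral")  # hypothetical setup

# override_all_prompts=True sends prompt/system_prompt verbatim and also
# skips the automatic continuation retries for incomplete code blocks.
code = lc.generate_code(
    prompt="Write a function that reverses a string.",
    system_prompt="Answer with a single ```python code block only.",
    language="python",
    override_all_prompts=True,
)

# update_code asks the model for a patch and applies it locally; on failure it
# retries, downgrading from the unified diff format to the simple line format.
result = lc.update_code(
    original_code=code or "def reverse(s):\n    return s[::-1]",
    modification_prompt="Add a docstring and type hints.",
    language="python",
    patch_format="unified",
    max_retries=3,
)
print(result["updated_code"] if result["success"] else result["error"])
```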
@@ -4023,8 +4417,16 @@ Do not split the code in multiple tags.
         schema=None,
         system_prompt=None,
         max_retries=1,
+        use_override=False,
         **kwargs
     ):
+        """
+        Enhanced structured content generation with optional prompt override.
+
+        Args:
+            use_override: If True, uses override_all_prompts=True in generate_code
+                          and relies entirely on provided system_prompt and prompt.
+        """
         import json
         images = [] if images is None else images
         schema = {} if schema is None else schema
@@ -4044,21 +4446,15 @@ Do not split the code in multiple tags.
         else:
             raise TypeError("schema must be a dict or a JSON string.")

-        # --- FIX STARTS HERE ---
         # Heuristic to detect if the schema is a properties-only dictionary
-        # and needs to be wrapped in a root object to be a valid schema.
-        # This handles cases where the user provides `{"field1": {...}, "field2": {...}}`
-        # instead of `{"type": "object", "properties": {"field1": ...}}`.
         if "type" not in schema_obj and "properties" not in schema_obj and all(isinstance(v, dict) for v in schema_obj.values()):
             if kwargs.get("debug"):
                 ASCIIColors.info("Schema appears to be a properties-only dictionary; wrapping it in a root object.")
             schema_obj = {
                 "type": "object",
                 "properties": schema_obj,
-                # Assume all top-level keys are required when wrapping
                 "required": list(schema_obj.keys())
             }
-        # --- FIX ENDS HERE ---

         def _instance_skeleton(s):
             if not isinstance(s, dict):
@@ -4068,7 +4464,6 @@ Do not split the code in multiple tags.
             if "enum" in s and isinstance(s["enum"], list) and s["enum"]:
                 return s["enum"][0]

-            # Handle default values
             if "default" in s:
                 return s["default"]

@@ -4082,22 +4477,19 @@ Do not split the code in multiple tags.
             if t == "boolean":
                 return False
             if t == "array":
-                # Generate one minimal item if schema is provided
                 items = s.get("items", {})
                 min_items = s.get("minItems", 0)
-                # Let's generate at least one item for the example if possible
                 num_items = max(min_items, 1) if items and not min_items == 0 else min_items
                 return [_instance_skeleton(items) for _ in range(num_items)]
             if t == "object":
                 props = s.get("properties", {})
-                # Use required fields, otherwise fall back to all properties for the skeleton
                 req = s.get("required", list(props.keys()))
                 out = {}
                 for k in req:
                     if k in props:
                         out[k] = _instance_skeleton(props[k])
                     else:
-                        out[k] = None
+                        out[k] = None
                 return out
             if "oneOf" in s and isinstance(s["oneOf"], list) and s["oneOf"]:
                 return _instance_skeleton(s["oneOf"][0])
@@ -4112,42 +4504,56 @@ Do not split the code in multiple tags.
                 return merged if merged else {}
             return {}

-        # Now derive strings from the (potentially corrected) schema_obj
         schema_str = json.dumps(schema_obj, indent=2, ensure_ascii=False)
         example_obj = _instance_skeleton(schema_obj)
         example_str = json.dumps(example_obj, indent=2, ensure_ascii=False)

-        …
+        if use_override:
+            # Use provided system_prompt and prompt as-is
+            final_system_prompt = system_prompt if system_prompt else ""
+            final_prompt = prompt
+            override_prompts = True
+        else:
+            # Use original prompt engineering
+            base_system = (
+                "Your objective is to generate a JSON object that satisfies the user's request and conforms to the provided schema.\n"
+                "Rules:\n"
+                "1) The schema is reference ONLY. Do not include the schema in the output.\n"
+                "2) Output exactly ONE valid JSON object.\n"
+                "3) Wrap the JSON object inside a single ```json code block.\n"
+                "4) Do not output explanations or text outside the JSON.\n"
+                "5) Use 2 spaces for indentation. Do not use tabs.\n"
+                "6) Only include fields allowed by the schema and ensure all required fields are present.\n"
+                "7) For enums, choose a valid value from the list.\n\n"
+                "Schema (reference only):\n"
+                f"```json\n{schema_str}\n```\n\n"
+                "Correct example of output format (structure only, values are illustrative):\n"
+                f"```json\n{example_str}\n```"
+            )
+            final_system_prompt = f"{system_prompt}\n\n{base_system}" if system_prompt else base_system
+            final_prompt = prompt
+            override_prompts = False

         if kwargs.get("debug"):
             ASCIIColors.info("Generating structured content...")

         last_error = None
         for attempt in range(max_retries + 1):
+            retry_system_prompt = final_system_prompt
+            if attempt > 0 and not use_override:
+                retry_system_prompt = f"{final_system_prompt}\n\nPrevious attempt failed validation: {last_error}\nReturn a corrected JSON instance that strictly satisfies the schema."
+
             json_string = self.generate_code(
-                prompt=…
+                prompt=final_prompt,
                 images=images,
-                system_prompt=…
-                template=example_str,
+                system_prompt=retry_system_prompt,
+                template=example_str if not use_override else None,
                 language="json",
                 code_tag_format="markdown",
+                override_all_prompts=override_prompts,
                 **kwargs
             )
+
             if not json_string:
                 last_error = "LLM returned an empty response."
                 if kwargs.get("debug"): ASCIIColors.warning(last_error)
@@ -4173,19 +4579,213 @@ Do not split the code in multiple tags.
                     if kwargs.get("debug"): ASCIIColors.warning(last_error)
                     if attempt < max_retries:
                         continue
-                    # Return the invalid object after last retry if validation fails
                     return parsed_json
                 return parsed_json
             except Exception as e:
                 trace_exception(e)
                 ASCIIColors.error(f"Unexpected error during JSON processing: {e}")
                 last_error = f"An unexpected error occurred: {e}"
-                # Do not retry on unexpected errors, break the loop
                 break

         ASCIIColors.error(f"Failed to generate valid structured content after {max_retries + 1} attempts. Last error: {last_error}")
         return None

+    def generate_structured_content_pydantic(
+        self,
+        prompt,
+        pydantic_model,
+        images=None,
+        system_prompt=None,
+        max_retries=1,
+        use_override=False,
+        **kwargs
+    ):
+        """
+        Generate structured content using Pydantic models for validation.
+
+        Args:
+            prompt: The user prompt
+            pydantic_model: A Pydantic BaseModel class or instance
+            images: Optional images for context
+            system_prompt: Optional system prompt
+            max_retries: Number of retry attempts
+            use_override: If True, uses override_all_prompts=True in generate_code
+            **kwargs: Additional arguments passed to generate_code
+
+        Returns:
+            Validated Pydantic model instance or None if generation failed
+        """
+        import json
+        from typing import get_type_hints, get_origin, get_args
+
+        try:
+            from pydantic import BaseModel, ValidationError
+            from pydantic.fields import FieldInfo
+        except ImportError:
+            ASCIIColors.error("Pydantic is required for this method. Please install it with: pip install pydantic")
+            return None
+
+        images = [] if images is None else images
+
+        # Handle both class and instance
+        if isinstance(pydantic_model, type) and issubclass(pydantic_model, BaseModel):
+            model_class = pydantic_model
+        elif isinstance(pydantic_model, BaseModel):
+            model_class = type(pydantic_model)
+        else:
+            raise TypeError("pydantic_model must be a Pydantic BaseModel class or instance")
+
+        def _get_pydantic_schema_info(model_cls):
+            """Extract schema information from Pydantic model."""
+            schema = model_cls.model_json_schema()
+
+            # Create example instance
+            try:
+                # Try to create with defaults first
+                example_instance = model_cls()
+                example_dict = example_instance.model_dump()
+            except:
+                # If that fails, create minimal example based on schema
+                example_dict = _create_example_from_schema(schema)
+
+            return schema, example_dict
+
+        def _create_example_from_schema(schema):
+            """Create example data from JSON schema."""
+            if schema.get("type") == "object":
+                properties = schema.get("properties", {})
+                required = schema.get("required", [])
+                example = {}
+
+                for field_name, field_schema in properties.items():
+                    if field_name in required or "default" in field_schema:
+                        example[field_name] = _get_example_value(field_schema)
+
+                return example
+            return {}
+
+        def _get_example_value(field_schema):
+            """Get example value for a field based on its schema."""
+            if "default" in field_schema:
+                return field_schema["default"]
+            if "const" in field_schema:
+                return field_schema["const"]
+            if "enum" in field_schema and field_schema["enum"]:
+                return field_schema["enum"][0]
+
+            field_type = field_schema.get("type")
+            if field_type == "string":
+                return "example"
+            elif field_type == "integer":
+                return 0
+            elif field_type == "number":
+                return 0.0
+            elif field_type == "boolean":
+                return False
+            elif field_type == "array":
+                items_schema = field_schema.get("items", {})
+                return [_get_example_value(items_schema)]
+            elif field_type == "object":
+                return _create_example_from_schema(field_schema)
+            else:
+                return None
+
+        # Get schema and example from Pydantic model
+        try:
+            schema, example_dict = _get_pydantic_schema_info(model_class)
+            schema_str = json.dumps(schema, indent=2, ensure_ascii=False)
+            example_str = json.dumps(example_dict, indent=2, ensure_ascii=False)
+        except Exception as e:
+            ASCIIColors.error(f"Failed to extract schema from Pydantic model: {e}")
+            return None
+
+        if use_override:
+            # Use provided system_prompt and prompt as-is
+            final_system_prompt = system_prompt if system_prompt else ""
+            final_prompt = prompt
+            override_prompts = True
+        else:
+            # Enhanced prompt engineering for Pydantic
+            base_system = (
+                "Your objective is to generate a JSON object that satisfies the user's request and conforms to the provided Pydantic model schema.\n"
+                "Rules:\n"
+                "1) The schema is reference ONLY. Do not include the schema in the output.\n"
+                "2) Output exactly ONE valid JSON object that can be parsed by the Pydantic model.\n"
+                "3) Wrap the JSON object inside a single ```json code block.\n"
+                "4) Do not output explanations or text outside the JSON.\n"
+                "5) Use 2 spaces for indentation. Do not use tabs.\n"
+                "6) Respect all field types, constraints, and validation rules.\n"
+                "7) Include all required fields and use appropriate default values where specified.\n"
+                "8) For enums, choose a valid value from the allowed options.\n\n"
+                f"Pydantic Model Schema (reference only):\n"
+                f"```json\n{schema_str}\n```\n\n"
+                "Correct example of output format (structure only, values are illustrative):\n"
+                f"```json\n{example_str}\n```"
+            )
+            final_system_prompt = f"{system_prompt}\n\n{base_system}" if system_prompt else base_system
+            final_prompt = prompt
+            override_prompts = False
+
+        if kwargs.get("debug"):
+            ASCIIColors.info("Generating Pydantic-structured content...")
+            ASCIIColors.info(f"Using model: {model_class.__name__}")
+
+        last_error = None
+        for attempt in range(max_retries + 1):
+            retry_system_prompt = final_system_prompt
+            if attempt > 0:
+                retry_system_prompt = f"{final_system_prompt}\n\nPrevious attempt failed Pydantic validation: {last_error}\nReturn a corrected JSON instance that strictly satisfies the Pydantic model."
+
+            json_string = self.generate_code(
+                prompt=prompt,
+                images=images,
+                system_prompt=retry_system_prompt,
+                language="json",
+                code_tag_format="markdown",
+                override_all_prompts=True, # Always use override for structured content
+                **kwargs
+            )
+
+            if not json_string:
+                last_error = "LLM returned an empty response."
+                if kwargs.get("debug"): ASCIIColors.warning(last_error)
+                continue
+
+            if kwargs.get("debug"):
+                ASCIIColors.info("Parsing generated JSON string...")
+                print(f"--- Raw JSON String ---\n{json_string}\n-----------------------")
+
+            try:
+                # Parse JSON
+                parsed_json = robust_json_parser(json_string)
+                if parsed_json is None:
+                    last_error = "Failed to robustly parse the generated string into JSON."
+                    if kwargs.get("debug"): ASCIIColors.warning(last_error)
+                    continue
+
+                # Validate with Pydantic
+                try:
+                    validated_instance = model_class.model_validate(parsed_json)
+                    if kwargs.get("debug"):
+                        ASCIIColors.success("Pydantic validation successful!")
+                    return validated_instance
+                except ValidationError as ve:
+                    last_error = f"Pydantic Validation Error: {ve}"
+                    if kwargs.get("debug"): ASCIIColors.warning(last_error)
+                    if attempt < max_retries:
+                        continue
+                    # Return the raw parsed JSON if validation fails on last attempt
+                    ASCIIColors.warning("Returning unvalidated JSON after final attempt.")
+                    return parsed_json
+
+            except Exception as e:
+                trace_exception(e)
+                ASCIIColors.error(f"Unexpected error during JSON processing: {e}")
+                last_error = f"An unexpected error occurred: {e}"
+                break
+
+        ASCIIColors.error(f"Failed to generate valid Pydantic-structured content after {max_retries + 1} attempts. Last error: {last_error}")
+        return None

     def extract_code_blocks(self, text: str, format: str = "markdown") -> List[dict]:
         """
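As a sketch of the reworked structured-content path (same assumed `lc` client as in the earlier sketch; the schema and the Pydantic model are illustrative):

```python
# generate_structured_content: a properties-only dict is auto-wrapped into a
# root object schema; use_override=True would skip the built-in JSON rules.
profile = lc.generate_structured_content(
    prompt="Extract the person's name and age from: 'Alice is 30.'",
    schema={"name": {"type": "string"}, "age": {"type": "integer"}},
)

# generate_structured_content_pydantic validates the parsed JSON against the
# model; if validation still fails on the last retry, the raw dict is returned.
from pydantic import BaseModel

class Person(BaseModel):
    name: str
    age: int

person = lc.generate_structured_content_pydantic(
    prompt="Extract the person's name and age from: 'Alice is 30.'",
    pydantic_model=Person,
    max_retries=2,
)
```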
lollms_client/lollms_discussion.py
CHANGED

@@ -1707,21 +1707,25 @@ class LollmsDiscussion:
             )

             if memory_json and memory_json.get("title") and memory_json.get("content"):
-                title = memory_json["title"]
-                self.add_memory(
-                    title=title,
-                    content=memory_json["content"]
-                )
-                # Automatically load the newly created memory into the context
-                self.load_memory_into_context(title)
                 print(f"[INFO] Memorize: New memory created and loaded into context: '{title}'.")
+                return memory_json
             else:
                 print("[WARNING] Memorize: Failed to generate a valid memory from the discussion.")
-
+                return None
         except Exception as e:
             trace_exception(e)
             print(f"[ERROR] Memorize: Failed to create memory. {e}")

+    def set_memory(self, memory_text: str):
+        """Sets the discussion's memory content.
+        This memory is included in the system context during exports and can be
+        used to provide background information or retain important details across turns.
+        Args:
+            memory_text: The text to set as the discussion's memory.
+        """
+        self.memory = memory_text.strip()
+        self.touch()
+
     def count_discussion_tokens(self, format_type: str, branch_tip_id: Optional[str] = None) -> int:
         """Counts the number of tokens in the exported discussion content.

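Since the hunk above makes the memorize routine (named after its log messages) return the generated memory rather than persist it, callers now store it themselves, for example via the new `set_memory()`. A sketch; `discussion` is an assumed existing `LollmsDiscussion`:

```python
# memorize() now returns the generated {"title": ..., "content": ...} dict,
# or None on failure, instead of writing it into the discussion itself.
memory = discussion.memorize()
if memory:
    # Persisting into the context is now the caller's job; set_memory replaces
    # the whole memory field and touches the discussion.
    discussion.set_memory(f"--- Memory: {memory['title']} ---\n{memory['content']}")
```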
@@ -2363,7 +2367,7 @@ class LollmsDiscussion:

         return self.add_artefact(
             title, content=new_content, images=new_images,
-            audios=latest_artefact.get("audios", []),
+            audios=latest_artefact.get("audios", []),videos=latest_artefact.get("videos", []),
             zip_content=latest_artefact.get("zip"), version=latest_version + 1, **extra_data
         )

@@ -2489,148 +2493,6 @@ class LollmsDiscussion:

         return removed_count

-    # Memories management system
-    def list_memories(self) -> List[Dict[str, Any]]:
-        """
-        Lists all memories stored in the discussion's metadata.
-        """
-        metadata = self.metadata or {}
-        memories = metadata.get("_memories", [])
-        now = datetime.utcnow().isoformat()
-
-        upgraded = []
-        dirty = False
-        for memory in memories:
-            fixed = memory.copy()
-            if "title" not in fixed: fixed["title"] = "untitled"; dirty = True
-            if "content" not in fixed: fixed["content"] = ""; dirty = True
-            if "created_at" not in fixed: fixed["created_at"] = now; dirty = True
-
-            section_start = f"--- Memory: {fixed['title']} ---"
-            fixed["is_loaded"] = section_start in (self.memory or "")
-            upgraded.append(fixed)
-
-        if dirty:
-            metadata["_memories"] = upgraded
-            self.metadata = metadata
-            self.commit()
-
-        return upgraded
-
-    def add_memory(self, title: str, content: str, **extra_data) -> Dict[str, Any]:
-        """
-        Adds or overwrites a memory in the discussion.
-        """
-        new_metadata = (self.metadata or {}).copy()
-        memories = new_metadata.get("_memories", [])
-
-        memories = [m for m in memories if m.get('title') != title]
-
-        new_memory = {
-            "title": title, "content": content,
-            "created_at": datetime.utcnow().isoformat(),
-            **extra_data
-        }
-        memories.append(new_memory)
-
-        new_metadata["_memories"] = memories
-        self.metadata = new_metadata
-        self.commit()
-        return new_memory
-
-    def get_memory(self, title: str) -> Optional[Dict[str, Any]]:
-        """
-        Retrieves a memory by title.
-        """
-        memories = self.list_memories()
-        return next((m for m in memories if m.get('title') == title), None)
-
-    def load_memory_into_context(self, title: str):
-        """
-        Loads a memory's content into the long-term memory context.
-        """
-        memory = self.get_memory(title)
-        if not memory:
-            raise ValueError(f"Memory '{title}' not found.")
-
-        if memory.get('content'):
-            section = (
-                f"--- Memory: {memory['title']} ---\n"
-                f"{memory['content']}\n"
-                f"--- End Memory: {memory['title']} ---\n\n"
-            )
-            if section not in (self.memory or ""):
-                current_memory_zone = self.memory or ""
-                self.memory = current_memory_zone.rstrip() + "\n\n" + section
-                self.touch()
-                self.commit()
-                print(f"Loaded memory '{title}' into context.")
-
-    def unload_memory_from_context(self, title: str):
-        """
-        Removes a memory's content from the long-term memory context.
-        """
-        memory = self.get_memory(title)
-        if not memory:
-            raise ValueError(f"Memory '{title}' not found.")
-
-        if self.memory and memory.get('content'):
-            section_start = f"--- Memory: {memory['title']} ---"
-            pattern = rf"\n*\s*{re.escape(section_start)}.*?--- End Memory: {re.escape(memory['title'])} ---\s*\n*"
-            self.memory = re.sub(pattern, "", self.memory, flags=re.DOTALL).strip()
-            self.touch()
-            self.commit()
-            print(f"Unloaded memory '{title}' from context.")
-
-    def is_memory_loaded(self, title: str) -> bool:
-        """
-        Checks if a memory is currently loaded in the long-term memory context.
-        """
-        memory = self.get_memory(title)
-        if not memory:
-            return False
-
-        section_start = f"--- Memory: {memory['title']} ---"
-        return section_start in (self.memory or "")
-
-    def purge_memories(self) -> bool:
-        """
-        Removes all memories from the discussion.
-
-        Returns:
-            The number of memories removed (0 or 1).
-        """
-        new_metadata = (self.metadata or {}).copy()
-        new_metadata["_memories"] = []
-        self.metadata = new_metadata
-        self.commit()
-        print(f"Removed memory titled.")
-        return True
-
-    def remove_memory(self, title: str) -> int:
-        """
-        Removes a memory by title.
-
-        Returns:
-            The number of memories removed (0 or 1).
-        """
-        new_metadata = (self.metadata or {}).copy()
-        memories = new_metadata.get("_memories", [])
-        if not memories:
-            return 0
-
-        initial_count = len(memories)
-        kept_memories = [m for m in memories if m.get('title') != title]
-
-        if len(kept_memories) < initial_count:
-            new_metadata["_memories"] = kept_memories
-            self.metadata = new_metadata
-            self.commit()
-            print(f"Removed memory titled '{title}'.")
-            return 1
-
-        return 0
-
     def clone_without_messages(self) -> 'LollmsDiscussion':
         """
         Creates a new discussion with the same context but no message history.
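With the whole per-title memory API removed in 1.4.1 (`list_memories`, `add_memory`, `get_memory`, `load_memory_into_context`, `unload_memory_from_context`, `is_memory_loaded`, `purge_memories`, `remove_memory`), callers that relied on it need their own store. A hedged migration sketch, reusing the section delimiters from the removed code:

```python
# 1.4.0 style (removed in 1.4.1):
#   discussion.add_memory("project", "Uses Python 3.11")
#   discussion.load_memory_into_context("project")

# 1.4.1 style: keep a caller-managed store and render it into the single
# memory field via set_memory(); the delimiter format mirrors the old one.
memories = {"project": "Uses Python 3.11"}  # illustrative store
rendered = "\n\n".join(
    f"--- Memory: {title} ---\n{content}\n--- End Memory: {title} ---"
    for title, content in memories.items()
)
discussion.set_memory(rendered)
```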
lollms_client/tti_bindings/diffusers/__init__.py
CHANGED

@@ -131,7 +131,28 @@ CIVITAI_MODELS = {
         "filename": "papercut.safetensors",
         "description": "Paper cutout SD1.5.",
         "owned_by": "civitai"
-    }
+    },
+    "fantassifiedIcons": {
+        "display_name": "Fantassified Icons",
+        "url": "https://civitai.com/api/download/models/67584?type=Model&format=SafeTensor&size=pruned&fp=fp16",
+        "filename": "fantassifiedIcons_fantassifiedIconsV20.safetensors",
+        "description": "Flat, modern Icons.",
+        "owned_by": "civitai"
+    },
+    "game_icon_institute": {
+        "display_name": "Game icon institute",
+        "url": "https://civitai.com/api/download/models/158776?type=Model&format=SafeTensor&size=full&fp=fp16",
+        "filename": "gameIconInstituteV10_v10.safetensors",
+        "description": "Flat, modern game Icons.",
+        "owned_by": "civitai"
+    },
+    "M4RV3LS_DUNGEONS": {
+        "display_name": "M4RV3LS & DUNGEONS",
+        "url": "https://civitai.com/api/download/models/139417?type=Model&format=SafeTensor&size=pruned&fp=fp16",
+        "filename": "M4RV3LSDUNGEONSNEWV40COMICS_mD40.safetensors",
+        "description": "comics.",
+        "owned_by": "civitai"
+    },
 }

 TORCH_DTYPE_MAP_STR_TO_OBJ = {
@@ -453,8 +474,8 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         "safety_checker_on": True,
         "num_inference_steps": 25,
         "guidance_scale": 7.0,
-        "…
-        "…
+        "width": 512,
+        "height": 512,
         "seed": -1,
         "enable_cpu_offload": False,
         "enable_sequential_cpu_offload": False,
@@ -484,6 +505,7 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         self.config = self.DEFAULT_CONFIG.copy()
         self.config.update(kwargs)
         self.model_name = self.config.get("model_name", "")
+
         models_path_str = kwargs.get("models_path", str(Path(__file__).parent / "models"))
         self.models_path = Path(models_path_str)
         self.models_path.mkdir(parents=True, exist_ok=True)
@@ -600,11 +622,11 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         generator = self._prepare_seed(kwargs)
         pipeline_args = {
             "prompt": prompt,
-            "negative_prompt": negative_prompt or …
-            "width": width if width is not None else self.config…
-            "height": height if height is not None else self.config…
-            "num_inference_steps": kwargs.pop("num_inference_steps", self.config…
-            "guidance_scale": kwargs.pop("guidance_scale", self.config…
+            "negative_prompt": negative_prompt or self.config.get("negative_prompt", ""),
+            "width": width if width is not None else self.config.get("width", 512),
+            "height": height if height is not None else self.config.get("height", 512),
+            "num_inference_steps": kwargs.pop("num_inference_steps", self.config.get("num_inference_steps",25)),
+            "guidance_scale": kwargs.pop("guidance_scale", self.config.get("guidance_scale",6.5)),
             "generator": generator
         }
         pipeline_args.update(kwargs)
@@ -646,8 +668,8 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         self._acquire_manager()
         imgs = [images] if isinstance(images, str) else list(images)
         pil_images = [self._decode_image_input(s) for s in imgs]
-        out_w = width if width is not None else self.config["…
-        out_h = height if height is not None else self.config["…
+        out_w = width if width is not None else self.config["width"]
+        out_h = height if height is not None else self.config["height"]
         generator = self._prepare_seed(kwargs)
         steps = kwargs.pop("num_inference_steps", self.config["num_inference_steps"])
         guidance = kwargs.pop("guidance_scale", self.config["guidance_scale"])
@@ -756,8 +778,8 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         {"name": "enable_cpu_offload", "type": "bool", "value": self.config["enable_cpu_offload"], "description": "Enable model CPU offload (saves VRAM, slower)."},
         {"name": "enable_sequential_cpu_offload", "type": "bool", "value": self.config["enable_sequential_cpu_offload"], "description": "Enable sequential CPU offload."},
         {"name": "enable_xformers", "type": "bool", "value": self.config["enable_xformers"], "description": "Enable xFormers memory efficient attention."},
-        {"name": "…
-        {"name": "…
+        {"name": "width", "type": "int", "value": self.config["width"], "description": "Default image width."},
+        {"name": "height", "type": "int", "value": self.config["height"], "description": "Default image height."},
         {"name": "num_inference_steps", "type": "int", "value": self.config["num_inference_steps"], "description": "Default inference steps."},
         {"name": "guidance_scale", "type": "float", "value": self.config["guidance_scale"], "description": "Default guidance scale (CFG)."},
         {"name": "seed", "type": "int", "value": self.config["seed"], "description": "Default seed (-1 for random)."},
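The diffusers binding now exposes plain `width`/`height` settings (default 512) that per-call arguments override. A sketch; the import path, constructor surface, and the `generate_image` call shape are assumptions based on the hunks above:

```python
# Import path and constructor surface are assumptions; the width/height keys
# and their 512 defaults come from the DEFAULT_CONFIG hunk above.
from lollms_client.tti_bindings.diffusers import DiffusersTTIBinding_Impl

binding = DiffusersTTIBinding_Impl(model_name="papercut", width=768, height=512)

# Per-call values win over config defaults, as in the pipeline_args hunk.
image = binding.generate_image(
    prompt="paper cutout fox in a forest",
    width=1024,   # overrides config["width"]
    height=768,   # overrides config["height"]
)
```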
{lollms_client-1.4.0.dist-info → lollms_client-1.4.1.dist-info}/RECORD
CHANGED

@@ -1,8 +1,8 @@
-lollms_client/__init__.py,sha256=…
+lollms_client/__init__.py,sha256=G2ENRPwIlHb_nTaBEbn_AvUQvlsBYpIuQXGWYkYmyo0,1146
 lollms_client/lollms_agentic.py,sha256=pQiMEuB_XkG29-SW6u4KTaMFPr6eKqacInggcCuCW3k,13914
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=…
-lollms_client/lollms_discussion.py,sha256=…
+lollms_client/lollms_core.py,sha256=aCEoxmEF6ZmkBgJgZd74lKkM4A3PVVyt2IwMvLfScWw,315053
+lollms_client/lollms_discussion.py,sha256=jWw1lSq0Oz_X5pnkECf1XwdDP2Lf84im00VpwuvsXXk,123041
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
 lollms_client/lollms_llm_binding.py,sha256=Dj1PI2bQBYv_JgPxCIaIC7DMUvWdFJGwXFdsP5hdGBg,25014
 lollms_client/lollms_mcp_binding.py,sha256=psb27A23VFWDfZsR2WUbQXQxiZDW5yfOak6ZtbMfszI,10222
@@ -49,7 +49,7 @@ lollms_client/stt_bindings/lollms/__init__.py,sha256=9Vmn1sQQZKLGLe7nZnc-0LnNeSY…
 lollms_client/stt_bindings/whisper/__init__.py,sha256=1Ej67GdRKBy1bba14jMaYDYHiZkxJASkWm5eF07ztDQ,15363
 lollms_client/stt_bindings/whispercpp/__init__.py,sha256=xSAQRjAhljak3vWCpkP0Vmdb6WmwTzPjXyaIB85KLGU,21439
 lollms_client/tti_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client/tti_bindings/diffusers/__init__.py,sha256=…
+lollms_client/tti_bindings/diffusers/__init__.py,sha256=Pi5Zw4nHGXVc0Vcb0ib7KkoiOx__0JukWtL01BUzd7c,41692
 lollms_client/tti_bindings/gemini/__init__.py,sha256=f9fPuqnrBZ1Z-obcoP6EVvbEXNbNCSg21cd5efLCk8U,16707
 lollms_client/tti_bindings/lollms/__init__.py,sha256=5Tnsn4b17djvieQkcjtIDBm3qf0pg5ZWWov-4_2wmo0,8762
 lollms_client/tti_bindings/openai/__init__.py,sha256=YWJolJSQfIzTJvrLQVe8rQewP7rddf6z87g4rnp-lTs,4932
@@ -71,8 +71,8 @@ lollms_client/tts_bindings/xtts/server/main.py,sha256=T-Kn5NM-u1FJMygeV8rOoZKlqn…
 lollms_client/tts_bindings/xtts/server/setup_voices.py,sha256=UdHaPa5aNcw8dR-aRGkZr2OfSFFejH79lXgfwT0P3ss,1964
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-1.4.0.dist-info/licenses/LICENSE,sha256=…
-lollms_client-1.4.0.dist-info/METADATA,sha256=…
-lollms_client-1.4.0.dist-info/WHEEL,sha256=…
-lollms_client-1.4.0.dist-info/top_level.txt,sha256=…
-lollms_client-1.4.0.dist-info/RECORD,,
+lollms_client-1.4.1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-1.4.1.dist-info/METADATA,sha256=eBfpms3EJ5sD7D-xBTXggnqOc1g8IE0inftnXGQmb6w,58689
+lollms_client-1.4.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-1.4.1.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+lollms_client-1.4.1.dist-info/RECORD,,
{lollms_client-1.4.0.dist-info → lollms_client-1.4.1.dist-info}/WHEEL
File without changes

{lollms_client-1.4.0.dist-info → lollms_client-1.4.1.dist-info}/licenses/LICENSE
File without changes

{lollms_client-1.4.0.dist-info → lollms_client-1.4.1.dist-info}/top_level.txt
File without changes