lollms-client 0.12.6__py3-none-any.whl → 0.13.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (34)
  1. examples/article_summary/article_summary.py +58 -0
  2. examples/deep_analyze/deep_analyse.py +30 -0
  3. examples/deep_analyze/deep_analyze_multiple_files.py +32 -0
  4. examples/function_call/functions_call_with images.py +52 -0
  5. examples/personality_test/chat_test.py +37 -0
  6. examples/personality_test/chat_with_aristotle.py +42 -0
  7. examples/personality_test/tesks_test.py +62 -0
  8. examples/simple_text_gen_test.py +173 -0
  9. examples/simple_text_gen_with_image_test.py +166 -0
  10. examples/test_local_models/local_chat.py +9 -0
  11. examples/text_2_audio.py +77 -0
  12. examples/text_2_image.py +140 -0
  13. examples/text_and_image_2_audio.py +59 -0
  14. examples/text_gen.py +28 -0
  15. lollms_client/__init__.py +3 -2
  16. lollms_client/llm_bindings/lollms/__init__.py +13 -11
  17. lollms_client/llm_bindings/ollama/__init__.py +44 -60
  18. lollms_client/llm_bindings/openai/__init__.py +69 -29
  19. lollms_client/llm_bindings/tensor_rt/__init__.py +603 -0
  20. lollms_client/llm_bindings/transformers/__init__.py +7 -11
  21. lollms_client/llm_bindings/vllm/__init__.py +603 -0
  22. lollms_client/lollms_core.py +14 -4
  23. lollms_client/lollms_llm_binding.py +5 -25
  24. {lollms_client-0.12.6.dist-info → lollms_client-0.13.1.dist-info}/METADATA +19 -12
  25. lollms_client-0.13.1.dist-info/RECORD +52 -0
  26. {lollms_client-0.12.6.dist-info → lollms_client-0.13.1.dist-info}/WHEEL +1 -1
  27. {lollms_client-0.12.6.dist-info → lollms_client-0.13.1.dist-info}/top_level.txt +1 -0
  28. lollms_client/lollms_personality.py +0 -403
  29. lollms_client/lollms_personality_worker.py +0 -1485
  30. lollms_client/lollms_stt.py +0 -35
  31. lollms_client/lollms_tti.py +0 -35
  32. lollms_client/lollms_tts.py +0 -39
  33. lollms_client-0.12.6.dist-info/RECORD +0 -41
  34. {lollms_client-0.12.6.dist-info → lollms_client-0.13.1.dist-info}/licenses/LICENSE +0 -0
lollms_client/lollms_personality_worker.py
@@ -1,1485 +0,0 @@
- from typing import List, Optional, Callable, Dict, Any
- from lollms_client.lollms_types import MSG_TYPE
- from ascii_colors import ASCIIColors
- from lollms_client.lollms_personality import LollmsPersonality
- from lollms_client.lollms_config import TypedConfig
- from ascii_colors import trace_exception, ASCIIColors
- import yaml
- import json
-
- class StateMachine:
-     def __init__(self, states_dict):
-         """
-         states structure is the following
-         [
-             {
-                 "name": the state name,
-                 "commands": [ # list of commands
-                     "command": function
-                 ],
-                 "default": default function
-             }
-         ]
-         """
-         self.states_dict = states_dict
-         self.current_state_id = 0
-         self.callback = None
-
-     def goto_state(self, state):
-         """
-         Transition to the state with the given name or index.
-
-         Args:
-             state (str or int): The name or index of the state to transition to.
-
-         Raises:
-             ValueError: If no state is found with the given name or index.
-         """
-         if isinstance(state, str):
-             for i, state_dict in enumerate(self.states_dict):
-                 if state_dict["name"] == state:
-                     self.current_state_id = i
-                     return
-         elif isinstance(state, int):
-             if 0 <= state < len(self.states_dict):
-                 self.current_state_id = state
-                 return
-         raise ValueError(f"No state found with name or index: {state}")
-
-
-
-     def process_state(self, command, full_context, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None, client=None):
-         """
-         Process the given command based on the current state.
-
-         Args:
-             command: The command to process.
-
-         Raises:
-             ValueError: If the current state doesn't have the command and no default function is defined.
-         """
-         if callback:
-             self.callback=callback
-
-         current_state = self.states_dict[self.current_state_id]
-         commands = current_state["commands"]
-         command = command.strip()
-
-         for cmd, func in commands.items():
-             if cmd == command[0:len(cmd)]:
-                 try:
-                     func(command, full_context,client)
-                 except:# retrocompatibility
-                     func(command, full_context)
-                 return
-
-         default_func = current_state.get("default")
-         if default_func is not None:
-             default_func(command, full_context)
-         else:
-             raise ValueError(f"Command '{command}' not found in current state and no default function defined.")
-
-
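The states structure documented above maps command prefixes to handler callables. A minimal sketch of driving the class (handler names are hypothetical, not from the package):

```python
# Hypothetical handlers: process_state first tries func(command, context, client),
# then falls back to func(command, context) for older two-argument handlers.
def on_help(command, full_context):
    print("showing help")

def fallback(command, full_context):
    print(f"unhandled command: {command}")

states = [
    {
        "name": "idle",
        "commands": {"help": on_help},  # keys are prefix-matched against the command
        "default": fallback,
    }
]

sm = StateMachine(states)
sm.process_state("help me", full_context=None)  # "help" prefix matches -> on_help
sm.goto_state("idle")                           # transition by name or by index
```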
- class LollmsPersonalityWorker(StateMachine):
-     """
-     Template class for implementing personality processor classes in the APScript framework.
-
-     This class provides a basic structure and placeholder methods for processing model inputs and outputs.
-     Personality-specific processor classes should inherit from this class and override the necessary methods.
-     """
-     def __init__(
-                     self,
-                     personality :LollmsPersonality,
-                     personality_config :TypedConfig,
-                     states_dict :dict = {},
-                     callback = None
-                 ) -> None:
-         super().__init__(states_dict)
-         self.notify = personality.notify
-
-         self.personality = personality
-         self.personality_config = personality_config
-         self.configuration_file_path = self.personality.personality_config_dir/f"config.yaml"
-
-         self.personality_config.config.file_path = self.configuration_file_path
-
-         self.callback = callback
-
-         # Installation
-         if (not self.configuration_file_path.exists()):
-             self.install()
-             self.personality_config.config.save_config()
-         else:
-             self.load_personality_config()
-
-     def sink(self, s=None,i=None,d=None):
-         pass
-
-     def settings_updated(self):
-         """
-         To be implemented by the processor when the settings have changed
-         """
-         pass
-
-     def mounted(self):
-         """
-         triggered when mounted
-         """
-         pass
-
-     def selected(self):
-         """
-         triggered when selected
-         """
-         pass
-
-     def execute_command(self, command: str, parameters:list=[], client=None):
-         """
-         Recovers user commands and executes them. Each personality can define a set of commands that it can receive and execute.
-         Args:
-             command: The command name
-             parameters: A list of the command parameters
-
-         """
-         try:
-             self.process_state(command, "", self.callback, client)
-         except Exception as ex:
-             trace_exception(ex)
-             self.warning(f"Couldn't execute command {command}")
-
-     async def handle_request(self, request) -> Dict[str, Any]:
-         """
-         Handle client requests.
-
-         Args:
-             request (dict): A dictionary containing the request data.
-
-         Returns:
-             dict: A dictionary containing the response, including at least a "status" key.
-
-         This method should be implemented by a class that inherits from this one.
-
-         Example usage:
-         ```
-         handler = YourHandlerClass()
-         request_data = {"command": "some_command", "parameters": {...}}
-         response = await handler.handle_request(request_data)
-         ```
-         """
-         return {"status":True}
-
-
-     def load_personality_config(self):
-         """
-         Load the content of local_config.yaml file.
-
-         The function reads the content of the local_config.yaml file and returns it as a Python dictionary.
-
-         Args:
-             None
-
-         Returns:
-             dict: A dictionary containing the loaded data from the local_config.yaml file.
-         """
-         try:
-             self.personality_config.config.load_config()
-         except:
-             self.personality_config.config.save_config()
-         self.personality_config.sync()
-
-     def install(self):
-         """
-         Installation procedure (to be implemented)
-         """
-         ASCIIColors.blue("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*")
-         ASCIIColors.red(f"Installing {self.personality.personality_folder_name}")
-         ASCIIColors.blue("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*")
-
-
-     def uninstall(self):
-         """
-         Uninstallation procedure (to be implemented)
-         """
-         ASCIIColors.blue("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*")
-         ASCIIColors.red(f"Uninstalling {self.personality.personality_folder_name}")
-         ASCIIColors.blue("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*")
-
-
-     def add_file(self, path, client, callback=None, process=True):
-         self.personality.add_file(path, client=client,callback=callback, process=process)
-         if callback is not None:
-             callback("File added successfully",MSG_TYPE.MSG_TYPE_INFO)
-         return True
-
-     def remove_file(self, path):
-         if path in self.personality.text_files:
-             self.personality.text_files.remove(path)
-         elif path in self.personality.image_files:
-             self.personality.image_files.remove(path)
-
-
-     def load_config_file(self, path, default_config=None):
-         """
-         Load the content of local_config.yaml file.
-
-         The function reads the content of the local_config.yaml file and returns it as a Python dictionary.
-         If a default_config is provided, it fills any missing entries in the loaded dictionary.
-         If at least one field from the default configuration was not present in the loaded configuration, the updated
-         configuration is saved.
-
-         Args:
-             path (str): The path to the local_config.yaml file.
-             default_config (dict, optional): A dictionary with default values to fill missing entries.
-
-         Returns:
-             dict: A dictionary containing the loaded data from the local_config.yaml file, with missing entries filled
-             by default_config if provided.
-         """
-         with open(path, 'r') as file:
-             data = yaml.safe_load(file)
-
-         if default_config:
-             updated = False
-             for key, value in default_config.items():
-                 if key not in data:
-                     data[key] = value
-                     updated = True
-
-             if updated:
-                 self.save_config_file(path, data)
-
-         return data
-
-     def save_config_file(self, path, data):
-         """
-         Save the configuration data to a local_config.yaml file.
-
-         Args:
-             path (str): The path to save the local_config.yaml file.
-             data (dict): The configuration data to be saved.
-
-         Returns:
-             None
-         """
-         with open(path, 'w') as file:
-             yaml.dump(data, file)
-
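The pair load_config_file / save_config_file implements a simple defaults-backfill pattern: missing keys are taken from default_config, and the file is rewritten only when something was actually added. The same behavior in isolation (a standalone sketch, not part of the package):

```python
import yaml

def load_with_defaults(path, default_config=None):
    # Read the YAML file, then back-fill any missing keys from the defaults.
    with open(path, "r") as file:
        data = yaml.safe_load(file) or {}
    updated = False
    for key, value in (default_config or {}).items():
        if key not in data:
            data[key] = value
            updated = True
    if updated:  # persist only when a default was actually injected
        with open(path, "w") as file:
            yaml.dump(data, file)
    return data
```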
-     def generate_with_images(self, prompt, images, max_size, temperature = None, top_k = None, top_p=None, repeat_penalty=None, repeat_last_n=None, callback=None, debug=False ):
-         return self.personality.generate_with_images(prompt, images, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
-
-     def generate(self, prompt, max_size, temperature = None, top_k = None, top_p=None, repeat_penalty=None, repeat_last_n=None, callback=None, debug=False ):
-         return self.personality.generate(prompt, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
-
-     from lollms.client_session import Client
-     def run_workflow(self, prompt:str, previous_discussion_text:str="", callback: Callable[[str, MSG_TYPE, dict, list], bool]=None, context_details:dict=None, client:Client=None):
-         """
-         This function generates code based on the given parameters.
-
-         Args:
-             full_prompt (str): The full prompt for code generation.
-             prompt (str): The prompt for code generation.
-             context_details (dict): A dictionary containing the following context details for code generation:
-                 - conditionning (str): The conditioning information.
-                 - documentation (str): The documentation information.
-                 - knowledge (str): The knowledge information.
-                 - user_description (str): The user description information.
-                 - discussion_messages (str): The discussion messages information.
-                 - positive_boost (str): The positive boost information.
-                 - negative_boost (str): The negative boost information.
-                 - force_language (str): The force language information.
-                 - fun_mode (str): The fun mode conditioning text
-                 - ai_prefix (str): The AI prefix information.
-             n_predict (int): The number of predictions to generate.
-             client_id: The client ID for code generation.
-             callback (function, optional): The callback function for code generation.
-
-         Returns:
-             None
-         """
-
-         return None
-
-
-     # ================================================= Advanced methods ===========================================
-     def compile_latex(self, file_path, pdf_latex_path=None):
-         try:
-             # Determine the pdflatex command based on the provided or default path
-             if pdf_latex_path:
-                 pdflatex_command = pdf_latex_path
-             else:
-                 pdflatex_command = self.personality.config.pdf_latex_path if self.personality.config.pdf_latex_path is not None else 'pdflatex'
-
-             # Set the execution path to the folder containing the tmp_file
-             execution_path = file_path.parent
-             # Run the pdflatex command with the file path
-             result = subprocess.run([pdflatex_command, "-interaction=nonstopmode", file_path], check=True, capture_output=True, text=True, cwd=execution_path)
-             # Check the return code of the pdflatex command
-             if result.returncode != 0:
-                 error_message = result.stderr.strip()
-                 return {"status":False,"error":error_message}
-
-             # If the compilation is successful, you will get a PDF file
-             pdf_file = file_path.with_suffix('.pdf')
-             print(f"PDF file generated: {pdf_file}")
-             return {"status":True,"file_path":pdf_file}
-
-         except subprocess.CalledProcessError as e:
-             print(f"Error occurred while compiling LaTeX: {e}")
-             return {"status":False,"error":e}
-
-     def find_numeric_value(self, text):
-         pattern = r'\d+[.,]?\d*'
-         match = re.search(pattern, text)
-         if match:
-             return float(match.group().replace(',', '.'))
-         else:
-             return None
-     def remove_backticks(self, text):
-         if text.startswith("```"):
-             split_text = text.split("\n")
-             text = "\n".join(split_text[1:])
-         if text.endswith("```"):
-             text= text[:-3]
-         return text
-
-     def search_duckduckgo(self, query: str, max_results: int = 10, instant_answers: bool = True, regular_search_queries: bool = True, get_webpage_content: bool = False) -> List[Dict[str, Union[str, None]]]:
-         """
-         Perform a search using the DuckDuckGo search engine and return the results as a list of dictionaries.
-
-         Args:
-             query (str): The search query to use in the search. This argument is required.
-             max_results (int, optional): The maximum number of search results to return. Defaults to 10.
-             instant_answers (bool, optional): Whether to include instant answers in the search results. Defaults to True.
-             regular_search_queries (bool, optional): Whether to include regular search queries in the search results. Defaults to True.
-             get_webpage_content (bool, optional): Whether to retrieve and include the website content for each result. Defaults to False.
-
-         Returns:
-             list[dict]: A list of dictionaries containing the search results. Each dictionary will contain 'title', 'body', and 'href' keys.
-
-         Raises:
-             ValueError: If neither instant_answers nor regular_search_queries is set to True.
-         """
-         if not PackageManager.check_package_installed("duckduckgo_search"):
-             PackageManager.install_package("duckduckgo_search")
-         from duckduckgo_search import DDGS
-         if not (instant_answers or regular_search_queries):
-             raise ValueError("One of ('instant_answers', 'regular_search_queries') must be True")
-
-         query = query.strip("\"'")
-
-         with DDGS() as ddgs:
-             if instant_answers:
-                 answer_list = list(ddgs.answers(query))
-                 if answer_list:
-                     answer_dict = answer_list[0]
-                     answer_dict["title"] = query
-                     answer_dict["body"] = next((item['Text'] for item in answer_dict['AbstractText']), None)
-                     answer_dict["href"] = answer_dict.get('FirstURL', '')
-                 else:
-                     answer_list = []
-
-             if regular_search_queries:
-                 results = ddgs.text(query, safe=False, result_type='link')
-                 for result in results[:max_results]:
-                     title = result['Text'] or query
-                     body = None
-                     href = result['FirstURL'] or ''
-                     answer_dict = {'title': title, 'body': body, 'href': href}
-                     answer_list.append(answer_dict)
-
-             if get_webpage_content:
-                 for i, result in enumerate(answer_list):
-                     try:
-                         response = requests.get(result['href'])
-                         if response.status_code == 200:
-                             content = response.text
-                             answer_list[i]['body'] = content
-                     except Exception as e:
-                         print(f"Error retrieving webpage content for {result['href']}: {str(e)}")
-
-         return answer_list
-
-
-     def translate(self, text_chunk, output_language="french", max_generation_size=3000):
-         translated = self.fast_gen(
-             "\n".join([
-                 f"!@>system:",
-                 f"Translate the following text to {output_language}.",
-                 "Be faithful to the original text and do not add or remove any information.",
-                 "Respond only with the translated text.",
-                 "Do not add comments or explanations.",
-                 f"!@>text to translate:",
-                 f"{text_chunk}",
-                 f"!@>translation:",
-             ]),
-             max_generation_size=max_generation_size, callback=self.sink)
-         return translated
-
-     def summerize_text(
-                         self,
-                         text,
-                         summary_instruction="summerize",
-                         doc_name="chunk",
-                         answer_start="",
-                         max_generation_size=3000,
-                         max_summary_size=512,
-                         callback=None,
-                         chunk_summary_post_processing=None
-                     ):
-         depth=0
-         tk = self.personality.model.tokenize(text)
-         prev_len = len(tk)
-         while len(tk)>max_summary_size:
-             self.step_start(f"Comprerssing {doc_name}... [depth {depth+1}]")
-             chunk_size = int(self.personality.config.ctx_size*0.6)
-             document_chunks = DocumentDecomposer.decompose_document(text, chunk_size, 0, self.personality.model.tokenize, self.personality.model.detokenize, True)
-             text = self.summerize_chunks(document_chunks,summary_instruction, doc_name, answer_start, max_generation_size, callback, chunk_summary_post_processing=chunk_summary_post_processing)
-             tk = self.personality.model.tokenize(text)
-             tk = self.personality.model.tokenize(text)
-             dtk_ln=prev_len-len(tk)
-             prev_len = len(tk)
-             self.step(f"Current text size : {prev_len}, max summary size : {max_summary_size}")
-             self.step_end(f"Comprerssing {doc_name}... [depth {depth+1}]")
-             depth += 1
-             if dtk_ln<=10: # it is not summarizing anymore
-                 break
-         return text
-
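summerize_text implements hierarchical, map-reduce-style summarization: the text is re-chunked and re-summarized until it fits in max_summary_size tokens, bailing out when a pass shrinks the text by ten tokens or fewer. Stripped of the personality plumbing, the control flow is roughly this (tokenize and summarize_pass are stand-ins for the model calls):

```python
def compress(text, tokenize, summarize_pass, max_summary_size=512):
    # Repeatedly summarize until the text fits, or a pass stops making progress.
    prev_len = len(tokenize(text))
    while len(tokenize(text)) > max_summary_size:
        text = summarize_pass(text)       # chunk the text and summarize each chunk
        cur_len = len(tokenize(text))
        if prev_len - cur_len <= 10:      # barely shrank: give up to avoid looping
            break
        prev_len = cur_len
    return text
```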
-     def smart_data_extraction(
-                         self,
-                         text,
-                         data_extraction_instruction="summerize",
-                         final_task_instruction="reformulate with better wording",
-                         doc_name="chunk",
-                         answer_start="",
-                         max_generation_size=3000,
-                         max_summary_size=512,
-                         callback=None,
-                         chunk_summary_post_processing=None
-                     ):
-         depth=0
-         tk = self.personality.model.tokenize(text)
-         prev_len = len(tk)
-         while len(tk)>max_summary_size:
-             self.step_start(f"Comprerssing... [depth {depth+1}]")
-             chunk_size = int(self.personality.config.ctx_size*0.6)
-             document_chunks = DocumentDecomposer.decompose_document(text, chunk_size, 0, self.personality.model.tokenize, self.personality.model.detokenize, True)
-             text = self.summerize_chunks(document_chunks, data_extraction_instruction, doc_name, answer_start, max_generation_size, callback, chunk_summary_post_processing=chunk_summary_post_processing)
-             tk = self.personality.model.tokenize(text)
-             dtk_ln=prev_len-len(tk)
-             prev_len = len(tk)
-             self.step(f"Current text size : {prev_len}, max summary size : {max_summary_size}")
-             self.step_end(f"Comprerssing... [depth {depth+1}]")
-             depth += 1
-             if dtk_ln<=10: # it is not summarizing anymore
-                 break
-         self.step_start(f"Rewriting ...")
-         text = self.summerize_chunks([text],
-                                      final_task_instruction, doc_name, answer_start, max_generation_size, callback, chunk_summary_post_processing=chunk_summary_post_processing)
-         self.step_end(f"Rewriting ...")
-
-         return text
-
-     def summerize_chunks(
-                             self,
-                             chunks,
-                             summary_instruction="summerize",
-                             doc_name="chunk",
-                             answer_start="",
-                             max_generation_size=3000,
-                             callback=None,
-                             chunk_summary_post_processing=None
-                         ):
-         summeries = []
-         for i, chunk in enumerate(chunks):
-             self.step_start(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
-             summary = f"{answer_start}"+ self.fast_gen(
-                 "\n".join([
-                     f"!@>Document_chunk: {doc_name}:",
-                     f"{chunk}",
-                     f"!@>instruction: {summary_instruction}",
-                     f"Answer directly with the summary with no extra comments.",
-                     f"!@>summary:",
-                     f"{answer_start}"
-                 ]),
-                 max_generation_size=max_generation_size,
-                 callback=callback)
-             if chunk_summary_post_processing:
-                 summary = chunk_summary_post_processing(summary)
-             summeries.append(summary)
-             self.step_end(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
-         return "\n".join(summeries)
-
-     def sequencial_chunks_summary(
-                             self,
-                             chunks,
-                             summary_instruction="summerize",
-                             doc_name="chunk",
-                             answer_start="",
-                             max_generation_size=3000,
-                             callback=None,
-                             chunk_summary_post_processing=None
-                         ):
-         summeries = []
-         for i, chunk in enumerate(chunks):
-             if i<len(chunks)-1:
-                 chunk1 = chunks[i+1]
-             else:
-                 chunk1=""
-             if i>0:
-                 chunk=summary
-             self.step_start(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
-             summary = f"{answer_start}"+ self.fast_gen(
-                 "\n".join([
-                     f"!@>Document_chunk: {doc_name}:",
-                     f"Block1:",
-                     f"{chunk}",
-                     f"Block2:",
-                     f"{chunk1}",
-                     f"!@>instruction: {summary_instruction}",
-                     f"Answer directly with the summary with no extra comments.",
-                     f"!@>summary:",
-                     f"{answer_start}"
-                 ]),
-                 max_generation_size=max_generation_size,
-                 callback=callback)
-             if chunk_summary_post_processing:
-                 summary = chunk_summary_post_processing(summary)
-             summeries.append(summary)
-             self.step_end(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
-         return "\n".join(summeries)
-
-
-     def build_prompt(self, prompt_parts:List[str], sacrifice_id:int=-1, context_size:int=None, minimum_spare_context_size:int=None):
-         """
-         Builds a prompt from a list of parts, cropping the sacrificed part if needed to fit the context size.
-
-         Args:
-             prompt_parts (List[str]): A list of strings representing the parts of the prompt.
-             sacrifice_id (int, optional): The ID of the part to sacrifice.
-             context_size (int, optional): The size of the context.
-             minimum_spare_context_size (int, optional): The minimum spare context size.
-
-         Returns:
-             str: The built prompt.
-         """
-         if context_size is None:
-             context_size = self.personality.config.ctx_size
-         if minimum_spare_context_size is None:
-             minimum_spare_context_size = self.personality.config.min_n_predict
-
-         if sacrifice_id == -1 or len(prompt_parts[sacrifice_id])<50:
-             return "\n".join([s for s in prompt_parts if s!=""])
-         else:
-             part_tokens=[]
-             nb_tokens=0
-             for i,part in enumerate(prompt_parts):
-                 tk = self.personality.model.tokenize(part)
-                 part_tokens.append(tk)
-                 if i != sacrifice_id:
-                     nb_tokens += len(tk)
-             if len(part_tokens[sacrifice_id])>0:
-                 sacrifice_tk = part_tokens[sacrifice_id]
-                 sacrifice_tk= sacrifice_tk[-(context_size-nb_tokens-minimum_spare_context_size):]
-                 sacrifice_text = self.personality.model.detokenize(sacrifice_tk)
-             else:
-                 sacrifice_text = ""
-             prompt_parts[sacrifice_id] = sacrifice_text
-             return "\n".join([s for s in prompt_parts if s!=""])
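The sacrifice mechanism above keeps only the tail of the sacrificed part: with nb_tokens used by the other parts and minimum_spare_context_size reserved for generation, the sacrificed part is cropped to its last context_size - nb_tokens - minimum_spare_context_size tokens. A worked sketch with toy numbers:

```python
# Toy numbers: a 4096-token context, 1000 tokens used by the fixed parts,
# and 1024 tokens reserved for the model's answer.
context_size = 4096
nb_tokens = 1000
minimum_spare_context_size = 1024

budget = context_size - nb_tokens - minimum_spare_context_size  # 2072 tokens
sacrifice_tokens = list(range(5000))   # an oversized "previous_discussion" part
kept = sacrifice_tokens[-budget:]      # keep only the most recent 2072 tokens
assert len(kept) == 2072
```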
-     # ================================================= Sending commands to ui ===========================================
-     def add_collapsible_entry(self, title, content, subtitle=""):
-         return "\n".join(
-             [
-                 f'<details class="flex w-full rounded-xl border border-gray-200 bg-white shadow-sm dark:border-gray-800 dark:bg-gray-900 mb-3.5 max-w-full svelte-1escu1z" open="">',
-                 f' <summary class="grid w-full select-none grid-cols-[40px,1fr] items-center gap-2.5 p-2 svelte-1escu1z">',
-                 f' <dl class="leading-4">',
-                 f' <dd class="text-sm"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-arrow-right">',
-                 f' <line x1="5" y1="12" x2="19" y2="12"></line>',
-                 f' <polyline points="12 5 19 12 12 19"></polyline>',
-                 f' </svg>',
-                 f' </dd>',
-                 f' </dl>',
-                 f' <dl class="leading-4">',
-                 f' <dd class="text-sm"><h3>{title}</h3></dd>',
-                 f' <dt class="flex items-center gap-1 truncate whitespace-nowrap text-[.82rem] text-gray-400">{subtitle}</dt>',
-                 f' </dl>',
-                 f' </summary>',
-                 f' <div class="content px-5 pb-5 pt-4">',
-                 content,
-                 f' </div>',
-                 f' </details>\n'
-             ])
-
-     def internet_search_with_vectorization(self, query, quick_search:bool=False ):
-         """
-         Do an internet search and return the result
-         """
-         return self.personality.internet_search_with_vectorization(query, quick_search=quick_search)
-
-
-     def vectorize_and_query(self, text, query, max_chunk_size=512, overlap_size=20, internet_vectorization_nb_chunks=3):
-         vectorizer = TextVectorizer(VectorizationMethod.TFIDF_VECTORIZER, model = self.personality.model)
-         decomposer = DocumentDecomposer()
-         chunks = decomposer.decompose_document(text, max_chunk_size, overlap_size,self.personality.model.tokenize,self.personality.model.detokenize)
-         for i, chunk in enumerate(chunks):
-             vectorizer.add_document(f"chunk_{i}", self.personality.model.detokenize(chunk))
-         vectorizer.index()
-         docs, sorted_similarities, document_ids = vectorizer.recover_text(query, internet_vectorization_nb_chunks)
-         return docs, sorted_similarities
-
-
-     def step_start(self, step_text, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
-         """This triggers a step start
-
-         Args:
-             step_text (str): The step text
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE) to send the step start to. Defaults to None.
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(step_text, MSG_TYPE.MSG_TYPE_STEP_START)
-
-     def step_end(self, step_text, status=True, callback: Callable[[str, int, dict, list], bool]=None):
-         """This triggers a step end
-
-         Args:
-             step_text (str): The step text
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE) to send the step end to. Defaults to None.
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(step_text, MSG_TYPE.MSG_TYPE_STEP_END, {'status':status})
-
-     def step(self, step_text, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
-         """This triggers a step information
-
-         Args:
-             step_text (str): The step text
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE, dict, list) to send the step to. Defaults to None.
-             The callback has these fields:
-             - chunk
-             - Message Type : the type of message
-             - Parameters (optional) : a dictionary of parameters
-             - Metadata (optional) : a list of metadata
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(step_text, MSG_TYPE.MSG_TYPE_STEP)
-
-     def exception(self, ex, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
-         """This sends an exception to the client
-
-         Args:
-             ex (Exception): The exception to send
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE, dict, list) to send the exception to. Defaults to None.
-             The callback has these fields:
-             - chunk
-             - Message Type : the type of message
-             - Parameters (optional) : a dictionary of parameters
-             - Metadata (optional) : a list of metadata
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(str(ex), MSG_TYPE.MSG_TYPE_EXCEPTION)
-
-     def warning(self, warning:str, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
-         """This sends a warning to the client
-
-         Args:
-             warning (str): The warning text
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE, dict, list) to send the warning to. Defaults to None.
-             The callback has these fields:
-             - chunk
-             - Message Type : the type of message
-             - Parameters (optional) : a dictionary of parameters
-             - Metadata (optional) : a list of metadata
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(warning, MSG_TYPE.MSG_TYPE_EXCEPTION)
-
-     def info(self, info:str, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
-         """This sends information to the client
-
-         Args:
-             info (str): The information to be sent
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE, dict, list) to send the information to. Defaults to None.
-             The callback has these fields:
-             - chunk
-             - Message Type : the type of message
-             - Parameters (optional) : a dictionary of parameters
-             - Metadata (optional) : a list of metadata
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(info, MSG_TYPE.MSG_TYPE_INFO)
-
-     def json(self, title:str, json_infos:dict, callback: Callable[[str, int, dict, list], bool]=None, indent=4):
-         """This sends json data to the front end
-
-         Args:
-             json_infos (dict): The JSON data to send
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE, dict, list) to send the data to. Defaults to None.
-             The callback has these fields:
-             - chunk
-             - Message Type : the type of message
-             - Parameters (optional) : a dictionary of parameters
-             - Metadata (optional) : a list of metadata
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback("", MSG_TYPE.MSG_TYPE_JSON_INFOS, metadata = [{"title":title, "content":json.dumps(json_infos, indent=indent)}])
-
-     def ui(self, html_ui:str, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
-         """This sends ui elements to the front end
-
-         Args:
-             html_ui (str): The HTML UI to send
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE, dict, list) to send the UI to. Defaults to None.
-             The callback has these fields:
-             - chunk
-             - Message Type : the type of message
-             - Parameters (optional) : a dictionary of parameters
-             - Metadata (optional) : a list of metadata
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(html_ui, MSG_TYPE.MSG_TYPE_UI)
-
-     def code(self, code:str, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
-         """This sends code to the front end
-
-         Args:
-             code (str): The code to send
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE, dict, list) to send the code to. Defaults to None.
-             The callback has these fields:
-             - chunk
-             - Message Type : the type of message
-             - Parameters (optional) : a dictionary of parameters
-             - Metadata (optional) : a list of metadata
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(code, MSG_TYPE.MSG_TYPE_CODE)
-
-     def chunk(self, full_text:str, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
-         """This sends a text chunk to the front end
-
-         Args:
-             full_text (str): The text chunk
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE) to send the text to. Defaults to None.
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(full_text, MSG_TYPE.MSG_TYPE_CHUNK)
-
-
-     def full(self, full_text:str, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None, msg_type:MSG_TYPE = MSG_TYPE.MSG_TYPE_FULL):
-         """This sends full text to the front end
-
-         Args:
-             full_text (str): The full text
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE) to send the text to. Defaults to None.
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(full_text, msg_type)
-
-     def full_invisible_to_ai(self, full_text:str, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
-         """This sends full text to the front end (INVISIBLE to AI)
-
-         Args:
-             full_text (str): The full text
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE) to send the text to. Defaults to None.
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(full_text, MSG_TYPE.MSG_TYPE_FULL_INVISIBLE_TO_AI)
-
-     def full_invisible_to_user(self, full_text:str, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
-         """This sends full text to the front end (INVISIBLE to user)
-
-         Args:
-             full_text (str): The full text
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE) to send the text to. Defaults to None.
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(full_text, MSG_TYPE.MSG_TYPE_FULL_INVISIBLE_TO_USER)
-
-
-
-
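All of the helpers above funnel into one callback with the signature (str, MSG_TYPE, dict, list) -> bool. A sketch of a receiver that could be passed as the worker's callback (the printing logic is illustrative):

```python
from lollms_client.lollms_types import MSG_TYPE

def my_callback(chunk, msg_type, parameters=None, metadata=None):
    # chunk is the text payload; msg_type says which kind of event this is.
    if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
        print(f"[step start] {chunk}")
    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
        print(f"[step end] {chunk} ({(parameters or {}).get('status')})")
    elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
        print(chunk, end="", flush=True)
    return True  # by convention, returning False asks the caller to stop
```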
-     def execute_python(self, code, code_folder=None, code_file_name=None):
-         if code_folder is not None:
-             code_folder = Path(code_folder)
-
-         """Executes Python code and returns the output as JSON."""
-         # Create a temporary file.
-         root_folder = code_folder if code_folder is not None else self.personality.personality_output_folder
-         root_folder.mkdir(parents=True,exist_ok=True)
-         tmp_file = root_folder/(code_file_name if code_file_name is not None else f"ai_code.py")
-         with open(tmp_file,"w") as f:
-             f.write(code)
-
-         # Execute the Python code in a temporary file.
-         process = subprocess.Popen(
-             ["python", str(tmp_file)],
-             stdout=subprocess.PIPE,
-             stderr=subprocess.PIPE,
-             cwd=root_folder
-         )
-
-         # Get the output and error from the process.
-         output, error = process.communicate()
-
-         # Check if the process was successful.
-         if process.returncode != 0:
-             # The child process threw an exception.
-             error_message = f"Error executing Python code: {error.decode('utf8')}"
-             return error_message
-
-         # The child process was successful.
-         return output.decode("utf8")
-
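execute_python writes the code to a file and runs it with whatever python is on PATH, blocking until it exits. The same pattern in a standalone form (a sketch, using subprocess.run instead of Popen):

```python
import subprocess
from pathlib import Path

def run_python_snippet(code: str, workdir: Path) -> str:
    # Dump the code to a file, execute it, and capture stdout/stderr.
    workdir.mkdir(parents=True, exist_ok=True)
    script = workdir / "ai_code.py"
    script.write_text(code)
    proc = subprocess.run(
        ["python", str(script)], capture_output=True, text=True, cwd=workdir
    )
    if proc.returncode != 0:
        return f"Error executing Python code: {proc.stderr}"
    return proc.stdout
```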
-     def build_python_code(self, prompt, max_title_length=4096):
-         if not PackageManager.check_package_installed("autopep8"):
-             PackageManager.install_package("autopep8")
-         import autopep8
-         global_prompt = "\n".join([
-             f"{prompt}",
-             "!@>Extra conditions:",
-             "- The code must be complete, not just snippets, and should be put inside a single python markdown code.",
-             "-Preceive each python codeblock with a line using this syntax:",
-             "$$file_name|the file path relative to the root folder of the project$$",
-             "```python",
-             "# Placeholder. Here you need to put the code for the file",
-             "```",
-             "!@>Code Builder:"
-         ])
-         code = self.fast_gen(global_prompt, max_title_length)
-         code_blocks = self.extract_code_blocks(code)
-         try:
-             back_quote_index = code.index("```") # Remove trailing backticks
-             if back_quote_index>=0:
-                 # Removing any extra text
-                 code = code[:back_quote_index]
-         except:
-             pass
-         formatted_code = autopep8.fix_code(code) # Fix indentation errors
-         return formatted_code
-
-
-     def make_title(self, prompt, max_title_length: int = 50):
-         """
-         Generates a title for a given prompt.
-
-         Args:
-             prompt (str): The prompt for which a title needs to be generated.
-             max_title_length (int, optional): The maximum length of the generated title. Defaults to 50.
-
-         Returns:
-             str: The generated title.
-         """
-         global_prompt = f"!@>instructions: Based on the provided prompt, suggest a concise and relevant title that captures the main topic or theme of the conversation. Only return the suggested title, without any additional text or explanation.\n!@>prompt: {prompt}\n!@>title:"
-         title = self.fast_gen(global_prompt,max_title_length)
-         return title
-
-
-     def plan_with_images(self, request: str, images:list, actions_list:list=[LoLLMsAction], context:str = "", max_answer_length: int = 512) -> List[LoLLMsAction]:
-         """
-         Creates a plan out of a request and a context.
-
-         Args:
-             request (str): The request posed by the user.
-             max_answer_length (int, optional): Maximum string length allowed while interpreting the user's responses. Defaults to 512.
-
-         Returns:
-             List[LoLLMsAction]: The list of actions extracted from the model's JSON plan.
-         """
-         template = """!@>instruction:
- Act as plan builder, a tool capable of making plans to perform the user requested operation.
- """
-         if len(actions_list)>0:
-             template +="""The plan builder is an AI that responds in json format. It should plan a succession of actions in order to reach the objective.
- !@>list of action types information:
- [
- {{actions_list}}
- ]
- The AI should respond in this format using data from actions_list:
- {
-     "actions": [
-         {
-             "name": name of the action 1,
-             "parameters":[
-                 parameter name: parameter value
-             ]
-         },
-         {
-             "name": name of the action 2,
-             "parameters":[
-                 parameter name: parameter value
-             ]
-         }
-         ...
-     ]
- }
- """
-         if context!="":
-             template += """!@>Context:
- {{context}}Ok
- """
-         template +="""!@>request: {{request}}
- """
-         template +="""!@>plan: To acheive the requested objective, this is the list of actions to follow, formatted as requested in json format:\n```json\n"""
-         pr = PromptReshaper(template)
-         prompt = pr.build({
-                 "context":context,
-                 "request":request,
-                 "actions_list":",\n".join([f"{action}" for action in actions_list])
-                 },
-                 self.personality.model.tokenize,
-                 self.personality.model.detokenize,
-                 self.personality.model.config.ctx_size,
-                 ["previous_discussion"]
-                 )
-         gen = self.generate_with_images(prompt, images, max_answer_length).strip().replace("</s>","").replace("<s>","")
-         gen = self.remove_backticks(gen)
-         self.print_prompt("full",prompt+gen)
-         gen = fix_json(gen)
-         return generate_actions(actions_list, gen)
-
-     def plan(self, request: str, actions_list:list=[LoLLMsAction], context:str = "", max_answer_length: int = 512) -> List[LoLLMsAction]:
-         """
-         Creates a plan out of a request and a context.
-
-         Args:
-             request (str): The request posed by the user.
-             max_answer_length (int, optional): Maximum string length allowed while interpreting the user's responses. Defaults to 512.
-
-         Returns:
-             List[LoLLMsAction]: The list of actions extracted from the model's JSON plan.
-         """
-         template = """!@>instruction:
- Act as plan builder, a tool capable of making plans to perform the user requested operation.
- """
-         if len(actions_list)>0:
-             template +="""The plan builder is an AI that responds in json format. It should plan a succession of actions in order to reach the objective.
- !@>list of action types information:
- [
- {{actions_list}}
- ]
- The AI should respond in this format using data from actions_list:
- {
-     "actions": [
-         {
-             "name": name of the action 1,
-             "parameters":[
-                 parameter name: parameter value
-             ]
-         },
-         {
-             "name": name of the action 2,
-             "parameters":[
-                 parameter name: parameter value
-             ]
-         }
-         ...
-     ]
- }
- """
-         if context!="":
-             template += """!@>Context:
- {{context}}Ok
- """
-         template +="""!@>request: {{request}}
- """
-         template +="""!@>plan: To acheive the requested objective, this is the list of actions to follow, formatted as requested in json format:\n```json\n"""
-         pr = PromptReshaper(template)
-         prompt = pr.build({
-                 "context":context,
-                 "request":request,
-                 "actions_list":",\n".join([f"{action}" for action in actions_list])
-                 },
-                 self.personality.model.tokenize,
-                 self.personality.model.detokenize,
-                 self.personality.model.config.ctx_size,
-                 ["previous_discussion"]
-                 )
-         gen = self.generate(prompt, max_answer_length).strip().replace("</s>","").replace("<s>","")
-         gen = self.remove_backticks(gen).strip()
-         if gen[-1]!="}":
-             gen+="}"
-         self.print_prompt("full",prompt+gen)
-         gen = fix_json(gen)
-         return generate_actions(actions_list, gen)
-
-
-     def parse_directory_structure(self, structure):
-         paths = []
-         lines = structure.strip().split('\n')
-         stack = []
-
-         for line in lines:
-             line = line.rstrip()
-             level = (len(line) - len(line.lstrip())) // 4
-
-             if '/' in line or line.endswith(':'):
-                 directory = line.strip(' ├─└│').rstrip(':').rstrip('/')
-
-                 while stack and level < stack[-1][0]:
-                     stack.pop()
-
-                 stack.append((level, directory))
-                 path = '/'.join([dir for _, dir in stack]) + '/'
-                 paths.append(path)
-             else:
-                 file = line.strip(' ├─└│')
-                 if stack:
-                     path = '/'.join([dir for _, dir in stack]) + '/' + file
-                     paths.append(path)
-
-         return paths
-
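parse_directory_structure expects an indentation-based tree, four spaces per level, with directories marked by a trailing / or :. A hypothetical input and the paths it yields:

```python
structure = (
    "project/\n"
    "    src/\n"
    "        main.py\n"
    "        utils.py\n"
)
# parse_directory_structure(structure) returns:
# ['project/', 'project/src/', 'project/src/main.py', 'project/src/utils.py']
```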
-     def extract_code_blocks(self, text: str) -> List[dict]:
-         remaining = text
-         bloc_index = 0
-         first_index=0
-         indices = []
-         while len(remaining)>0:
-             try:
-                 index = remaining.index("```")
-                 indices.append(index+first_index)
-                 remaining = remaining[index+3:]
-                 first_index += index+3
-                 bloc_index +=1
-             except Exception as ex:
-                 if bloc_index%2==1:
-                     index=len(remaining)
-                     indices.append(index)
-                 remaining = ""
-
-         code_blocks = []
-         is_start = True
-         for index, code_delimiter_position in enumerate(indices):
-             block_infos = {
-                 'index':index,
-                 'file_name': "",
-                 'content': "",
-                 'type':""
-             }
-             if is_start:
-
-                 sub_text = text[code_delimiter_position+3:]
-                 if len(sub_text)>0:
-                     try:
-                         find_space = sub_text.index(" ")
-                     except:
-                         find_space = int(1e10)
-                     try:
-                         find_return = sub_text.index("\n")
-                     except:
-                         find_return = int(1e10)
-                     next_index = min(find_return, find_space)
-                     start_pos = next_index
-                     if code_delimiter_position+3<len(text) and text[code_delimiter_position+3] in ["\n"," ","\t"] :
-                         # No language tag was given after the opening fence
-                         block_infos["type"]='language-specific'
-                     else:
-                         block_infos["type"]=sub_text[:next_index]
-
-                     next_pos = indices[index+1]-code_delimiter_position
-                     if sub_text[next_pos-3]=="`":
-                         block_infos["content"]=sub_text[start_pos:next_pos-3].strip()
-                     else:
-                         block_infos["content"]=sub_text[start_pos:next_pos].strip()
-                     code_blocks.append(block_infos)
-                 is_start = False
-             else:
-                 is_start = True
-                 continue
-
-         return code_blocks
-
-
-
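extract_code_blocks pairs up ``` delimiters, reading the token right after an opening fence as the block type. A usage sketch (assuming worker is a LollmsPersonalityWorker instance):

```python
text = "Here is the file:\n```python\nprint('hello')\n```\nDone."
blocks = worker.extract_code_blocks(text)
# blocks[0] == {'index': 0, 'file_name': '', 'content': "print('hello')", 'type': 'python'}
```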
-     def build_and_execute_python_code(self,context, instructions, execution_function_signature, extra_imports=""):
-         code = "```python\n"+self.fast_gen(
-             self.build_prompt([
-                 "!@>context!:",
-                 context,
-                 f"!@>system:",
-                 f"{instructions}",
-                 f"Here is the signature of the function:\n{execution_function_signature}",
-                 "Don't call the function, just write it",
-                 "Do not provide usage example.",
-                 "The code must me without comments",
-                 f"!@>coder: Sure, in the following code, I import the necessary libraries, then define the function as you asked.",
-                 "The function is ready to be used in your code and performs the task as you asked:",
-                 "```python\n"
-             ],2), callback=self.sink)
-         code = code.replace("```python\n```python\n", "```python\n").replace("```\n```","```")
-         code=self.extract_code_blocks(code)
-
-         if len(code)>0:
-             # Perform the search query
-             code = code[0]["content"]
-             code = "\n".join([
-                 extra_imports,
-                 code
-             ])
-             ASCIIColors.magenta(code)
-             module_name = 'custom_module'
-             spec = importlib.util.spec_from_loader(module_name, loader=None)
-             module = importlib.util.module_from_spec(spec)
-             exec(code, module.__dict__)
-             return module, code
-
-
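The importlib calls at the end are the standard pattern for turning a generated code string into a live module object. In isolation (the module contents here are illustrative):

```python
import importlib.util

code = "def greet(name):\n    return f'hello {name}'\n"

spec = importlib.util.spec_from_loader("custom_module", loader=None)
module = importlib.util.module_from_spec(spec)
exec(code, module.__dict__)       # populate the empty module from the string
print(module.greet("world"))      # -> hello world
```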
-     def yes_no(self, question: str, context:str="", max_answer_length: int = 50, conditionning="") -> bool:
-         """
-         Answers a yes/no question by asking the model to choose between "no" and "yes".
-
-         Args:
-             question (str): The question to answer.
-             max_answer_length (int, optional): The maximum length of the generated answer. Defaults to 50.
-             conditionning: An optional system message to put at the beginning of the prompt
-         Returns:
-             bool: True if the model answered yes, False otherwise.
-         """
-         return self.multichoice_question(question, ["no","yes"], context, max_answer_length, conditionning=conditionning)>0
-
-     def multichoice_question(self, question: str, possible_answers:list, context:str = "", max_answer_length: int = 50, conditionning="") -> int:
-         """
-         Interprets a multi-choice question from a user's response. This function expects only one choice as true. All other choices are considered false. If none are correct, returns -1.
-
-         Args:
-             question (str): The multi-choice question posed by the user.
-             possible_answers (List[Any]): A list containing all valid options for the chosen value. For each item in the list, either 'True', 'False', None or another callable should be passed which will serve as the truth test function when checking against the actual user input.
-             max_answer_length (int, optional): Maximum string length allowed while interpreting the user's responses. Defaults to 50.
-             conditionning: An optional system message to put at the beginning of the prompt
-
-         Returns:
-             int: Index of the selected option within the possible_answers list. Or -1 if no match was found among any of them.
-         """
-         choices = "\n".join([f"{i}. {possible_answer}" for i, possible_answer in enumerate(possible_answers)])
-         elements = [conditionning] if conditionning!="" else []
-         elements += [
-             "!@>system:",
-             "Answer this multi choices question.",
-         ]
-         if context!="":
-             elements+=[
-                 "!@>Context:",
-                 f"{context}",
-             ]
-         elements +=[
-             "Answer with an id from the possible answers.",
-             "Do not answer with an id outside this possible answers.",
-             "Do not explain your reasons or add comments.",
-             "the output should be an integer."
-         ]
-         elements += [
-             f"!@>question: {question}",
-             "!@>possible answers:",
-             f"{choices}",
-         ]
-         elements += ["!@>answer:"]
-         prompt = self.build_prompt(elements)
-
-         gen = self.generate(prompt, max_answer_length, temperature=0.1, top_k=50, top_p=0.9, repeat_penalty=1.0, repeat_last_n=50, callback=self.sink).strip().replace("</s>","").replace("<s>","")
-         if len(gen)>0:
-             selection = gen.strip().split()[0].replace(",","").replace(".","")
-             self.print_prompt("Multi choice selection",prompt+gen)
-             try:
-                 return int(selection)
-             except:
-                 ASCIIColors.cyan("Model failed to answer the question")
-                 return -1
-         else:
-             return -1
-
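yes_no and multichoice_question reduce open-ended model output to an index: the model is instructed to answer with a bare integer id, and the first whitespace-separated token of the reply is parsed. A usage sketch (assuming worker is a LollmsPersonalityWorker with a model attached):

```python
idx = worker.multichoice_question(
    "Which format does the user want?",
    ["plain text", "markdown", "json"],
    context="User said: give it to me as bullet points",
)
# idx is 0, 1 or 2 on success, and -1 if the reply is not a valid integer

if worker.yes_no("Is the user asking to generate an image?", context="draw me a cat"):
    ...  # yes_no is multichoice_question over ["no", "yes"], so True means "yes"
```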
-     def multichoice_ranking(self, question: str, possible_answers:list, context:str = "", max_answer_length: int = 50, conditionning="") -> int:
-         """
-         Ranks answers for a question from best to worst. Returns a list of integers.
-
-         Args:
-             question (str): The multi-choice question posed by the user.
-             possible_answers (List[Any]): A list containing all valid options for the chosen value. For each item in the list, either 'True', 'False', None or another callable should be passed which will serve as the truth test function when checking against the actual user input.
-             max_answer_length (int, optional): Maximum string length allowed while interpreting the user's responses. Defaults to 50.
-             conditionning: An optional system message to put at the beginning of the prompt
-
-         Returns:
-             list: The list of answer indices ranked from best to worst, or None if the model failed to rank them.
-         """
-         choices = "\n".join([f"{i}. {possible_answer}" for i, possible_answer in enumerate(possible_answers)])
-         elements = [conditionning] if conditionning!="" else []
-         elements += [
-             "!@>instructions:",
-             "Answer this multi choices question.",
-             "Answer with an id from the possible answers.",
-             "Do not answer with an id outside this possible answers.",
-             f"!@>question: {question}",
-             "!@>possible answers:",
-             f"{choices}",
-         ]
-         if context!="":
-             elements+=[
-                 "!@>Context:",
-                 f"{context}",
-             ]
-
-         elements += ["!@>answer:"]
-         prompt = self.build_prompt(elements)
-
-         gen = self.generate(prompt, max_answer_length, temperature=0.1, top_k=50, top_p=0.9, repeat_penalty=1.0, repeat_last_n=50).strip().replace("</s>","").replace("<s>","")
-         self.print_prompt("Multi choice ranking",prompt+gen)
-         if gen.index("]")>=0:
-             try:
-                 ranks = eval(gen.split("]")[0]+"]")
-                 return ranks
-             except:
-                 ASCIIColors.red("Model failed to rank inputs")
-                 return None
-         else:
-             ASCIIColors.red("Model failed to rank inputs")
-             return None
-
-
-
-     def build_html5_integration(self, html, ifram_name="unnamed"):
-         """
-         This function creates an HTML5 iframe with the given HTML content and iframe name.
-
-         Args:
-             html (str): The HTML content to be displayed in the iframe.
-             ifram_name (str, optional): The name of the iframe. Defaults to "unnamed".
-
-         Returns:
-             str: The HTML string for the iframe.
-         """
-         return "\n".join(
-             '<div style="width: 80%; margin: 0 auto;">',
-             f'<iframe id="{ifram_name}" srcdoc="',
-             html,
-             '" style="width: 100%; height: 600px; border: none;"></iframe>',
-             '</div>'
-         )
-
-
-
-     def info(self, info_text:str, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
-         """This sends info text to the front end
-
-         Args:
-             info_text (str): The info text
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE) to send the info to. Defaults to None.
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(info_text, MSG_TYPE.MSG_TYPE_FULL)
-
-     def step_progress(self, step_text:str, progress:float, callback: Callable[[str, MSG_TYPE, dict, list, LollmsPersonality], bool]=None):
-         """This sends step progress to the front end
-
-         Args:
-             step_text (str): The step text
-             progress (float): The step progress in %
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE) to send the progress to. Defaults to None.
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(step_text, MSG_TYPE.MSG_TYPE_STEP_PROGRESS, {'progress':progress})
-
-     def new_message(self, message_text:str, message_type:MSG_TYPE= MSG_TYPE.MSG_TYPE_FULL, metadata=[], callback: Callable[[str, int, dict, list, LollmsPersonality], bool]=None):
-         """This creates a new message on the front end
-
-         Args:
-             message_text (str): The message text
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE) to send the new message to. Defaults to None.
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(message_text, MSG_TYPE.MSG_TYPE_NEW_MESSAGE, parameters={'type':message_type.value,'metadata':metadata},personality = self.personality)
-
-     def finished_message(self, message_text:str="", callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
-         """This signals the end of the current message to the front end
-
-         Args:
-             message_text (str): The message text
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE) to send the message end to. Defaults to None.
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(message_text, MSG_TYPE.MSG_TYPE_FINISHED_MESSAGE)
-
-     def print_prompt(self, title, prompt):
-         ASCIIColors.red("*-*-*-*-*-*-*-* ", end="")
-         ASCIIColors.red(title, end="")
-         ASCIIColors.red(" *-*-*-*-*-*-*-*")
-         ASCIIColors.yellow(prompt)
-         ASCIIColors.red(" *-*-*-*-*-*-*-*")
-
-
-     def fast_gen_with_images(self, prompt: str, images:list, max_generation_size: int= None, placeholders: dict = {}, sacrifice: list = ["previous_discussion"], debug: bool = False, callback=None, show_progress=False) -> str:
-         """
-         Fast way to generate text with images
-
-         This method takes in a prompt, maximum generation size, optional placeholders, sacrifice list, and debug flag.
-         It reshapes the context before performing text generation by adjusting and cropping the number of tokens.
-
-         Parameters:
-         - prompt (str): The input prompt for text generation.
-         - max_generation_size (int): The maximum number of tokens to generate.
-         - placeholders (dict, optional): A dictionary of placeholders to be replaced in the prompt. Defaults to an empty dictionary.
-         - sacrifice (list, optional): A list of placeholders to sacrifice if the window is bigger than the context size minus the number of tokens to generate. Defaults to ["previous_discussion"].
-         - debug (bool, optional): Flag to enable/disable debug mode. Defaults to False.
-
-         Returns:
-         - str: The generated text after removing special tokens ("<s>" and "</s>") and stripping any leading/trailing whitespace.
-         """
-         return self.personality.fast_gen_with_images(prompt=prompt, images=images, max_generation_size=max_generation_size,placeholders=placeholders, sacrifice=sacrifice, debug=debug, callback=callback, show_progress=show_progress)
-
-     def fast_gen(self, prompt: str, max_generation_size: int= None, placeholders: dict = {}, sacrifice: list = ["previous_discussion"], debug: bool = False, callback=None, show_progress=False) -> str:
-         """
-         Fast way to generate text
-
-         This method takes in a prompt, maximum generation size, optional placeholders, sacrifice list, and debug flag.
-         It reshapes the context before performing text generation by adjusting and cropping the number of tokens.
-
-         Parameters:
-         - prompt (str): The input prompt for text generation.
-         - max_generation_size (int): The maximum number of tokens to generate.
-         - placeholders (dict, optional): A dictionary of placeholders to be replaced in the prompt. Defaults to an empty dictionary.
-         - sacrifice (list, optional): A list of placeholders to sacrifice if the window is bigger than the context size minus the number of tokens to generate. Defaults to ["previous_discussion"].
-         - debug (bool, optional): Flag to enable/disable debug mode. Defaults to False.
-
-         Returns:
-         - str: The generated text after removing special tokens ("<s>" and "</s>") and stripping any leading/trailing whitespace.
-         """
-         return self.personality.fast_gen(prompt=prompt,max_generation_size=max_generation_size,placeholders=placeholders, sacrifice=sacrifice, debug=debug, callback=callback, show_progress=show_progress)
-
-
-     # Helper method to convert an output path to a URL
-     def path2url(file):
-         file = str(file).replace("\\","/")
-         pth = file.split('/')
-         idx = pth.index("outputs")
-         pth = "/".join(pth[idx:])
-         file_path = f"![](/{pth})\n"
-         return file_path
-
-     def build_a_document_block(self, title="Title", link="", content="content"):
-         if link!="":
-             return f'''
- <div style="width: 100%; border: 1px solid #ccc; border-radius: 5px; padding: 20px; font-family: Arial, sans-serif; margin-bottom: 20px; box-sizing: border-box;">
-     <h3 style="margin-top: 0;">
-         <a href="{link}" target="_blank" style="text-decoration: none; color: #333;">{title}</a>
-     </h3>
-     <pre style="white-space: pre-wrap;color: #666;">{content}</pre>
- </div>
- '''
-         else:
-             return f'''
- <div style="width: 100%; border: 1px solid #ccc; border-radius: 5px; padding: 20px; font-family: Arial, sans-serif; margin-bottom: 20px; box-sizing: border-box;">
-     <h3 style="margin-top: 0;">
-         <p style="text-decoration: none; color: #333;">{title}</p>
-     </h3>
-     <pre style="white-space: pre-wrap;color: #666;">{content}</pre>
- </div>
- '''
-
-     def build_a_folder_link(self, folder_path, link_text="Open Folder"):
-         folder_path = str(folder_path).replace('\\','/')
-         return '''
- <a href="#" onclick="path=\''''+f'{folder_path}'+'''\';
- fetch('/open_folder', {
-     method: 'POST',
-     headers: {
-         'Content-Type': 'application/json'
-     },
-     body: JSON.stringify({ path: path })
- })
- .then(response => response.json())
- .then(data => {
-     if (data.status) {
-         console.log('Folder opened successfully');
-     } else {
-         console.error('Error opening folder:', data.error);
-     }
- })
- .catch(error => {
-     console.error('Error:', error);
- });
- ">'''+f'''{link_text}</a>'''
-     def build_a_file_link(self, file_path, link_text="Open Folder"):
-         file_path = str(file_path).replace('\\','/')
-         return '''
- <a href="#" onclick="path=\''''+f'{file_path}'+'''\';
- fetch('/open_file', {
-     method: 'POST',
-     headers: {
-         'Content-Type': 'application/json'
-     },
-     body: JSON.stringify({ path: path })
- })
- .then(response => response.json())
- .then(data => {
-     if (data.status) {
-         console.log('Folder opened successfully');
-     } else {
-         console.error('Error opening folder:', data.error);
-     }
- })
- .catch(error => {
-     console.error('Error:', error);
- });
- ">'''+f'''{link_text}</a>'''
-     # ===========================================================
-     def compress_js(self, code):
-         return compress_js(code)
-     def compress_python(self, code):
-         return compress_python(code)
-     def compress_html(self, code):
-         return compress_html(code)
-
-     # ===========================================================
-     def select_model(self, binding_name, model_name):
-         self.personality.app.select_model(binding_name, model_name)
-
- class AIPersonalityInstaller:
-     def __init__(self, personality:LollmsPersonality) -> None:
-         self.personality = personality