lollms-client 0.17.1__py3-none-any.whl → 0.19.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lollms-client might be problematic.

Files changed (25)
  1. examples/function_calling_with_local_custom_mcp.py +250 -0
  2. examples/local_mcp.py +171 -0
  3. examples/text_2_image.py +8 -3
  4. examples/text_2_image_diffusers.py +274 -0
  5. lollms_client/__init__.py +7 -6
  6. lollms_client/llm_bindings/llamacpp/__init__.py +8 -8
  7. lollms_client/lollms_core.py +345 -10
  8. lollms_client/lollms_mcp_binding.py +198 -0
  9. lollms_client/mcp_bindings/local_mcp/__init__.py +311 -0
  10. lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +74 -0
  11. lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +195 -0
  12. lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +107 -0
  13. lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +141 -0
  14. lollms_client/stt_bindings/whisper/__init__.py +1 -1
  15. lollms_client/tti_bindings/dalle/__init__.py +433 -0
  16. lollms_client/tti_bindings/diffusers/__init__.py +692 -0
  17. lollms_client/tti_bindings/gemini/__init__.py +0 -0
  18. {lollms_client-0.17.1.dist-info → lollms_client-0.19.0.dist-info}/METADATA +1 -1
  19. {lollms_client-0.17.1.dist-info → lollms_client-0.19.0.dist-info}/RECORD +22 -13
  20. examples/function_call/functions_call_with images.py +0 -52
  21. lollms_client/lollms_functions.py +0 -72
  22. lollms_client/lollms_tasks.py +0 -691
  23. {lollms_client-0.17.1.dist-info → lollms_client-0.19.0.dist-info}/WHEEL +0 -0
  24. {lollms_client-0.17.1.dist-info → lollms_client-0.19.0.dist-info}/licenses/LICENSE +0 -0
  25. {lollms_client-0.17.1.dist-info → lollms_client-0.19.0.dist-info}/top_level.txt +0 -0
lollms_client/lollms_tasks.py
@@ -1,691 +0,0 @@
- from lollms_client.lollms_core import LollmsClient
- from lollms_client.lollms_types import SUMMARY_MODE, MSG_TYPE
- from lollms_client.lollms_utilities import remove_text_from_string, PromptReshaper, process_ai_output
- from typing import List, Callable, Dict, Any, Optional
- from ascii_colors import ASCIIColors
- from functools import partial
- import json
- import sys
- from datetime import datetime
-
- class TasksLibrary:
-     def __init__(self, lollms:LollmsClient, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None) -> None:
-         self.lollms = lollms
-         self.callback = callback
-
-     def print_prompt(self, title, prompt):
-         ASCIIColors.red("*-*-*-*-*-*-*-* ", end="")
-         ASCIIColors.red(title, end="")
-         ASCIIColors.red(" *-*-*-*-*-*-*-*")
-         ASCIIColors.yellow(prompt)
-         ASCIIColors.red(" *-*-*-*-*-*-*-*")
-
-     def setCallback(self, callback: Callable[[str, MSG_TYPE, dict, list], bool]):
-         self.callback = callback
-
-     def process(self, text:str, message_type:MSG_TYPE, callback=None, show_progress=False):
-         if callback is None:
-             callback = self.callback
-         if text is None:
-             return True
-         if message_type==MSG_TYPE.MSG_TYPE_CHUNK:
-             bot_says = self.bot_says + text
-         elif message_type==MSG_TYPE.MSG_TYPE_FULL:
-             bot_says = text
-
-         if show_progress:
-             if self.nb_received_tokens==0:
-                 self.start_time = datetime.now()
-             dt =(datetime.now() - self.start_time).seconds
-             if dt==0:
-                 dt=1
-             spd = self.nb_received_tokens/dt
-             ASCIIColors.green(f"Received {self.nb_received_tokens} tokens (speed: {spd:.2f}t/s) ",end="\r",flush=True)
-             sys.stdout = sys.__stdout__
-             sys.stdout.flush()
-             self.nb_received_tokens+=1
-
-
-         antiprompt = self.detect_antiprompt(bot_says)
-         if antiprompt:
-             self.bot_says = remove_text_from_string(bot_says,antiprompt)
-             ASCIIColors.warning(f"\n{antiprompt} detected. Stopping generation")
-             return False
-         else:
-             if callback:
-                 callback(text,message_type)
-             self.bot_says = bot_says
-             return True
-     def generate(self, prompt, n_predict, temperature = None, top_k = None, top_p=None, repeat_penalty=None, repeat_last_n=None, callback=None, debug=False, show_progress=False, stream= False ):
-         ASCIIColors.info("Text generation started: Warming up")
-         self.nb_received_tokens = 0
-         self.bot_says = ""
-         if debug:
-             self.print_prompt("gen",prompt)
-
-         bot_says = self.lollms.generate_text(
-             prompt=prompt,
-             n_predict = n_predict,
-             stream=stream,
-             streaming_callback=partial(self.process, callback=callback, show_progress=show_progress),
-             temperature= temperature,
-             top_k= top_k,
-             top_p= top_p,
-             repeat_penalty= repeat_penalty,
-             repeat_last_n= repeat_last_n,
-         ).strip()
-         return self.bot_says if stream else bot_says
-
-
-     def fast_gen(
-             self,
-             prompt: str,
-             n_predict: int=None,
-             placeholders: dict = {},
-             sacrifice: list = ["previous_discussion"],
-             debug: bool = False,
-             stream: bool = False,
-             callback=None,
-             show_progress=False,
-             temperature = None,
-             top_k = None,
-             top_p=None,
-             repeat_penalty=None,
-             repeat_last_n=None
-     ) -> str:
-         """
-         Fast way to generate code
-
-         This method takes in a prompt, maximum generation size, optional placeholders, sacrifice list, and debug flag.
-         It reshapes the context before performing text generation by adjusting and cropping the number of tokens.
-
-         Parameters:
-         - prompt (str): The input prompt for text generation.
-         - max_generation_size (int): The maximum number of tokens to generate.
-         - placeholders (dict, optional): A dictionary of placeholders to be replaced in the prompt. Defaults to an empty dictionary.
-         - sacrifice (list, optional): A list of placeholders to sacrifice if the window is bigger than the context size minus the number of tokens to generate. Defaults to ["previous_discussion"].
-         - debug (bool, optional): Flag to enable/disable debug mode. Defaults to False.
-
-         Returns:
-         - str: The generated text after removing special tokens ("<s>" and "</s>") and stripping any leading/trailing whitespace.
-         """
-         if n_predict is None:
-             prompt_size = self.lollms.tokenize(prompt)
-             n_predict = self.lollms.default_ctx_size - len(prompt_size)
-
-         pr = PromptReshaper(prompt)
-         prompt = pr.build(placeholders,
-                           self.lollms.binding.tokenize,
-                           self.lollms.binding.detokenize,
-                           self.lollms.default_ctx_size - n_predict,
-                           sacrifice
-                           )
-         ntk = len(self.lollms.binding.tokenize(prompt))
-         n_predict = min(self.lollms.default_ctx_size - ntk, n_predict)
-         # TODO : add show progress
-
-         gen = self.lollms.generate_text(
-             prompt=prompt,
-             n_predict = n_predict,
-             stream=stream,
-             streaming_callback=partial(self.process, callback=callback, show_progress=show_progress),
-             temperature= temperature,
-             top_k= top_k,
-             top_p= top_p,
-             repeat_penalty= repeat_penalty,
-             repeat_last_n= repeat_last_n
-         ).strip().replace("</s>", "").replace("<s>", "")
-         if debug:
-             self.print_prompt("prompt", prompt+gen)
-
-         return gen
-
-
-
-     def step_start(self, step_text, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
-         """This triggers a step start
-
-         Args:
-             step_text (str): The step text
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE) to send the step start to. Defaults to None.
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(step_text, MSG_TYPE.MSG_TYPE_STEP_START)
-
-     def step_end(self, step_text, status=True, callback: Callable[[str, int, dict, list], bool]=None):
-         """This triggers a step end
-
-         Args:
-             step_text (str): The step text
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE) to send the step end to. Defaults to None.
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(step_text, MSG_TYPE.MSG_TYPE_STEP_END, {'status':status})
-
-     def step(self, step_text, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
-         """This triggers a step information
-
-         Args:
-             step_text (str): The step text
-             callback (callable, optional): A callable with this signature (str, MSG_TYPE, dict, list) to send the step to. Defaults to None.
-             The callback has these fields:
-             - chunk
-             - Message Type : the type of message
-             - Parameters (optional) : a dictionary of parameters
-             - Metadata (optional) : a list of metadata
-         """
-         if not callback and self.callback:
-             callback = self.callback
-
-         if callback:
-             callback(step_text, MSG_TYPE.MSG_TYPE_STEP)
-
-
-     def sink(self, s=None,i=None,d=None):
-         pass
-
-     def build_prompt(self, prompt_parts:List[str], sacrifice_id:int=-1, context_size:int=None, minimum_spare_context_size:int=None):
-         """
-         Builds the prompt for code generation.
-
-         Args:
-             prompt_parts (List[str]): A list of strings representing the parts of the prompt.
-             sacrifice_id (int, optional): The ID of the part to sacrifice.
-             context_size (int, optional): The size of the context.
-             minimum_spare_context_size (int, optional): The minimum spare context size.
-
-         Returns:
-             str: The built prompt.
-         """
-         if context_size is None:
-             context_size = self.lollms.ctx_size
-         if minimum_spare_context_size is None:
-             minimum_spare_context_size = self.lollms.min_n_predict
-
-         if sacrifice_id == -1 or len(prompt_parts[sacrifice_id])<50:
-             return "\n".join([s for s in prompt_parts if s!=""])
-         else:
-             part_tokens=[]
-             nb_tokens=0
-             for i,part in enumerate(prompt_parts):
-                 tk = self.lollms.tokenize(part)
-                 part_tokens.append(tk)
-                 if i != sacrifice_id:
-                     nb_tokens += len(tk)
-             if len(part_tokens[sacrifice_id])>0:
-                 sacrifice_tk = part_tokens[sacrifice_id]
-                 sacrifice_tk= sacrifice_tk[-(context_size-nb_tokens-minimum_spare_context_size):]
-                 sacrifice_text = self.lollms.detokenize(sacrifice_tk)
-             else:
-                 sacrifice_text = ""
-             prompt_parts[sacrifice_id] = sacrifice_text
-             return "\n".join([s for s in prompt_parts if s!=""])
-
-     def translate_text_chunk(self, text_chunk, output_language:str="french", host_address:str=None, model_name: str = None, temperature=0.1, max_generation_size=3000, callback=None, show_progress:bool=False):
-         """
-         This function translates a given text chunk into a specified language.
-
-         Parameters:
-         text_chunk (str): The text to be translated.
-         output_language (str): The language into which the text should be translated. Defaults to 'french'.
-         host_address (str): The address of the host where the translation model is located. Defaults to None.
-         model_name (str): The name of the translation model to be used. Defaults to None.
-         temperature (float): The temperature value for the translation model. This value affects the randomness of the translation. Defaults to 0.1.
-         max_generation_size (int): The maximum length of the translated text. Defaults to 3000.
-
-         Returns:
-         str: The translated text.
-         """
-         translated = self.lollms.generate_text(
-             prompt= "\n".join([
-                 self.lollms.system_full_header,
-                 f"Translate the following text to {output_language}.",
-                 "Be faithful to the original text and do not add or remove any information.",
-                 "Respond only with the translated text.",
-                 "Do not add comments or explanations.",
-                 self.lollms.system_custom_header("text to translate"),
-                 f"{text_chunk}",
-                 self.lollms.ai_custom_header("translation"),
-             ]),
-             n_predict = max_generation_size,
-             streaming_callback=partial(self.process, callback=callback, show_progress=show_progress),
-             temperature= temperature
-         )
-         return translated
-
-     def extract_code_blocks(self, text: str) -> List[dict]:
-         """
-         This function extracts code blocks from a given text.
-
-         Parameters:
-         text (str): The text from which to extract code blocks. Code blocks are identified by triple backticks (```).
-
-         Returns:
-         List[dict]: A list of dictionaries where each dictionary represents a code block and contains the following keys:
-         - 'index' (int): The index of the code block in the text.
-         - 'file_name' (str): An empty string. This field is not used in the current implementation.
-         - 'content' (str): The content of the code block.
-         - 'type' (str): The type of the code block. If the code block starts with a language specifier (like 'python' or 'java'), this field will contain that specifier. Otherwise, it will be set to 'language-specific'.
-
-         Note:
-         The function assumes that the number of triple backticks in the text is even.
-         If the number of triple backticks is odd, it will consider the rest of the text as the last code block.
-         """
-         remaining = text
-         bloc_index = 0
-         first_index=0
-         indices = []
-         while len(remaining)>0:
-             try:
-                 index = remaining.index("```")
-                 indices.append(index+first_index)
-                 remaining = remaining[index+3:]
-                 first_index += index+3
-                 bloc_index +=1
-             except Exception as ex:
-                 if bloc_index%2==1:
-                     index=len(remaining)
-                     indices.append(index)
-                 remaining = ""
-
-         code_blocks = []
-         is_start = True
-         for index, code_delimiter_position in enumerate(indices):
-             block_infos = {
-                 'index':index,
-                 'file_name': "",
-                 'content': "",
-                 'type':""
-             }
-             if is_start:
-
-                 sub_text = text[code_delimiter_position+3:]
-                 if len(sub_text)>0:
-                     try:
-                         find_space = sub_text.index(" ")
-                     except:
-                         find_space = int(1e10)
-                     try:
-                         find_return = sub_text.index("\n")
-                     except:
-                         find_return = int(1e10)
-                     next_index = min(find_return, find_space)
-                     start_pos = next_index
-                     if code_delimiter_position+3<len(text) and text[code_delimiter_position+3] in ["\n"," ","\t"] :
-                         # No
-                         block_infos["type"]='language-specific'
-                     else:
-                         block_infos["type"]=sub_text[:next_index]
-
-                     next_pos = indices[index+1]-code_delimiter_position
-                     if sub_text[next_pos-3]=="`":
-                         block_infos["content"]=sub_text[start_pos:next_pos-3].strip()
-                     else:
-                         block_infos["content"]=sub_text[start_pos:next_pos].strip()
-                     code_blocks.append(block_infos)
-                 is_start = False
-             else:
-                 is_start = True
-                 continue
-
-         return code_blocks
-
-     def yes_no(self, question: str, context:str="", max_answer_length: int = 50, conditionning="") -> bool:
-         """
-         Analyzes the user prompt and answers whether it is asking to generate an image.
-
-         Args:
-             question (str): The user's message.
-             max_answer_length (int, optional): The maximum length of the generated answer. Defaults to 50.
-             conditionning: An optional system message to put at the beginning of the prompt
-         Returns:
-             bool: True if the user prompt is asking to generate an image, False otherwise.
-         """
-         return self.multichoice_question(question, ["no","yes"], context, max_answer_length, conditionning=conditionning)>0
-
-     def multichoice_question(self, question: str, possible_answers:list, context:str = "", max_answer_length: int = 50, conditionning="") -> int:
-         """
-         Interprets a multi-choice question from a users response. This function expects only one choice as true. All other choices are considered false. If none are correct, returns -1.
-
-         Args:
-             question (str): The multi-choice question posed by the user.
-             possible_ansers (List[Any]): A list containing all valid options for the chosen value. For each item in the list, either 'True', 'False', None or another callable should be passed which will serve as the truth test function when checking against the actual user input.
-             max_answer_length (int, optional): Maximum string length allowed while interpreting the users' responses. Defaults to 50.
-             conditionning: An optional system message to put at the beginning of the prompt
-
-         Returns:
-             int: Index of the selected option within the possible_ansers list. Or -1 if there was not match found among any of them.
-         """
-         choices = "\n".join([f"{i}. {possible_answer}" for i, possible_answer in enumerate(possible_answers)])
-         elements = [conditionning] if conditionning!="" else []
-         elements += [
-             self.lollms.system_full_header,
-             "Answer this multi choices question.",
-         ]
-         if context!="":
-             elements+=[
-                 self.lollms.system_custom_header("Context"),
-                 f"{context}",
-             ]
-         elements +=[
-             "Answer with an id from the possible answers.",
-             "Do not answer with an id outside this possible answers.",
-             "Do not explain your reasons or add comments.",
-             "the output should be an integer."
-         ]
-         elements += [
-             f'{self.lollms.user_custom_header("question")} {question}',
-             f'{self.lollms.user_custom_header("possible answers")}',
-             f"{choices}",
-         ]
-         elements += [self.lollms.ai_custom_header("answer")]
-         prompt = self.build_prompt(elements)
-
-         gen = self.lollms.generate_text(
-             prompt=prompt,
-             streaming_callback=self.sink).strip().replace("</s>","").replace("<s>","")
-         if len(gen)>0:
-             selection = gen.strip().split()[0].replace(",","").replace(".","")
-             self.print_prompt("Multi choice selection",prompt+gen)
-             try:
-                 return int(selection)
-             except:
-                 ASCIIColors.cyan("Model failed to answer the question")
-                 return -1
-         else:
-             return -1
-
-
-     def summerize_text(
-             self,
-             text,
-             summary_instruction="summerize",
-             doc_name="chunk",
-             answer_start="",
-             max_generation_size=3000,
-             max_summary_size=512,
-             callback=None,
-             chunk_summary_post_processing=None,
-             summary_mode=SUMMARY_MODE.SUMMARY_MODE_SEQUENCIAL
-     ):
-         depth=0
-         tk = self.lollms.tokenize(text)
-         prev_len = len(tk)
-         document_chunks=None
-         while len(tk)>max_summary_size and (document_chunks is None or len(document_chunks)>1):
-             self.step_start(f"Comprerssing {doc_name}... [depth {depth+1}]")
-             chunk_size = int(self.lollms.ctx_size*0.6)
-             document_chunks = chunk_text(text, self.lollms, chunk_size, 0, True)
-             text = self.summerize_chunks(
-                 document_chunks,
-                 summary_instruction,
-                 doc_name,
-                 answer_start,
-                 max_generation_size,
-                 callback,
-                 chunk_summary_post_processing=chunk_summary_post_processing,
-                 summary_mode=summary_mode)
-             tk = self.lollms.binding.tokenize(text)
-             tk = self.lollms.binding.tokenize(text)
-             dtk_ln=prev_len-len(tk)
-             prev_len = len(tk)
-             self.step(f"Current text size : {prev_len}, max summary size : {max_summary_size}")
-             self.step_end(f"Comprerssing {doc_name}... [depth {depth+1}]")
-             depth += 1
-             if dtk_ln<=10: # it is not sumlmarizing
-                 break
-         return text
-
-     def smart_data_extraction(
-             self,
-             text,
-             data_extraction_instruction="summerize",
-             final_task_instruction="reformulate with better wording",
-             doc_name="chunk",
-             answer_start="",
-             max_generation_size=3000,
-             max_summary_size=512,
-             callback=None,
-             chunk_summary_post_processing=None,
-             summary_mode=SUMMARY_MODE.SUMMARY_MODE_SEQUENCIAL
-     ):
-         depth=0
-         tk = self.lollms.tokenize(text)
-         prev_len = len(tk)
-         while len(tk)>max_summary_size:
-             self.step_start(f"Comprerssing... [depth {depth+1}]")
-             chunk_size = int(self.lollms.ctx_size*0.6)
-             document_chunks = TextChunker.chunk_text(text, self.lollms, chunk_size, 0, True)
-             text = self.summerize_chunks(
-                 document_chunks,
-                 data_extraction_instruction,
-                 doc_name,
-                 answer_start,
-                 max_generation_size,
-                 callback,
-                 chunk_summary_post_processing=chunk_summary_post_processing,
-                 summary_mode=summary_mode
-             )
-             tk = self.lollms.tokenize(text)
-             dtk_ln=prev_len-len(tk)
-             prev_len = len(tk)
-             self.step(f"Current text size : {prev_len}, max summary size : {max_summary_size}")
-             self.step_end(f"Comprerssing... [depth {depth+1}]")
-             depth += 1
-             if dtk_ln<=10: # it is not sumlmarizing
-                 break
-         self.step_start(f"Rewriting ...")
-         text = self.summerize_chunks(
-             [text],
-             final_task_instruction,
-             doc_name, answer_start,
-             max_generation_size,
-             callback,
-             chunk_summary_post_processing=chunk_summary_post_processing
-         )
-         self.step_end(f"Rewriting ...")
-
-         return text
-
-     def summerize_chunks(
-             self,
-             chunks,
-             summary_instruction="summerize",
-             doc_name="chunk",
-             answer_start="",
-             max_generation_size=3000,
-             callback=None,
-             chunk_summary_post_processing=None,
-             summary_mode=SUMMARY_MODE.SUMMARY_MODE_SEQUENCIAL
-     ):
-         if summary_mode==SUMMARY_MODE.SUMMARY_MODE_SEQUENCIAL:
-             summary = ""
-             for i, chunk in enumerate(chunks):
-                 self.step_start(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
-                 summary = f"{answer_start}"+ self.fast_gen(
-                     "\n".join([
-                         f"!@>Document_chunk: {doc_name}:",
-                         f"{summary}",
-                         f"{chunk}",
-                         f"!@>instruction: {summary_instruction}",
-                         f"Answer directly with the summary with no extra comments.",
-                         f"!@>summary:",
-                         f"{answer_start}"
-                     ]),
-                     max_generation_size=max_generation_size,
-                     callback=callback)
-                 if chunk_summary_post_processing:
-                     summary = chunk_summary_post_processing(summary)
-                 self.step_end(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
-             return summary
-         else:
-             summeries = []
-             for i, chunk in enumerate(chunks):
-                 self.step_start(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
-                 summary = f"{answer_start}"+ self.fast_gen(
-                     "\n".join([
-                         f"!@>Document_chunk: {doc_name}:",
-                         f"{chunk}",
-                         f"!@>instruction: {summary_instruction}",
-                         f"Answer directly with the summary with no extra comments.",
-                         f"!@>summary:",
-                         f"{answer_start}"
-                     ]),
-                     max_generation_size=max_generation_size,
-                     callback=callback)
-                 if chunk_summary_post_processing:
-                     summary = chunk_summary_post_processing(summary)
-                 summeries.append(summary)
-                 self.step_end(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
-             return "\n".join(summeries)
-
-     #======================= Function calls
-     def _upgrade_prompt_with_function_info(self, prompt: str, functions: List[Dict[str, Any]]) -> str:
-         """
-         Upgrades the prompt with information about function calls.
-
-         Args:
-             prompt (str): The original prompt.
-             functions (List[Dict[str, Any]]): A list of dictionaries describing functions that can be called.
-
-         Returns:
-             str: The upgraded prompt that includes information about the function calls.
-         """
-         function_descriptions = ["!@>information: If you need to call a function to fulfull the user request, use a function markdown tag with the function call as the following json format:",
-             "```function",
-             "{",
-             '"function_name":the name of the function to be called,',
-             '"function_parameters": a list of parameter values',
-             "}",
-             "```",
-             "You can call multiple functions in one generation.",
-             "Each function call needs to be in a separate function markdown tag.",
-             "Do not add status of the execution as it will be added automatically by the system.",
-             "If you want to get the output of the function before answering the user, then use the keyword @<NEXT>@ at the end of your message.",
-             "!@>List of possible functions to be called:\n"]
-         for function in functions:
-             description = f"{function['function_name']}: {function['function_description']}\nparameters:{function['function_parameters']}"
-             function_descriptions.append(description)
-
-         # Combine the function descriptions with the original prompt.
-         function_info = ' '.join(function_descriptions)
-         upgraded_prompt = f"{function_info}\n{prompt}"
-
-         return upgraded_prompt
-     def extract_function_calls_as_json(self, text: str) -> List[Dict[str, Any]]:
-         """
-         Extracts function calls formatted as JSON inside markdown code blocks.
-
-         Args:
-             text (str): The generated text containing JSON markdown entries for function calls.
-
-         Returns:
-             List[Dict[str, Any]]: A list of dictionaries representing the function calls.
-         """
-         # Extract markdown code blocks that contain JSON.
-         code_blocks = self.extract_code_blocks(text)
-
-         # Filter out and parse JSON entries.
-         function_calls = []
-         for block in code_blocks:
-             if block["type"]=="function":
-                 content = block.get("content", "")
-                 try:
-                     # Attempt to parse the JSON content of the code block.
-                     function_call = json.loads(content)
-                     if type(function_call)==dict:
-                         function_calls.append(function_call)
-                     elif type(function_call)==list:
-                         function_calls+=function_call
-                 except json.JSONDecodeError:
-                     # If the content is not valid JSON, skip it.
-                     continue
-
-         return function_calls
-     def execute_function_calls(self, function_calls: List[Dict[str, Any]], function_definitions: List[Dict[str, Any]]) -> List[Any]:
-         """
-         Executes the function calls with the parameters extracted from the generated text,
-         using the original functions list to find the right function to execute.
-
-         Args:
-             function_calls (List[Dict[str, Any]]): A list of dictionaries representing the function calls.
-             function_definitions (List[Dict[str, Any]]): The original list of functions with their descriptions and callable objects.
-
-         Returns:
-             List[Any]: A list of results from executing the function calls.
-         """
-         results = []
-         # Convert function_definitions to a dict for easier lookup
-         functions_dict = {func['function_name']: func['function'] for func in function_definitions}
-
-         for call in function_calls:
-             function_name = call.get("function_name")
-             parameters = call.get("function_parameters", [])
-             function = functions_dict.get(function_name)
-
-             if function:
-                 try:
-                     # Assuming parameters is a dictionary that maps directly to the function's arguments.
-                     if type(parameters)==list:
-                         result = function(*parameters)
-                     elif type(parameters)==dict:
-                         result = function(**parameters)
-                     results.append(result)
-                 except TypeError as e:
-                     # Handle cases where the function call fails due to incorrect parameters, etc.
-                     results.append(f"Error calling {function_name}: {e}")
-             else:
-                 results.append(f"Function {function_name} not found.")
-
-         return results
-     def generate_with_function_calls(self, prompt: str, functions: List[Dict[str, Any]], max_answer_length: Optional[int] = None, callback: Callable[[str,MSG_TYPE],bool]=None) -> List[Dict[str, Any]]:
-         """
-         Performs text generation with function calls.
-
-         Args:
-             prompt (str): The full prompt (including conditioning, user discussion, extra data, and the user prompt).
-             functions (List[Dict[str, Any]]): A list of dictionaries describing functions that can be called.
-             max_answer_length (int, optional): Maximum string length allowed for the generated text.
-
-         Returns:
-             List[Dict[str, Any]]: A list of dictionaries with the function names and parameters to execute.
-         """
-         # Upgrade the prompt with information about the function calls.
-         upgraded_prompt = self._upgrade_prompt_with_function_info(prompt, functions)
-
-         # Generate the initial text based on the upgraded prompt.
-         generated_text = self.fast_gen(upgraded_prompt, max_answer_length, callback=callback)
-
-         # Extract the function calls from the generated text.
-         function_calls = self.extract_function_calls_as_json(generated_text)
-
-         return generated_text, function_calls
-
-     def generate_with_function_calls_and_images(self, prompt: str, images:list, functions: List[Dict[str, Any]], max_answer_length: Optional[int] = None, callback: Callable[[str,MSG_TYPE],bool]=None) -> List[Dict[str, Any]]:
-         """
-         Performs text generation with function calls.
-
-         Args:
-             prompt (str): The full prompt (including conditioning, user discussion, extra data, and the user prompt).
-             functions (List[Dict[str, Any]]): A list of dictionaries describing functions that can be called.
-             max_answer_length (int, optional): Maximum string length allowed for the generated text.
-
-         Returns:
-             List[Dict[str, Any]]: A list of dictionaries with the function names and parameters to execute.
-         """
-         # Upgrade the prompt with information about the function calls.
-         upgraded_prompt = self._upgrade_prompt_with_function_info(prompt, functions)
-
-         # Generate the initial text based on the upgraded prompt.
-         generated_text = self.fast_gen_with_images(upgraded_prompt, images, max_answer_length, callback=callback)
-
-         # Extract the function calls from the generated text.
-         function_calls = self.extract_function_calls_as_json(generated_text)
-
-         return generated_text, function_calls