lollms-client 0.9.1__tar.gz → 0.9.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (25)
  1. {lollms_client-0.9.1 → lollms_client-0.9.2}/PKG-INFO +1 -1
  2. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/lollms_core.py +344 -5
  3. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/lollms_tasks.py +5 -5
  4. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client.egg-info/PKG-INFO +1 -1
  5. {lollms_client-0.9.1 → lollms_client-0.9.2}/setup.py +1 -1
  6. {lollms_client-0.9.1 → lollms_client-0.9.2}/LICENSE +0 -0
  7. {lollms_client-0.9.1 → lollms_client-0.9.2}/README.md +0 -0
  8. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/__init__.py +0 -0
  9. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/lollms_config.py +0 -0
  10. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/lollms_discussion.py +0 -0
  11. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/lollms_functions.py +0 -0
  12. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/lollms_js_analyzer.py +0 -0
  13. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/lollms_personality.py +0 -0
  14. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/lollms_personality_worker.py +0 -0
  15. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/lollms_python_analyzer.py +0 -0
  16. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/lollms_stt.py +0 -0
  17. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/lollms_tti.py +0 -0
  18. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/lollms_tts.py +0 -0
  19. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/lollms_types.py +0 -0
  20. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client/lollms_utilities.py +0 -0
  21. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client.egg-info/SOURCES.txt +0 -0
  22. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client.egg-info/dependency_links.txt +0 -0
  23. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client.egg-info/requires.txt +0 -0
  24. {lollms_client-0.9.1 → lollms_client-0.9.2}/lollms_client.egg-info/top_level.txt +0 -0
  25. {lollms_client-0.9.1 → lollms_client-0.9.2}/setup.cfg +0 -0
--- lollms_client-0.9.1/PKG-INFO
+++ lollms_client-0.9.2/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lollms_client
- Version: 0.9.1
+ Version: 0.9.2
  Summary: A client library for LoLLMs generate endpoint
  Home-page: https://github.com/ParisNeo/lollms_client
  Author: ParisNeo
--- lollms_client-0.9.1/lollms_client/lollms_core.py
+++ lollms_client-0.9.2/lollms_client/lollms_core.py
@@ -36,7 +36,9 @@ class ELF_GENERATION_FORMAT(Enum):
              return format_mapping[format_string.upper()]
          except KeyError:
              raise ValueError(f"Invalid format string: {format_string}. Must be one of {list(format_mapping.keys())}.")
-
+
+     def __str__(self):
+         return self.name
  class ELF_COMPLETION_FORMAT(Enum):
      Instruct = 0
      Chat = 1
@@ -51,6 +53,9 @@ class ELF_COMPLETION_FORMAT(Enum):
              return format_mapping[format_string.upper()]
          except KeyError:
              raise ValueError(f"Invalid format string: {format_string}. Must be one of {list(format_mapping.keys())}.")
+
+     def __str__(self):
+         return self.name

  class LollmsClient():
      def __init__(
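
A small illustration of what the new `__str__` overrides change (a minimal sketch; it assumes the enums are imported from `lollms_client.lollms_core`, the module these hunks belong to):

```python
# Minimal sketch: with the new __str__, the enum members print as their bare
# member name instead of the default "ClassName.Member" form.
from lollms_client.lollms_core import ELF_COMPLETION_FORMAT

fmt = ELF_COMPLETION_FORMAT.Chat
print(fmt)        # prints "Chat"
print(str(fmt))   # also "Chat"
```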
@@ -155,23 +160,31 @@ class LollmsClient():
      def system_full_header(self) -> str:
          """Get the start_header_id_template."""
          return f"{self.start_header_id_template}{self.system_message_template}{self.end_header_id_template}"
+
+     def system_custom_header(self, ai_name) -> str:
+         """Get the start_header_id_template."""
+         return f"{self.start_header_id_template}{ai_name}{self.end_header_id_template}"
+
      @property
      def user_full_header(self) -> str:
          """Get the start_header_id_template."""
          return f"{self.start_user_header_id_template}{self.user_name}{self.end_user_header_id_template}"
+
+     def user_custom_header(self, user_name="user") -> str:
+         """Get the start_header_id_template."""
+         return f"{self.start_user_header_id_template}{user_name}{self.end_user_header_id_template}"
+
      @property
      def ai_full_header(self) -> str:
          """Get the start_header_id_template."""
          return f"{self.start_ai_header_id_template}{self.ai_name}{self.end_ai_header_id_template}"

-     def system_custom_header(self, ai_name) -> str:
-         """Get the start_header_id_template."""
-         return f"{self.start_header_id_template}{ai_name}{self.end_header_id_template}"
-
      def ai_custom_header(self, ai_name) -> str:
          """Get the start_header_id_template."""
          return f"{self.start_ai_header_id_template}{ai_name}{self.end_ai_header_id_template}"

+     def sink(self, s=None,i=None,d=None):
+         pass

      def tokenize(self, prompt:str):
          """
@@ -1582,6 +1595,182 @@ Do not split the code in multiple tags.

          return cleaned_text

+     def yes_no(
+         self,
+         question: str,
+         context: str = "",
+         max_answer_length: int = None,
+         conditionning: str = "",
+         return_explanation: bool = False,
+         callback = None
+     ) -> bool | dict:
+         """
+         Answers a yes/no question.
+
+         Args:
+             question (str): The yes/no question to answer.
+             context (str, optional): Additional context to provide for the question.
+             max_answer_length (int, optional): Maximum string length allowed for the response. Defaults to None.
+             conditionning (str, optional): An optional system message to put at the beginning of the prompt.
+             return_explanation (bool, optional): If True, returns a dictionary with the answer and explanation. Defaults to False.
+
+         Returns:
+             bool or dict:
+                 - If return_explanation is False, returns a boolean (True for 'yes', False for 'no').
+                 - If return_explanation is True, returns a dictionary with the answer and explanation.
+         """
+         if not callback:
+             callback=self.sink
+
+         prompt = f"{conditionning}\nQuestion: {question}\nContext: {context}\n"
+
+         template = """
+ {
+ "answer": true | false,
+ "explanation": "Optional explanation if return_explanation is True"
+ }
+ """
+
+         response = self.generate_code(
+             prompt=prompt,
+             template=template,
+             language="json",
+             code_tag_format="markdown",
+             max_size=max_answer_length,
+             callback=callback
+         )
+
+         try:
+             parsed_response = json.loads(response)
+             answer = parsed_response.get("answer", False)
+             explanation = parsed_response.get("explanation", "")
+
+             if return_explanation:
+                 return {"answer": answer, "explanation": explanation}
+             else:
+                 return answer
+         except json.JSONDecodeError:
+             return False
+
+     def multichoice_question(
+         self,
+         question: str,
+         possible_answers: list,
+         context: str = "",
+         max_answer_length: int = None,
+         conditionning: str = "",
+         return_explanation: bool = False,
+         callback = None
+     ) -> dict:
+         """
+         Interprets a multi-choice question from a user's response. This function expects only one choice as true.
+         All other choices are considered false. If none are correct, returns -1.
+
+         Args:
+             question (str): The multi-choice question posed by the user.
+             possible_answers (List[Any]): A list containing all valid options for the chosen value.
+             context (str, optional): Additional context to provide for the question.
+             max_answer_length (int, optional): Maximum string length allowed while interpreting the user's responses. Defaults to None.
+             conditionning (str, optional): An optional system message to put at the beginning of the prompt.
+             return_explanation (bool, optional): If True, returns a dictionary with the choice and explanation. Defaults to False.
+
+         Returns:
+             dict:
+                 - If return_explanation is False, returns a JSON object with only the selected choice index.
+                 - If return_explanation is True, returns a JSON object with the selected choice index and an explanation.
+                 - Returns {"index": -1} if no match is found among the possible answers.
+         """
+         if not callback:
+             callback=self.sink
+
+         prompt = f"""
+ {conditionning}\n
+ QUESTION:\n{question}\n
+ POSSIBLE ANSWERS:\n"""
+         for i, answer in enumerate(possible_answers):
+             prompt += f"{i}. {answer}\n"
+
+         if context:
+             prompt += f"\nADDITIONAL CONTEXT:\n{context}\n"
+
+         prompt += "\nRespond with a JSON object containing:\n"
+         if return_explanation:
+             prompt += "{\"index\": (the selected answer index), \"explanation\": (reasoning for selection)}"
+         else:
+             prompt += "{\"index\": (the selected answer index)}"
+
+         response = self.generate_code(prompt, language="json", max_size=max_answer_length,
+                                       accept_all_if_no_code_tags_is_present=True, return_full_generated_code=False, callback=callback)
+
+         try:
+             result = json.loads(response)
+             if return_explanation:
+                 if "index" in result and isinstance(result["index"], int):
+                     return result["index"], result["index"]
+             else:
+                 if "index" in result and isinstance(result["index"], int):
+                     return result["index"]
+         except json.JSONDecodeError:
+             if return_explanation:
+                 return -1, "failed to decide"
+             else:
+                 return -1
+
+     def multichoice_ranking(
+         self,
+         question: str,
+         possible_answers: list,
+         context: str = "",
+         max_answer_length: int = 512,
+         conditionning: str = "",
+         return_explanation: bool = False,
+         callback = None
+     ) -> dict:
+         """
+         Ranks answers for a question from best to worst. Returns a JSON object containing the ranked order.
+
+         Args:
+             question (str): The question for which the answers are being ranked.
+             possible_answers (List[Any]): A list of possible answers to rank.
+             context (str, optional): Additional context to provide for the question.
+             max_answer_length (int, optional): Maximum string length allowed for the response. Defaults to 50.
+             conditionning (str, optional): An optional system message to put at the beginning of the prompt.
+             return_explanation (bool, optional): If True, returns a dictionary with the ranked order and explanations. Defaults to False.
+
+         Returns:
+             dict:
+                 - If return_explanation is False, returns a JSON object with only the ranked order.
+                 - If return_explanation is True, returns a JSON object with the ranked order and explanations.
+         """
+         if not callback:
+             callback=self.sink
+
+         prompt = f"""
+ {conditionning}\n
+ QUESTION:\n{question}\n
+ POSSIBLE ANSWERS:\n"""
+         for i, answer in enumerate(possible_answers):
+             prompt += f"{i}. {answer}\n"
+
+         if context:
+             prompt += f"\nADDITIONAL CONTEXT:\n{context}\n"
+
+         prompt += "\nRespond with a JSON object containing:\n"
+         if return_explanation:
+             prompt += "{\"ranking\": (list of indices ordered from best to worst), \"explanations\": (list of reasons for each ranking)}"
+         else:
+             prompt += "{\"ranking\": (list of indices ordered from best to worst)}"
+
+         response = self.generate_code(prompt, language="json", return_full_generated_code=False, callback=callback)
+
+         try:
+             result = json.loads(response)
+             if "ranking" in result and isinstance(result["ranking"], list):
+                 return result
+         except json.JSONDecodeError:
+             return {"ranking": []}
+
+
      def sequential_summarize(
          self,
          text:str,
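
These three helpers wrap `generate_code` with a JSON template and parse the model's reply. A hedged usage sketch (the server address and the example questions are assumptions, not taken from the release):

```python
# Sketch only: exercising the new decision helpers added in 0.9.2.
from lollms_client import LollmsClient

lc = LollmsClient("http://localhost:9600")  # assumed local LoLLMs server

# Boolean verdict; falls back to False if the model's JSON cannot be parsed.
is_fruit = lc.yes_no("Is a tomato botanically a fruit?")

# Single choice; returns the selected index, or -1 when parsing fails.
idx = lc.multichoice_question(
    "Which file gained the new helpers in this release?",
    ["setup.py", "lollms_core.py", "lollms_tasks.py"],
)

# Ranking; returns a dict like {"ranking": [...]}, or {"ranking": []} on failure.
ranked = lc.multichoice_ranking(
    "Rank these answers from most to least relevant.",
    ["answer A", "answer B", "answer C"],
)
print(is_fruit, idx, ranked)
```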
@@ -1736,6 +1925,156 @@ The updated memory must be put in a {chunk_processing_output_format} markdown ta
              memory=code[0]["content"]
          return memory

+     def deepsearch(
+         self,
+         query: str,
+         text: str = None,
+         files: list = None,
+         search_prompt: str = "Extract information related to the query from the current text chunk and update the memory with new findings.",
+         aggregation_prompt: str = None,
+         output_format: str = "markdown",
+         ctx_size: int = None,
+         chunk_size: int = None,
+         bootstrap_chunk_size: int = None,
+         bootstrap_steps: int = None,
+         callback=None,
+         debug: bool = False
+     ):
+         """
+         Searches for specific information related to a query in a long text or a list of files.
+         Processes the input in chunks, updates a memory with relevant findings, and optionally aggregates them.
+
+         Parameters:
+             - query (str): The query to search for.
+             - text (str, optional): The input text to search in. Defaults to None.
+             - files (list, optional): List of file paths to search in. Defaults to None.
+             - search_prompt (str, optional): Prompt for processing each chunk. Defaults to a standard extraction prompt.
+             - aggregation_prompt (str, optional): Prompt for aggregating findings. Defaults to None.
+             - output_format (str, optional): Output format. Defaults to "markdown".
+             - ctx_size (int, optional): Context size for the model. Defaults to None (uses self.ctx_size).
+             - chunk_size (int, optional): Size of each chunk. Defaults to None (ctx_size // 4). Smaller chunk sizes yield better results but is slower.
+             - bootstrap_chunk_size (int, optional): Size for initial chunks. Defaults to None.
+             - bootstrap_steps (int, optional): Number of initial chunks using bootstrap size. Defaults to None.
+             - callback (callable, optional): Function called after each chunk. Defaults to None.
+             - debug (bool, optional): Enable debug output. Defaults to False.
+
+         Returns:
+             - str: The search findings or aggregated output in the specified format.
+         """
+         # Set defaults
+         if ctx_size is None:
+             ctx_size = self.ctx_size
+         if chunk_size is None:
+             chunk_size = ctx_size // 4
+
+         # Prepare input
+         if files:
+             all_texts = [(file, open(file, 'r', encoding='utf-8').read()) for file in files]
+         elif text:
+             all_texts = [("input_text", text)]
+         else:
+             raise ValueError("Either text or files must be provided.")
+
+         # Initialize memory and chunk counter
+         memory = ""
+         chunk_id = 0
+
+         # Define search prompt template using f-string and the provided search_prompt
+         search_prompt_template = f"""{self.system_full_header}
+ You are a search assistant that processes documents chunk by chunk to find information related to a query, updating a memory of findings at each step.
+
+ Your goal is to extract and combine relevant information from each text chunk with the existing memory, ensuring no key details are omitted or invented.
+
+
+ ----
+ # Chunk number: {{chunk_id}}
+ # Text chunk:
+ ```markdown
+ {{chunk}}
+ ```
+
+ Current findings memory:
+ ```markdown
+ {{memory}}
+ ```
+ {self.user_full_header}
+ Query: '{query}'
+ Task: {search_prompt}
+
+ Update the memory by adding new relevant information from this chunk. Retain all prior findings unless contradicted or updated. Only include explicitly relevant details.
+ Make sure to extrafct only information relevant to be able to answer the query of the user or at least gives important contextual information that can be completed to answer the user query.
+ {self.ai_full_header}
+ """
+
+         # Calculate static prompt tokens
+         example_prompt = search_prompt_template.replace("{{chunk_id}}", "0")\
+             .replace("{{memory}}", "")\
+             .replace("{{chunk}}", "")
+         static_tokens = len(self.tokenize(example_prompt))
+
+         # Process each text (file or input)
+         for file_name, file_text in all_texts:
+             file_tokens = self.tokenize(file_text)
+             start_token_idx = 0
+
+             while start_token_idx < len(file_tokens):
+                 # Calculate available tokens
+                 current_memory_tokens = len(self.tokenize(memory))
+                 available_tokens = ctx_size - static_tokens - current_memory_tokens
+                 if available_tokens <= 0:
+                     raise ValueError("Memory too large - consider reducing chunk size or increasing context window")
+
+                 # Adjust chunk size
+                 actual_chunk_size = (
+                     min(bootstrap_chunk_size, available_tokens)
+                     if bootstrap_chunk_size is not None and bootstrap_steps is not None and chunk_id < bootstrap_steps
+                     else min(chunk_size, available_tokens)
+                 )
+
+                 end_token_idx = min(start_token_idx + actual_chunk_size, len(file_tokens))
+                 chunk_tokens = file_tokens[start_token_idx:end_token_idx]
+                 chunk = self.detokenize(chunk_tokens)
+
+                 # Generate updated memory
+                 prompt = search_prompt_template.replace("{chunk_id}", str(chunk_id))\
+                     .replace("{memory}", memory)\
+                     .replace("{chunk}", chunk)
+                 if debug:
+                     print(f"----- Chunk {chunk_id} from {file_name} ------")
+                     print(prompt)
+
+                 output = self.generate(prompt, n_predict=ctx_size // 4, streaming_callback=callback).strip()
+                 code = self.extract_code_blocks(output)
+                 memory = code[0]["content"] if code else output
+
+                 if debug:
+                     print("----- Updated Memory ------")
+                     print(memory)
+                     print("---------------------------")
+
+                 start_token_idx = end_token_idx
+                 chunk_id += 1
+
+         # Aggregate findings if requested
+         if aggregation_prompt:
+             final_prompt = f"""{self.system_full_header}
+ You are a search results aggregator.
+
+ {self.user_full_header}
+ {aggregation_prompt}
+
+ Collected findings:
+ ```markdown
+ {memory}
+ ```
+
+ Provide the final output in {output_format} format.
+ {self.ai_full_header}
+ """
+             final_output = self.generate(final_prompt, streaming_callback=callback)
+             code = self.extract_code_blocks(final_output)
+             return code[0]["content"] if code else final_output
+         return memory
      def error(self, content, duration:int=4, client_id=None, verbose:bool=True):
          ASCIIColors.error(content)

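
`deepsearch` reuses the same chunk-and-memory loop as `sequential_summarize`, but drives it with a search query instead of a summary instruction. A hedged sketch of a typical call (the server address and file paths are placeholders, not taken from the release):

```python
# Sketch only: query a set of long documents with the new deepsearch() helper.
from lollms_client import LollmsClient

lc = LollmsClient("http://localhost:9600")  # assumed local LoLLMs server

findings = lc.deepsearch(
    query="Which endpoints require an API key?",
    files=["docs/server_guide.md", "docs/faq.md"],   # hypothetical paths
    aggregation_prompt="Answer the query concisely using only the collected findings.",
    output_format="markdown",
    chunk_size=1024,   # smaller chunks are more thorough but slower
    debug=False,
)
print(findings)
```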
 
--- lollms_client-0.9.1/lollms_client/lollms_tasks.py
+++ lollms_client-0.9.2/lollms_client/lollms_tasks.py
@@ -435,12 +435,12 @@ class TasksLibrary:
          choices = "\n".join([f"{i}. {possible_answer}" for i, possible_answer in enumerate(possible_answers)])
          elements = [conditionning] if conditionning!="" else []
          elements += [
-             "!@>system:",
+             self.lollms.system_full_header,
              "Answer this multi choices question.",
          ]
          if context!="":
              elements+=[
-                 "!@>Context:",
+                 self.lollms.system_custom_header("Context"),
                  f"{context}",
              ]
          elements +=[
@@ -450,11 +450,11 @@ class TasksLibrary:
              "the output should be an integer."
          ]
          elements += [
-             f"!@>question: {question}",
-             "!@>possible answers:",
+             f'{self.lollms.user_custom_header("question")} {question}',
+             f'{self.lollms.user_custom_header("possible answers")}',
              f"{choices}",
          ]
-         elements += ["!@>answer:"]
+         elements += [self.lollms.ai_custom_header("answer")]
          prompt = self.build_prompt(elements)

          gen = self.lollms.generate(prompt, max_answer_length, temperature=0.1, top_k=50, top_p=0.9, repeat_penalty=1.0, repeat_last_n=50, streaming_callback=self.sink).strip().replace("</s>","").replace("<s>","")
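
The TasksLibrary prompt builder now derives its section markers from the client's header templates instead of the hard-coded legacy `!@>` strings, so custom templates configured on the client are honored. A rough illustration of what the helpers produce (the printed values assume the default templates still expand to the legacy `!@>...:` markers, which is an assumption, not something this diff shows):

```python
# Sketch only: what the new header helpers produce for the prompt sections.
from lollms_client import LollmsClient

lc = LollmsClient("http://localhost:9600")  # assumed local LoLLMs server

print(lc.system_full_header)               # e.g. "!@>system:" with default templates
print(lc.system_custom_header("Context"))  # e.g. "!@>Context:"
print(lc.user_custom_header("question"))   # e.g. "!@>question:"
print(lc.ai_custom_header("answer"))       # e.g. "!@>answer:"
```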
--- lollms_client-0.9.1/lollms_client.egg-info/PKG-INFO
+++ lollms_client-0.9.2/lollms_client.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lollms_client
- Version: 0.9.1
+ Version: 0.9.2
  Summary: A client library for LoLLMs generate endpoint
  Home-page: https://github.com/ParisNeo/lollms_client
  Author: ParisNeo
--- lollms_client-0.9.1/setup.py
+++ lollms_client-0.9.2/setup.py
@@ -8,7 +8,7 @@ with open('requirements.txt', 'r') as f:

  setuptools.setup(
      name="lollms_client",
-     version="0.9.1",
+     version="0.9.2",
      author="ParisNeo",
      author_email="parisneoai@gmail.com",
      description="A client library for LoLLMs generate endpoint",