lollms-client 0.8.2__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lollms-client might be problematic.

@@ -1555,20 +1555,42 @@ Do not split the code in multiple tags.
 
  return cleaned_text
 
- def sequential_summarize(self, text, summary_context="", task="Create final summary using this memory.", format="bullet points", tone="neutral", ctx_size=8192, callback = None):
+ def sequential_summarize(
+ self,
+ text:str,
+ chunk_processing_prompt:str="Extract relevant information from the current text chunk and update the memory if needed.",
+ chunk_processing_output_format="markdown",
+ final_memory_processing_prompt="Create final summary using this memory.",
+ final_output_format="markdown",
+ ctx_size:int=None,
+ chunk_size:int=None,
+ bootstrap_chunk_size:int=None,
+ bootstrap_steps:int=None,
+ callback = None,
+ debug:bool= False):
  """
- Summarizes a long text sequentially by processing chunks and maintaining a memory.
-
- Args:
- text (str): The input text to summarize.
- summary_context (str): Optional context to guide the summarization.
- format (str): Desired format for the final summary (e.g., "bullet points").
- tone (str): Desired tone for the final summary (e.g., "neutral").
- ctx_size (int): Total context window size of the model.
-
- Returns:
- str: The final formatted summary.
+ This function processes a given text in chunks and generates a summary for each chunk.
+ It then combines the summaries to create a final summary.
+
+ Parameters:
+ text (str): The input text to be summarized.
+ chunk_processing_prompt (str, optional): The prompt used for processing each chunk. Defaults to "".
+ chunk_processing_output_format (str, optional): The format of the output for each chunk. Defaults to "markdown".
+ final_memory_processing_prompt (str, optional): The prompt used for processing the final memory. Defaults to "Create final summary using this memory.".
+ final_output_format (str, optional): The format of the final output. Defaults to "markdown".
+ ctx_size (int, optional): The size of the context. Defaults to None.
+ chunk_size (int, optional): The size of each chunk. Defaults to None.
+ callback (callable, optional): A function to be called after processing each chunk. Defaults to None.
+ debug (bool, optional): A flag to enable debug mode. Defaults to False.
+
+ Returns:
+ The final summary in the specified format.
  """
+ if ctx_size is None:
+ ctx_size = self.ctx_size
+
+ if chunk_size is None:
+ chunk_size = ctx_size//4
 
  # Tokenize entire text
  all_tokens = self.tokenize(text)
@@ -1579,21 +1601,46 @@ Do not split the code in multiple tags.
  start_token_idx = 0
 
  # Create static prompt template
- static_prompt_template = f"""!@>instruction:
- Update the summary memory by combining previous memory with key information from this text chunk. {summary_context if summary_context else ''}
- Keep memory concise using bullet points.
+ static_prompt_template = f"""{self.system_full_header}
+ You are a structured sequential text summary assistant that processes documents chunk by chunk, updating a memory of previously generated information at each step.
 
- !@>current memory:
- {{memory}}
+ Your goal is to extract and combine relevant information from each text chunk with the existing memory, ensuring no key details are omitted or invented.
 
- !@>new text chunk:
+ If requested, infer metadata like titles or authors from the content.
+
+ {self.user_full_header}
+ Update the memory by merging previous information with new details from this text chunk.
+ Only add information explicitly present in the chunk. Retain all relevant prior memory unless clarified or updated by the current chunk.
+
+ ----
+ # Text chunk:
+ # Chunk number: {{chunk_id}}
+ ----
+ ```markdown
  {{chunk}}
+ ```
 
- !@>updated memory:
- """
-
+ {{custom_prompt}}
+
+ Before updating, verify each requested detail:
+ 1. Does the chunk explicitly mention the information?
+ 2. Should prior memory be retained, updated, or clarified?
+
+ Include only confirmed details in the output.
+ Rewrite the full memory including the updates and keeping relevant data.
+ Do not discuss the information inside thememory, just put the relevant information without comments.
+
+ ----
+ # Current document analysis memory:
+ ----
+ ```{chunk_processing_output_format}
+ {{memory}}
+ ```
+ {self.ai_full_header}
+ """
  # Calculate static prompt tokens (with empty memory and chunk)
- example_prompt = static_prompt_template.format(memory="", chunk="")
+ chunk_id=0
+ example_prompt = static_prompt_template.format(custom_prompt=chunk_processing_prompt if chunk_processing_prompt else '', memory="", chunk="", chunk_id=chunk_id)
  static_tokens = len(self.tokenize(example_prompt))
 
  # Process text in chunks
@@ -1606,31 +1653,47 @@ Keep memory concise using bullet points.
  raise ValueError("Memory too large - consider reducing chunk size or increasing context window")
 
  # Get chunk tokens
- end_token_idx = min(start_token_idx + available_tokens, total_tokens)
+ if bootstrap_chunk_size is not None and chunk_id < bootstrap_steps:
+ end_token_idx = min(start_token_idx + bootstrap_chunk_size, total_tokens)
+ else:
+ end_token_idx = min(start_token_idx + chunk_size, total_tokens)
  chunk_tokens = all_tokens[start_token_idx:end_token_idx]
  chunk = self.detokenize(chunk_tokens)
+ chunk_id +=1
 
  # Generate memory update
- prompt = static_prompt_template.format(memory=memory, chunk=chunk)
- memory = self.generate(prompt, n_predict=ctx_size//4, streaming_callback=callback).strip()
+ prompt = static_prompt_template.format(custom_prompt=chunk_processing_prompt if chunk_processing_prompt else '', memory=memory, chunk=chunk, chunk_id=chunk_id)
+ if debug:
+ ASCIIColors.yellow(f" ----- {chunk_id-1} ------")
+ ASCIIColors.red(prompt)
 
+ memory = self.generate(prompt, n_predict=ctx_size//4, streaming_callback=callback).strip()
+ code = self.extract_code_blocks(memory)
+ if code:
+ memory=code[0]["content"]
+
+ if debug:
+ ASCIIColors.yellow(f" ----- OUT ------")
+ ASCIIColors.yellow(memory)
+ ASCIIColors.yellow(" ----- ------")
  # Move to next chunk
  start_token_idx = end_token_idx
 
  # Prepare final summary prompt
- final_prompt_template = f"""!@>instruction:
- {task}. Follow these requirements:
- Format: {format}
- Tone: {tone}
-
- !@>memory:
- {{memory}}
-
- !@>summary:
+ final_prompt_template = f"""!@>system:
+ You are a memory summarizer assistant that helps users format their memory information into coherant text in a specific style or format.
+ {final_memory_processing_prompt}.
+ !@>user:
+ Here is my document analysis memory:
+ ```{chunk_processing_output_format}
+ {memory}
+ ```
+ The output must be put inside a {final_output_format} markdown tag.
+ The updated memory must be put in a {chunk_processing_output_format} markdown tag.
+ !@>assistant:
  """
-
  # Truncate memory if needed for final prompt
- example_final_prompt = final_prompt_template.format(memory=memory)
+ example_final_prompt = final_prompt_template
  final_static_tokens = len(self.tokenize(example_final_prompt))
  available_final_tokens = ctx_size - final_static_tokens
 
@@ -1639,8 +1702,12 @@ Tone: {tone}
  memory = self.detokenize(memory_tokens[:available_final_tokens])
 
  # Generate final summary
- final_prompt = final_prompt_template.format(memory=memory)
- return self.generate(final_prompt, streaming_callback=callback)
+ final_prompt = final_prompt_template
+ memory = self.generate(final_prompt, streaming_callback=callback)
+ code = self.extract_code_blocks(memory)
+ if code:
+ memory=code[0]["content"]
+ return memory
 
  def error(self, content, duration:int=4, client_id=None, verbose:bool=True):
  ASCIIColors.error(content)
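For readers upgrading from 0.8.2, the call site changes: `summary_context`, `task`, `format`, and `tone` are gone, replaced by the chunk/final prompts and output formats shown above. Below is a minimal usage sketch of the new signature; it assumes `sequential_summarize` is exposed on `LollmsClient` (as the rest of `lollms_core` is), and the host, file name, prompts, and sizes are illustrative placeholders rather than values from this release.

```python
# Hedged sketch of calling the 0.9.0 sequential_summarize signature shown in the diff.
# LollmsClient and host_address come from the package README; the document,
# prompts, and sizes below are illustrative placeholders.
from lollms_client import LollmsClient

lc = LollmsClient(host_address="http://localhost:9600")

with open("report.txt", encoding="utf-8") as f:
    long_text = f.read()

summary = lc.sequential_summarize(
    text=long_text,
    chunk_processing_prompt="Extract the key findings and any named authors.",
    chunk_processing_output_format="markdown",   # memory carried between chunks as a markdown block
    final_memory_processing_prompt="Create final summary using this memory.",
    final_output_format="markdown",
    ctx_size=8192,             # falls back to self.ctx_size when left as None
    chunk_size=2048,           # defaults to ctx_size//4 when left as None
    bootstrap_chunk_size=512,  # smaller chunks for the first bootstrap_steps iterations
    bootstrap_steps=2,
    debug=False,
)
print(summary)
```

The bootstrap parameters are optional; when they are left as None, the method uses the uniform `chunk_size` path shown in the diff.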
@@ -0,0 +1,172 @@
+ Metadata-Version: 2.1
+ Name: lollms_client
+ Version: 0.9.0
+ Summary: A client library for LoLLMs generate endpoint
+ Home-page: https://github.com/ParisNeo/lollms_client
+ Author: ParisNeo
+ Author-email: parisneoai@gmail.com
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Operating System :: OS Independent
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: requests
+
+ # lollms_client
+
+ [![Python Version](https://img.shields.io/pypi/pyversions/lollms-client)](https://pypi.org/project/lollms-client/) [![PyPI Downloads](https://img.shields.io/pypi/dw/lollms-client)](https://pypi.org/project/lollms-client/) [![Apache License](https://img.shields.io/apache/2.0)](https://www.apache.org/licenses/LICENSE-2.0)
+
+ Welcome to the lollms_client repository! This library is built by [ParisNeo](https://github.com/ParisNeo) and provides a convenient way to interact with the lollms (Lord Of Large Language Models) API. It is available on [PyPI](https://pypi.org/project/lollms-client/) and distributed under the Apache 2.0 License.
+
+ ## Installation
+
+ To install the library from PyPI using `pip`, run:
+
+ ```
+ pip install lollms-client
+ ```
+
+ ## Getting Started
+
+ The LollmsClient class is the gateway to interacting with the lollms API. Here's how you can instantiate it in various ways to suit your needs:
+
+ ```python
+ from lollms_client import LollmsClient
+
+ # Default instantiation using the local lollms service - hosted at http://localhost:9600
+ lc = LollmsClient()
+
+ # Specify a custom host and port
+ lc = LollmsClient(host_address="http://some.server:9600")
+
+ # Use a specific model with a local or remote ollama server
+ from lollms_client import ELF_GENERATION_FORMAT
+ lc = LollmsClient(model_name="phi4:latest", default_generation_mode = ELF_GENERATION_FORMAT.OLLAMA)
+
+ # Use a specific model with a local or remote OpenAI server (you can either set your key as an environment variable or pass it here)
+ lc = LollmsClient(model_name="gpt-3.5-turbo-0125", default_generation_mode = ELF_GENERATION_FORMAT.OPENAI)
+
+ # Use a specific model with an Ollama binding on the server, with a context size of 32800
+ lc = LollmsClient(
+ host_address="http://some.other.server:11434",
+ model_name="phi4:latest",
+ ctx_size=32800,
+ default_generation_mode=ELF_GENERATION_FORMAT.OLLAMA
+ )
+ ```
+
+ ### Text Generation
+
+ Use `generate()` for generating text from the lollms API.
+
+ ```python
+ response = lc.generate(prompt="Once upon a time", stream=False, temperature=0.5)
+ print(response)
+ ```
+
+ ### Code Generation
+
+ The `generate_code()` function allows you to generate code snippets based on your input. Here's how you can use it:
+
+ ```python
+ # A generic case to generate a snippet in python
+ response = lc.generate_code(prompt="Create a function to add all numbers of a list", language='python')
+ print(response)
+
+ # generate_code can also be used to generate responses ready to be parsed - with json or yaml for instance
+ response = lc.generate_code(prompt="Mr Alex Brown presents himself to the pharmacist. He is 20 years old and seeks an appointment for the 12th of October. Fill out his application.", language='json', template="""
+ {
+ "name":"the first name of the person"
+ "family_name":"the family name of the person"
+ "age":"the age of the person"
+ "appointment_date":"the date of the appointment"
+ "reason":"the reason for the appointment. if not specified fill out with 'N/A'"
+ }
+ """)
+ data = json.loads(response)
+ print(data['name'], data['family_name'], "- Reason:", data['reason'])
+ ```
+
+
+ ### List Mounted Personalities (only on lollms)
+
+ List mounted personalities of the lollms API with the `listMountedPersonalities()` method.
+
+ ```python
+ response = lc.listMountedPersonalities()
+ print(response)
+ ```
+
+ ### List Models
+
+ List available models of the lollms API with the `listModels()` method.
+
+ ```python
+ response = lc.listModels()
+ print(response)
+ ```
+
+ ## Complete Example
+
+ ```python
+ import json
+ from datetime import datetime
+
+ # Assuming LollmsClient is already imported and instantiated as lc
+ lc = LollmsClient()
+
+ # Generate code using the LollmsClient
+ response = lc.generate_code(
+ prompt="Mr Alex Brown presents himself to the pharmacist. He is 20 years old and seeks an appointment for the 12th of October. Fill out his application.",
+ language='json',
+ template="""
+ {
+ "name": "the first name of the person",
+ "family_name": "the family name of the person",
+ "age": "the age of the person",
+ "appointment_date": "the date of the appointment in the format DD/MM/YYYY",
+ "reason": "the reason for the appointment. if not specified fill out with 'N/A'"
+ }
+ """
+ )
+
+ # Parse the JSON response
+ data = json.loads(response)
+
+ # Function to validate the data
+ def validate_data(data):
+ try:
+ # Validate age
+ if not (0 < int(data['age']) < 120):
+ raise ValueError("Invalid age provided.")
+
+ # Validate appointment date
+ appointment_date = datetime.strptime(data['appointment_date'], '%d/%m/%Y')
+ if appointment_date < datetime.now():
+ raise ValueError("Appointment date cannot be in the past.")
+
+ # Validate name fields
+ if not data['name'] or not data['family_name']:
+ raise ValueError("Name fields cannot be empty.")
+
+ return True
+ except Exception as e:
+ print(f"Validation Error: {e}")
+ return False
+
+ # Function to simulate a response to the user
+ def simulate_response(data):
+ if validate_data(data):
+ print(f"Appointment confirmed for {data['name']} {data['family_name']}.")
+ print(f"Date: {data['appointment_date']}")
+ print(f"Reason: {data['reason']}")
+ else:
+ print("Failed to confirm appointment due to invalid data.")
+
+ # Execute the simulation
+ simulate_response(data)
+ ```
+
+ Feel free to contribute to the project by submitting issues or pull requests. Follow [ParisNeo](https://github.com/ParisNeo) on [GitHub](https://github.com/ParisNeo), [Twitter](https://twitter.com/ParisNeo_AI), [Discord](https://discord.gg/BDxacQmv), [Sub-Reddit](r/lollms), and [Instagram](https://www.instagram.com/spacenerduino/) for updates and news.
+
+ Happy coding!
@@ -1,6 +1,6 @@
  lollms_client/__init__.py,sha256=_1_zkzDrAs43mf6LEBVZUEYCGZ8KRmj-jd_vgkB8xSw,516
  lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
- lollms_client/lollms_core.py,sha256=3-llQuzi7UyuFPGZz7Uvkypg-ECoPmXlqL4_rK_RRXA,81342
+ lollms_client/lollms_core.py,sha256=DMYD8giCca6abiD18Iy86B9MiSF187iZJFXGIe88X6A,85090
  lollms_client/lollms_discussion.py,sha256=9b83m0D894jwpgssWYTQHbVxp1gJoI-J947Ui_dRXII,2073
  lollms_client/lollms_functions.py,sha256=p8SFtmEPqvVCsIz2fZ5HxyOHaxjrAo5c12uTzJnb6m8,3594
  lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
@@ -13,8 +13,8 @@ lollms_client/lollms_tti.py,sha256=WznZ5ADhig-SFNmwlgviLZaAfl67NVqnZxYzhel3vxU,1
  lollms_client/lollms_tts.py,sha256=WznZ5ADhig-SFNmwlgviLZaAfl67NVqnZxYzhel3vxU,1287
  lollms_client/lollms_types.py,sha256=uuaADVVfi1sZucY7gT8v-EDN5xrMI3vy_4M7k7Uz3eU,2170
  lollms_client/lollms_utilities.py,sha256=YAgamfp0pBVApR68AHKjhp1lh6isMNF8iadwWLl63c0,7045
- lollms_client-0.8.2.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
- lollms_client-0.8.2.dist-info/METADATA,sha256=F98bHTKVyLuCaJR0y8kXBOdgBeO_yHmCUXeLWfScoi8,3401
- lollms_client-0.8.2.dist-info/WHEEL,sha256=y4mX-SOX4fYIkonsAGA5N0Oy-8_gI4FXw5HNI1xqvWg,91
- lollms_client-0.8.2.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
- lollms_client-0.8.2.dist-info/RECORD,,
+ lollms_client-0.9.0.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+ lollms_client-0.9.0.dist-info/METADATA,sha256=1gy04xFPMlAtuTwtSfbRE3vcVdu1VowIJdkqjnbYa3o,6400
+ lollms_client-0.9.0.dist-info/WHEEL,sha256=y4mX-SOX4fYIkonsAGA5N0Oy-8_gI4FXw5HNI1xqvWg,91
+ lollms_client-0.9.0.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+ lollms_client-0.9.0.dist-info/RECORD,,
@@ -1,97 +0,0 @@
- Metadata-Version: 2.1
- Name: lollms_client
- Version: 0.8.2
- Summary: A client library for LoLLMs generate endpoint
- Home-page: https://github.com/ParisNeo/lollms_client
- Author: ParisNeo
- Author-email: parisneoai@gmail.com
- Classifier: Programming Language :: Python :: 3
- Classifier: License :: OSI Approved :: Apache Software License
- Classifier: Operating System :: OS Independent
- Description-Content-Type: text/markdown
- License-File: LICENSE
- Requires-Dist: requests
-
- # lollms_client
-
- [![Python Version](https://img.shields.io/pypi/pyversions/lollms-client)](https://pypi.org/project/lollms-client/) [![PyPI Downloads](https://img.shields.io/pypi/dw/lollms-client)](https://pypi.org/project/lollms-client/) [![Apache License](https://img.shields.io/apachie/2.0)](https://www.apache.org/licenses/LICENSE-2.0)
-
- Welcome to the lollms_client repository! This library is built by [ParisNeo](https://github.com/ParisNeo) and provides a convenient way to interact with the lollms (Lord Of Large Language Models) API. It is available on [PyPI](https://pypi.org/project/lollms-client/) and distributed under the Apache 2.0 License.
-
- ## Installation
-
- To install the library from PyPI using `pip`, run:
-
- ```
- pip install lollms-client
- ```
-
- ## Usage
-
- To use the lollms_client, first import the necessary classes:
-
- ```python
- from lollms_client import LollmsClient
-
- # Initialize the LollmsClient instance this uses the default lollms localhost service http://localhost:9600
- lc = LollmsClient()
- # You can also use a different host and port number if you please
- lc = LollmsClient("http://some.other.server:9600")
- # You can also use a local or remote ollama server
- lc = LollmsClient(model_name="mistral-nemo:latest", default_generation_mode = ELF_GENERATION_FORMAT.OLLAMA)
- # You can also use a local or remote openai server (you can either set your key as an environment variable or pass it here)
- lc = LollmsClient(model_name="gpt-3.5-turbo-0125", default_generation_mode = ELF_GENERATION_FORMAT.OPENAI)
- ```
-
- ### Text Generation
-
- Use `generate()` for generating text from the lollms API.
-
- ```python
- response = lc.generate(prompt="Once upon a time", stream=False, temperature=0.5)
- print(response)
- ```
-
-
- ### List Mounted Personalities (only on lollms)
-
- List mounted personalities of the lollms API with the `listMountedPersonalities()` method.
-
- ```python
- response = lc.listMountedPersonalities()
- print(response)
- ```
-
- ### List Models
-
- List available models of the lollms API with the `listModels()` method.
-
- ```python
- response = lc.listModels()
- print(response)
- ```
-
- ## Complete Example
-
- ```python
- from lollms_client import LollmsClient
-
- # Initialize the LollmsClient instance
- lc = LollmsClient()
-
- # Generate Text
- response = lc.generate(prompt="Once upon a time", stream=False, temperature=0.5)
- print(response)
-
- # List Mounted Personalities
- response = lc.listMountedPersonalities()
- print(response)
-
- # List Models
- response = lc.listModels()
- print(response)
- ```
-
- Feel free to contribute to the project by submitting issues or pull requests. Follow [ParisNeo](https://github.com/ParisNeo) on [GitHub](https://github.com/ParisNeo), [Twitter](https://twitter.com/ParisNeo_AI), [Discord](https://discord.gg/BDxacQmv), [Sub-Reddit](r/lollms), and [Instagram](https://www.instagram.com/spacenerduino/) for updates and news.
-
- Happy coding!