lollms-client 0.8.2__tar.gz → 0.9.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- lollms_client-0.9.0/PKG-INFO +172 -0
- lollms_client-0.9.0/README.md +158 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_core.py +105 -38
- lollms_client-0.9.0/lollms_client.egg-info/PKG-INFO +172 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/setup.py +1 -1
- lollms_client-0.8.2/PKG-INFO +0 -97
- lollms_client-0.8.2/README.md +0 -83
- lollms_client-0.8.2/lollms_client.egg-info/PKG-INFO +0 -97
- {lollms_client-0.8.2 → lollms_client-0.9.0}/LICENSE +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/__init__.py +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_config.py +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_discussion.py +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_functions.py +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_js_analyzer.py +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_personality.py +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_personality_worker.py +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_python_analyzer.py +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_stt.py +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_tasks.py +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_tti.py +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_tts.py +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_types.py +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_utilities.py +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client.egg-info/SOURCES.txt +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client.egg-info/dependency_links.txt +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client.egg-info/requires.txt +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client.egg-info/top_level.txt +0 -0
- {lollms_client-0.8.2 → lollms_client-0.9.0}/setup.cfg +0 -0
lollms_client-0.9.0/PKG-INFO  ADDED
@@ -0,0 +1,172 @@
+Metadata-Version: 2.1
+Name: lollms_client
+Version: 0.9.0
+Summary: A client library for LoLLMs generate endpoint
+Home-page: https://github.com/ParisNeo/lollms_client
+Author: ParisNeo
+Author-email: parisneoai@gmail.com
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: requests
+
+# lollms_client
+
+[](https://pypi.org/project/lollms-client/) [](https://pypi.org/project/lollms-client/) [](https://www.apache.org/licenses/LICENSE-2.0)
+
+Welcome to the lollms_client repository! This library is built by [ParisNeo](https://github.com/ParisNeo) and provides a convenient way to interact with the lollms (Lord Of Large Language Models) API. It is available on [PyPI](https://pypi.org/project/lollms-client/) and distributed under the Apache 2.0 License.
+
+## Installation
+
+To install the library from PyPI using `pip`, run:
+
+```
+pip install lollms-client
+```
+
+## Getting Started
+
+The LollmsClient class is the gateway to interacting with the lollms API. Here's how you can instantiate it in various ways to suit your needs:
+
+```python
+from lollms_client import LollmsClient
+
+# Default instantiation using the local lollms service - hosted at http://localhost:9600
+lc = LollmsClient()
+
+# Specify a custom host and port
+lc = LollmsClient(host_address="http://some.server:9600")
+
+# Use a specific model with a local or remote ollama server
+from lollms_client import ELF_GENERATION_FORMAT
+lc = LollmsClient(model_name="phi4:latest", default_generation_mode = ELF_GENERATION_FORMAT.OLLAMA)
+
+# Use a specific model with a local or remote OpenAI server (you can either set your key as an environment variable or pass it here)
+lc = LollmsClient(model_name="gpt-3.5-turbo-0125", default_generation_mode = ELF_GENERATION_FORMAT.OPENAI)
+
+# Use a specific model with an Ollama binding on the server, with a context size of 32800
+lc = LollmsClient(
+    host_address="http://some.other.server:11434",
+    model_name="phi4:latest",
+    ctx_size=32800,
+    default_generation_mode=ELF_GENERATION_FORMAT.OLLAMA
+)
+```
+
+### Text Generation
+
+Use `generate()` for generating text from the lollms API.
+
+```python
+response = lc.generate(prompt="Once upon a time", stream=False, temperature=0.5)
+print(response)
+```
+
+### Code Generation
+
+The `generate_code()` function allows you to generate code snippets based on your input. Here's how you can use it:
+
+```python
+# A generic case to generate a snippet in python
+response = lc.generate_code(prompt="Create a function to add all numbers of a list", language='python')
+print(response)
+
+# generate_code can also be used to generate responses ready to be parsed - with json or yaml for instance
+response = lc.generate_code(prompt="Mr Alex Brown presents himself to the pharmacist. He is 20 years old and seeks an appointment for the 12th of October. Fill out his application.", language='json', template="""
+{
+    "name":"the first name of the person"
+    "family_name":"the family name of the person"
+    "age":"the age of the person"
+    "appointment_date":"the date of the appointment"
+    "reason":"the reason for the appointment. if not specified fill out with 'N/A'"
+}
+""")
+data = json.loads(response)
+print(data['name'], data['family_name'], "- Reason:", data['reason'])
+```
+
+### List Mounted Personalities (only on lollms)
+
+List mounted personalities of the lollms API with the `listMountedPersonalities()` method.
+
+```python
+response = lc.listMountedPersonalities()
+print(response)
+```
+
+### List Models
+
+List available models of the lollms API with the `listModels()` method.
+
+```python
+response = lc.listModels()
+print(response)
+```
+
+## Complete Example
+
+```python
+import json
+from datetime import datetime
+
+# Assuming LollmsClient is already imported and instantiated as lc
+lc = LollmsClient()
+
+# Generate code using the LollmsClient
+response = lc.generate_code(
+    prompt="Mr Alex Brown presents himself to the pharmacist. He is 20 years old and seeks an appointment for the 12th of October. Fill out his application.",
+    language='json',
+    template="""
+{
+    "name": "the first name of the person",
+    "family_name": "the family name of the person",
+    "age": "the age of the person",
+    "appointment_date": "the date of the appointment in the format DD/MM/YYYY",
+    "reason": "the reason for the appointment. if not specified fill out with 'N/A'"
+}
+"""
+)
+
+# Parse the JSON response
+data = json.loads(response)
+
+# Function to validate the data
+def validate_data(data):
+    try:
+        # Validate age
+        if not (0 < int(data['age']) < 120):
+            raise ValueError("Invalid age provided.")
+
+        # Validate appointment date
+        appointment_date = datetime.strptime(data['appointment_date'], '%d/%m/%Y')
+        if appointment_date < datetime.now():
+            raise ValueError("Appointment date cannot be in the past.")
+
+        # Validate name fields
+        if not data['name'] or not data['family_name']:
+            raise ValueError("Name fields cannot be empty.")
+
+        return True
+    except Exception as e:
+        print(f"Validation Error: {e}")
+        return False
+
+# Function to simulate a response to the user
+def simulate_response(data):
+    if validate_data(data):
+        print(f"Appointment confirmed for {data['name']} {data['family_name']}.")
+        print(f"Date: {data['appointment_date']}")
+        print(f"Reason: {data['reason']}")
+    else:
+        print("Failed to confirm appointment due to invalid data.")
+
+# Execute the simulation
+simulate_response(data)
+```
+
+Feel free to contribute to the project by submitting issues or pull requests. Follow [ParisNeo](https://github.com/ParisNeo) on [GitHub](https://github.com/ParisNeo), [Twitter](https://twitter.com/ParisNeo_AI), [Discord](https://discord.gg/BDxacQmv), [Sub-Reddit](r/lollms), and [Instagram](https://www.instagram.com/spacenerduino/) for updates and news.
+
+Happy coding!
lollms_client-0.9.0/README.md  ADDED
@@ -0,0 +1,158 @@
+(158 lines, identical to the README portion of lollms_client-0.9.0/PKG-INFO above, from "# lollms_client" through "Happy coding!")
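As a side note on the `generate_code()` examples in the new README: the snippets parse the model's response with `json.loads` directly. A slightly more defensive variant is sketched below; it assumes the model might occasionally wrap the JSON in a Markdown code fence, and the helper name `parse_generated_json` is illustrative rather than part of the library.

```python
import json

def parse_generated_json(response: str) -> dict:
    """Parse a generate_code() response, tolerating an optional surrounding code fence."""
    cleaned = response.strip()
    if cleaned.startswith("```"):
        # Drop the opening fence line (with its optional language tag) and the closing fence.
        cleaned = cleaned.split("\n", 1)[1].rsplit("```", 1)[0]
    return json.loads(cleaned)

# Usage with the pharmacist example from the README:
# data = parse_generated_json(response)
# print(data["name"], data["family_name"], "- Reason:", data["reason"])
```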
{lollms_client-0.8.2 → lollms_client-0.9.0}/lollms_client/lollms_core.py
@@ -1555,20 +1555,42 @@ Do not split the code in multiple tags.
 
         return cleaned_text
 
-    def sequential_summarize(
+    def sequential_summarize(
+                    self,
+                    text:str,
+                    chunk_processing_prompt:str="Extract relevant information from the current text chunk and update the memory if needed.",
+                    chunk_processing_output_format="markdown",
+                    final_memory_processing_prompt="Create final summary using this memory.",
+                    final_output_format="markdown",
+                    ctx_size:int=None,
+                    chunk_size:int=None,
+                    bootstrap_chunk_size:int=None,
+                    bootstrap_steps:int=None,
+                    callback = None,
+                    debug:bool= False):
         """
-
+        This function processes a given text in chunks and generates a summary for each chunk.
+        It then combines the summaries to create a final summary.
+
+        Parameters:
+        text (str): The input text to be summarized.
+        chunk_processing_prompt (str, optional): The prompt used for processing each chunk. Defaults to "".
+        chunk_processing_output_format (str, optional): The format of the output for each chunk. Defaults to "markdown".
+        final_memory_processing_prompt (str, optional): The prompt used for processing the final memory. Defaults to "Create final summary using this memory.".
+        final_output_format (str, optional): The format of the final output. Defaults to "markdown".
+        ctx_size (int, optional): The size of the context. Defaults to None.
+        chunk_size (int, optional): The size of each chunk. Defaults to None.
+        callback (callable, optional): A function to be called after processing each chunk. Defaults to None.
+        debug (bool, optional): A flag to enable debug mode. Defaults to False.
+
+        Returns:
+        The final summary in the specified format.
         """
+        if ctx_size is None:
+            ctx_size = self.ctx_size
+
+        if chunk_size is None:
+            chunk_size = ctx_size//4
 
         # Tokenize entire text
         all_tokens = self.tokenize(text)
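Based on the signature added above, a minimal call could look like the sketch below. The file name `report.txt` is a placeholder, and the defaults (markdown memory format, `chunk_size = ctx_size // 4`) come from the code shown in this diff.

```python
from lollms_client import LollmsClient

lc = LollmsClient()  # assumes a lollms service reachable on the default http://localhost:9600

with open("report.txt", "r", encoding="utf-8") as f:
    text = f.read()

# All keyword arguments keep the defaults introduced above.
summary = lc.sequential_summarize(text)
print(summary)
```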
@@ -1579,21 +1601,46 @@ Do not split the code in multiple tags.
         start_token_idx = 0
 
         # Create static prompt template
-        static_prompt_template = f"""
-
-Keep memory concise using bullet points.
+        static_prompt_template = f"""{self.system_full_header}
+You are a structured sequential text summary assistant that processes documents chunk by chunk, updating a memory of previously generated information at each step.
 
-
-{{memory}}
+Your goal is to extract and combine relevant information from each text chunk with the existing memory, ensuring no key details are omitted or invented.
 
-
+If requested, infer metadata like titles or authors from the content.
+
+{self.user_full_header}
+Update the memory by merging previous information with new details from this text chunk.
+Only add information explicitly present in the chunk. Retain all relevant prior memory unless clarified or updated by the current chunk.
+
+----
+# Text chunk:
+# Chunk number: {{chunk_id}}
+----
+```markdown
 {{chunk}}
+```
 
-
+{{custom_prompt}}
+
+Before updating, verify each requested detail:
+1. Does the chunk explicitly mention the information?
+2. Should prior memory be retained, updated, or clarified?
+
+Include only confirmed details in the output.
+Rewrite the full memory including the updates and keeping relevant data.
+Do not discuss the information inside the memory, just put the relevant information without comments.
+
+----
+# Current document analysis memory:
+----
+```{chunk_processing_output_format}
+{{memory}}
+```
+{self.ai_full_header}
+"""
         # Calculate static prompt tokens (with empty memory and chunk)
-
+        chunk_id=0
+        example_prompt = static_prompt_template.format(custom_prompt=chunk_processing_prompt if chunk_processing_prompt else '', memory="", chunk="", chunk_id=chunk_id)
         static_tokens = len(self.tokenize(example_prompt))
 
         # Process text in chunks
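For intuition about the token bookkeeping above: `static_tokens` is the fixed cost of the prompt scaffold, measured with an empty memory and chunk, and the chunk plus the running memory must share what remains of the context window. A rough, illustrative check follows; the numbers are made up and the exact guard condition lives in the processing loop shown next.

```python
# Illustrative numbers only; real values depend on the model's tokenizer.
ctx_size = 32800            # context window configured on the client
chunk_size = ctx_size // 4  # default chunk size from the code above
static_tokens = 450         # template cost with empty memory/chunk (example value)
memory_tokens = 1200        # running memory size at some step (example value)

# The template, the memory and the next chunk have to fit together; the loop below
# also asks generate() for up to ctx_size // 4 new tokens for the rewritten memory.
prompt_tokens = static_tokens + memory_tokens + chunk_size
if prompt_tokens > ctx_size:
    raise ValueError("Memory too large - consider reducing chunk size or increasing context window")
```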
@@ -1606,31 +1653,47 @@ Keep memory concise using bullet points.
                 raise ValueError("Memory too large - consider reducing chunk size or increasing context window")
 
             # Get chunk tokens
-
+            if bootstrap_chunk_size is not None and chunk_id < bootstrap_steps:
+                end_token_idx = min(start_token_idx + bootstrap_chunk_size, total_tokens)
+            else:
+                end_token_idx = min(start_token_idx + chunk_size, total_tokens)
             chunk_tokens = all_tokens[start_token_idx:end_token_idx]
             chunk = self.detokenize(chunk_tokens)
+            chunk_id +=1
 
             # Generate memory update
-            prompt = static_prompt_template.format(memory=memory, chunk=chunk)
-
+            prompt = static_prompt_template.format(custom_prompt=chunk_processing_prompt if chunk_processing_prompt else '', memory=memory, chunk=chunk, chunk_id=chunk_id)
+            if debug:
+                ASCIIColors.yellow(f" ----- {chunk_id-1} ------")
+                ASCIIColors.red(prompt)
 
+            memory = self.generate(prompt, n_predict=ctx_size//4, streaming_callback=callback).strip()
+            code = self.extract_code_blocks(memory)
+            if code:
+                memory=code[0]["content"]
+
+            if debug:
+                ASCIIColors.yellow(f" ----- OUT ------")
+                ASCIIColors.yellow(memory)
+                ASCIIColors.yellow(" ----- ------")
             # Move to next chunk
             start_token_idx = end_token_idx
 
         # Prepare final summary prompt
-        final_prompt_template = f"""!@>
-
-{
-
+        final_prompt_template = f"""!@>system:
+You are a memory summarizer assistant that helps users format their memory information into coherent text in a specific style or format.
+{final_memory_processing_prompt}.
+!@>user:
+Here is my document analysis memory:
+```{chunk_processing_output_format}
+{memory}
+```
+The output must be put inside a {final_output_format} markdown tag.
+The updated memory must be put in a {chunk_processing_output_format} markdown tag.
+!@>assistant:
 """
-
         # Truncate memory if needed for final prompt
-        example_final_prompt = final_prompt_template
+        example_final_prompt = final_prompt_template
        final_static_tokens = len(self.tokenize(example_final_prompt))
        available_final_tokens = ctx_size - final_static_tokens
 
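The loop above asks the model to return the updated memory inside a fenced block and then pulls it back out with `extract_code_blocks`; when no block is found, the raw response is kept. A small illustration of that convention, with a made-up model response:

```python
# Hypothetical model output for one chunk update; the fence is built dynamically
# only so that this snippet itself stays easy to quote.
fence = "```"
raw = (
    "Here is the updated memory:\n"
    f"{fence}markdown\n"
    "- Patient: Alex Brown, age 20\n"
    "- Appointment: 12 October\n"
    f"{fence}"
)

blocks = lc.extract_code_blocks(raw)   # list of dicts; each block exposes its "content"
memory = blocks[0]["content"] if blocks else raw.strip()
```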
@@ -1639,8 +1702,12 @@ Tone: {tone}
            memory = self.detokenize(memory_tokens[:available_final_tokens])
 
        # Generate final summary
-        final_prompt = final_prompt_template
-
+        final_prompt = final_prompt_template
+        memory = self.generate(final_prompt, streaming_callback=callback)
+        code = self.extract_code_blocks(memory)
+        if code:
+            memory=code[0]["content"]
+        return memory
 
    def error(self, content, duration:int=4, client_id=None, verbose:bool=True):
        ASCIIColors.error(content)
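Putting the pieces together, a fuller call that exercises the new parameters might look like the sketch below. Every value is illustrative rather than prescribed, and the `callback` argument is omitted because the diff only shows that it is forwarded to `generate()` as `streaming_callback`, not what signature it expects.

```python
# Illustrative values only; `text` is assumed to hold the document to summarize.
summary = lc.sequential_summarize(
    text,
    chunk_processing_prompt="Extract the title, the authors and the key findings.",
    chunk_processing_output_format="markdown",
    final_memory_processing_prompt="Write a one-paragraph abstract from this memory.",
    final_output_format="markdown",
    ctx_size=32800,
    chunk_size=8192,
    bootstrap_chunk_size=2048,   # smaller chunks for the first few passes
    bootstrap_steps=2,           # number of chunks processed at the bootstrap size
    debug=True,                  # prints each prompt and memory update via ASCIIColors
)
print(summary)
```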
lollms_client-0.9.0/lollms_client.egg-info/PKG-INFO  ADDED
@@ -0,0 +1,172 @@
+(172 lines, identical to lollms_client-0.9.0/PKG-INFO shown above)
lollms_client-0.8.2/PKG-INFO  DELETED
@@ -1,97 +0,0 @@
-Metadata-Version: 2.1
-Name: lollms_client
-Version: 0.8.2
-Summary: A client library for LoLLMs generate endpoint
-Home-page: https://github.com/ParisNeo/lollms_client
-Author: ParisNeo
-Author-email: parisneoai@gmail.com
-Classifier: Programming Language :: Python :: 3
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Operating System :: OS Independent
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: requests
-
-# lollms_client
-
-[](https://pypi.org/project/lollms-client/) [](https://pypi.org/project/lollms-client/) [](https://www.apache.org/licenses/LICENSE-2.0)
-
-Welcome to the lollms_client repository! This library is built by [ParisNeo](https://github.com/ParisNeo) and provides a convenient way to interact with the lollms (Lord Of Large Language Models) API. It is available on [PyPI](https://pypi.org/project/lollms-client/) and distributed under the Apache 2.0 License.
-
-## Installation
-
-To install the library from PyPI using `pip`, run:
-
-```
-pip install lollms-client
-```
-
-## Usage
-
-To use the lollms_client, first import the necessary classes:
-
-```python
-from lollms_client import LollmsClient
-
-# Initialize the LollmsClient instance this uses the default lollms localhost service http://localhost:9600
-lc = LollmsClient()
-# You can also use a different host and port number if you please
-lc = LollmsClient("http://some.other.server:9600")
-# You can also use a local or remote ollama server
-lc = LollmsClient(model_name="mistral-nemo:latest", default_generation_mode = ELF_GENERATION_FORMAT.OLLAMA)
-# You can also use a local or remote openai server (you can either set your key as an environment variable or pass it here)
-lc = LollmsClient(model_name="gpt-3.5-turbo-0125", default_generation_mode = ELF_GENERATION_FORMAT.OPENAI)
-```
-
-### Text Generation
-
-Use `generate()` for generating text from the lollms API.
-
-```python
-response = lc.generate(prompt="Once upon a time", stream=False, temperature=0.5)
-print(response)
-```
-
-### List Mounted Personalities (only on lollms)
-
-List mounted personalities of the lollms API with the `listMountedPersonalities()` method.
-
-```python
-response = lc.listMountedPersonalities()
-print(response)
-```
-
-### List Models
-
-List available models of the lollms API with the `listModels()` method.
-
-```python
-response = lc.listModels()
-print(response)
-```
-
-## Complete Example
-
-```python
-from lollms_client import LollmsClient
-
-# Initialize the LollmsClient instance
-lc = LollmsClient()
-
-# Generate Text
-response = lc.generate(prompt="Once upon a time", stream=False, temperature=0.5)
-print(response)
-
-# List Mounted Personalities
-response = lc.listMountedPersonalities()
-print(response)
-
-# List Models
-response = lc.listModels()
-print(response)
-```
-
-Feel free to contribute to the project by submitting issues or pull requests. Follow [ParisNeo](https://github.com/ParisNeo) on [GitHub](https://github.com/ParisNeo), [Twitter](https://twitter.com/ParisNeo_AI), [Discord](https://discord.gg/BDxacQmv), [Sub-Reddit](r/lollms), and [Instagram](https://www.instagram.com/spacenerduino/) for updates and news.
-
-Happy coding!
lollms_client-0.8.2/README.md  DELETED
@@ -1,83 +0,0 @@
-(83 lines, identical to the README portion of lollms_client-0.8.2/PKG-INFO above)
lollms_client-0.8.2/lollms_client.egg-info/PKG-INFO  DELETED
@@ -1,97 +0,0 @@
-(97 lines, identical to lollms_client-0.8.2/PKG-INFO shown above)
The remaining 20 files listed above ({lollms_client-0.8.2 → lollms_client-0.9.0}) are unchanged.