lollms-client 0.8.2__py3-none-any.whl → 0.9.1__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- lollms_client/lollms_core.py +132 -38
- lollms_client-0.9.1.dist-info/METADATA +172 -0
- {lollms_client-0.8.2.dist-info → lollms_client-0.9.1.dist-info}/RECORD +6 -6
- lollms_client-0.8.2.dist-info/METADATA +0 -97
- {lollms_client-0.8.2.dist-info → lollms_client-0.9.1.dist-info}/LICENSE +0 -0
- {lollms_client-0.8.2.dist-info → lollms_client-0.9.1.dist-info}/WHEEL +0 -0
- {lollms_client-0.8.2.dist-info → lollms_client-0.9.1.dist-info}/top_level.txt +0 -0
lollms_client/lollms_core.py
CHANGED
@@ -20,11 +20,38 @@ class ELF_GENERATION_FORMAT(Enum):
     LITELLM = 3
     TRANSFORMERS = 4
     VLLM = 5
+
+    @classmethod
+    def from_string(cls, format_string: str) -> 'ELF_GENERATION_FORMAT':
+        format_mapping = {
+            "LOLLMS": cls.LOLLMS,
+            "OPENAI": cls.OPENAI,
+            "OLLAMA": cls.OLLAMA,
+            "LITELLM": cls.LITELLM,
+            "TRANSFORMERS": cls.TRANSFORMERS,
+            "VLLM": cls.VLLM
+        }
 
+        try:
+            return format_mapping[format_string.upper()]
+        except KeyError:
+            raise ValueError(f"Invalid format string: {format_string}. Must be one of {list(format_mapping.keys())}.")
+
 class ELF_COMPLETION_FORMAT(Enum):
     Instruct = 0
     Chat = 1
+    @classmethod
+    def from_string(cls, format_string: str) -> 'ELF_COMPLETION_FORMAT':
+        format_mapping = {
+            "Instruct": cls.Instruct,
+            "Chat": cls.Chat,
+        }
 
+        try:
+            return format_mapping[format_string.upper()]
+        except KeyError:
+            raise ValueError(f"Invalid format string: {format_string}. Must be one of {list(format_mapping.keys())}.")
+
 class LollmsClient():
     def __init__(
         self,
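The new `from_string` helpers map plain strings to the enum members and raise `ValueError` for anything unrecognized. A minimal usage sketch (illustrative, not taken from the package; it assumes `ELF_GENERATION_FORMAT` is importable from the package root, as the README below shows):

```python
from lollms_client import ELF_GENERATION_FORMAT

fmt = ELF_GENERATION_FORMAT.from_string("ollama")    # input is upper-cased, so the lookup is case-insensitive
print(fmt)                                           # ELF_GENERATION_FORMAT.OLLAMA

try:
    ELF_GENERATION_FORMAT.from_string("not-a-backend")
except ValueError as err:
    print(err)                                       # lists the accepted names
```

Note that `ELF_COMPLETION_FORMAT.from_string` also upper-cases its input while its mapping keys are mixed-case ("Instruct", "Chat"), so as written that particular lookup appears to raise `ValueError` for every input.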
@@ -1555,20 +1582,42 @@ Do not split the code in multiple tags.
 
         return cleaned_text
 
-    def sequential_summarize(
+    def sequential_summarize(
+                self,
+                text:str,
+                chunk_processing_prompt:str="Extract relevant information from the current text chunk and update the memory if needed.",
+                chunk_processing_output_format="markdown",
+                final_memory_processing_prompt="Create final summary using this memory.",
+                final_output_format="markdown",
+                ctx_size:int=None,
+                chunk_size:int=None,
+                bootstrap_chunk_size:int=None,
+                bootstrap_steps:int=None,
+                callback = None,
+                debug:bool= False):
         """
-
-
-
-
-
-
-
-
-
-
-
+        This function processes a given text in chunks and generates a summary for each chunk.
+        It then combines the summaries to create a final summary.
+
+        Parameters:
+        text (str): The input text to be summarized.
+        chunk_processing_prompt (str, optional): The prompt used for processing each chunk. Defaults to "".
+        chunk_processing_output_format (str, optional): The format of the output for each chunk. Defaults to "markdown".
+        final_memory_processing_prompt (str, optional): The prompt used for processing the final memory. Defaults to "Create final summary using this memory.".
+        final_output_format (str, optional): The format of the final output. Defaults to "markdown".
+        ctx_size (int, optional): The size of the context. Defaults to None.
+        chunk_size (int, optional): The size of each chunk. Defaults to None.
+        callback (callable, optional): A function to be called after processing each chunk. Defaults to None.
+        debug (bool, optional): A flag to enable debug mode. Defaults to False.
+
+        Returns:
+        The final summary in the specified format.
         """
+        if ctx_size is None:
+            ctx_size = self.ctx_size
+
+        if chunk_size is None:
+            chunk_size = ctx_size//4
 
         # Tokenize entire text
         all_tokens = self.tokenize(text)
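A minimal call sketch for the reworked `sequential_summarize` signature above (illustrative only; the host, model name, and input file are placeholders). When left as `None`, `ctx_size` falls back to the client's context size and `chunk_size` to `ctx_size // 4`:

```python
from lollms_client import LollmsClient, ELF_GENERATION_FORMAT

lc = LollmsClient(model_name="phi4:latest",
                  default_generation_mode=ELF_GENERATION_FORMAT.OLLAMA)

# Hypothetical input document
with open("long_report.txt", "r", encoding="utf-8") as f:
    text = f.read()

summary = lc.sequential_summarize(
    text,
    chunk_processing_prompt="Extract the key findings and any named authors.",
    chunk_processing_output_format="markdown",
    final_memory_processing_prompt="Create final summary using this memory.",
    final_output_format="markdown",
    chunk_size=4096,            # tokens per regular chunk
    bootstrap_chunk_size=1024,  # smaller chunks for the first bootstrap_steps chunks
    bootstrap_steps=2,
    debug=False,
)
print(summary)
```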
@@ -1579,21 +1628,46 @@ Keep memory concise using bullet points.
         start_token_idx = 0
 
         # Create static prompt template
-        static_prompt_template = f"""
-
-Keep memory concise using bullet points.
+        static_prompt_template = f"""{self.system_full_header}
+You are a structured sequential text summary assistant that processes documents chunk by chunk, updating a memory of previously generated information at each step.
 
-
-
+Your goal is to extract and combine relevant information from each text chunk with the existing memory, ensuring no key details are omitted or invented.
+
+If requested, infer metadata like titles or authors from the content.
 
-
+{self.user_full_header}
+Update the memory by merging previous information with new details from this text chunk.
+Only add information explicitly present in the chunk. Retain all relevant prior memory unless clarified or updated by the current chunk.
+
+----
+# Text chunk:
+# Chunk number: {{chunk_id}}
+----
+```markdown
 {{chunk}}
+```
 
-
-
-
+{{custom_prompt}}
+
+Before updating, verify each requested detail:
+1. Does the chunk explicitly mention the information?
+2. Should prior memory be retained, updated, or clarified?
+
+Include only confirmed details in the output.
+Rewrite the full memory including the updates and keeping relevant data.
+Do not discuss the information inside thememory, just put the relevant information without comments.
+
+----
+# Current document analysis memory:
+----
+```{chunk_processing_output_format}
+{{memory}}
+```
+{self.ai_full_header}
+"""
         # Calculate static prompt tokens (with empty memory and chunk)
-
+        chunk_id=0
+        example_prompt = static_prompt_template.format(custom_prompt=chunk_processing_prompt if chunk_processing_prompt else '', memory="", chunk="", chunk_id=chunk_id)
         static_tokens = len(self.tokenize(example_prompt))
 
         # Process text in chunks
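The template above is built in two stages: fields known when the method runs (`self.system_full_header`, `chunk_processing_output_format`, ...) are interpolated by the f-string, while per-chunk fields are escaped with double braces (`{{chunk}}`, `{{memory}}`, `{{chunk_id}}`, `{{custom_prompt}}`) so they survive to the later `str.format()` call. A standalone sketch of that pattern, independent of lollms:

```python
# Two-stage templating: single braces resolve now (f-string),
# doubled braces become single braces and are filled later by str.format().
output_format = "markdown"

template = f"""Requested memory format: {output_format}
# Chunk number: {{chunk_id}}
{{chunk}}
"""

prompt = template.format(chunk_id=1, chunk="First chunk of the document...")
print(prompt)
# Requested memory format: markdown
# # Chunk number: 1
# First chunk of the document...
```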
@@ -1606,31 +1680,47 @@ Keep memory concise using bullet points.
                 raise ValueError("Memory too large - consider reducing chunk size or increasing context window")
 
             # Get chunk tokens
-
+            if bootstrap_chunk_size is not None and chunk_id < bootstrap_steps:
+                end_token_idx = min(start_token_idx + bootstrap_chunk_size, total_tokens)
+            else:
+                end_token_idx = min(start_token_idx + chunk_size, total_tokens)
             chunk_tokens = all_tokens[start_token_idx:end_token_idx]
             chunk = self.detokenize(chunk_tokens)
+            chunk_id +=1
 
             # Generate memory update
-            prompt = static_prompt_template.format(memory=memory, chunk=chunk)
-
+            prompt = static_prompt_template.format(custom_prompt=chunk_processing_prompt if chunk_processing_prompt else '', memory=memory, chunk=chunk, chunk_id=chunk_id)
+            if debug:
+                ASCIIColors.yellow(f" ----- {chunk_id-1} ------")
+                ASCIIColors.red(prompt)
 
+            memory = self.generate(prompt, n_predict=ctx_size//4, streaming_callback=callback).strip()
+            code = self.extract_code_blocks(memory)
+            if code:
+                memory=code[0]["content"]
+
+            if debug:
+                ASCIIColors.yellow(f" ----- OUT ------")
+                ASCIIColors.yellow(memory)
+                ASCIIColors.yellow(" ----- ------")
             # Move to next chunk
             start_token_idx = end_token_idx
 
         # Prepare final summary prompt
-        final_prompt_template = f"""!@>
-
-
-
-
-
-{
-
-
+        final_prompt_template = f"""!@>system:
+You are a memory summarizer assistant that helps users format their memory information into coherant text in a specific style or format.
+{final_memory_processing_prompt}.
+!@>user:
+Here is my document analysis memory:
+```{chunk_processing_output_format}
+{memory}
+```
+The output must be put inside a {final_output_format} markdown tag.
+The updated memory must be put in a {chunk_processing_output_format} markdown tag.
+!@>assistant:
 """
-
         # Truncate memory if needed for final prompt
-        example_final_prompt = final_prompt_template
+        example_final_prompt = final_prompt_template
         final_static_tokens = len(self.tokenize(example_final_prompt))
         available_final_tokens = ctx_size - final_static_tokens
 
@@ -1639,8 +1729,12 @@ Tone: {tone}
         memory = self.detokenize(memory_tokens[:available_final_tokens])
 
         # Generate final summary
-        final_prompt = final_prompt_template
-
+        final_prompt = final_prompt_template
+        memory = self.generate(final_prompt, streaming_callback=callback)
+        code = self.extract_code_blocks(memory)
+        if code:
+            memory=code[0]["content"]
+        return memory
 
     def error(self, content, duration:int=4, client_id=None, verbose:bool=True):
         ASCIIColors.error(content)
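The chunking loop above walks the token list in fixed windows, using the smaller `bootstrap_chunk_size` for the first `bootstrap_steps` chunks before switching to `chunk_size`. A self-contained sketch of that windowing logic (plain integers stand in for real token counts; this is not lollms code):

```python
# Returns the (start, end) token windows the loop would visit.
def chunk_windows(total_tokens, chunk_size, bootstrap_chunk_size=None, bootstrap_steps=0):
    start, chunk_id, windows = 0, 0, []
    while start < total_tokens:
        if bootstrap_chunk_size is not None and chunk_id < bootstrap_steps:
            end = min(start + bootstrap_chunk_size, total_tokens)   # small early chunks
        else:
            end = min(start + chunk_size, total_tokens)             # regular chunks
        windows.append((start, end))
        chunk_id += 1
        start = end
    return windows

print(chunk_windows(10000, 4096, bootstrap_chunk_size=1024, bootstrap_steps=2))
# [(0, 1024), (1024, 2048), (2048, 6144), (6144, 10000)]
```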
lollms_client-0.9.1.dist-info/METADATA
ADDED
@@ -0,0 +1,172 @@

Metadata-Version: 2.1
Name: lollms_client
Version: 0.9.1
Summary: A client library for LoLLMs generate endpoint
Home-page: https://github.com/ParisNeo/lollms_client
Author: ParisNeo
Author-email: parisneoai@gmail.com
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: requests

# lollms_client

[](https://pypi.org/project/lollms-client/) [](https://pypi.org/project/lollms-client/) [](https://www.apache.org/licenses/LICENSE-2.0)

Welcome to the lollms_client repository! This library is built by [ParisNeo](https://github.com/ParisNeo) and provides a convenient way to interact with the lollms (Lord Of Large Language Models) API. It is available on [PyPI](https://pypi.org/project/lollms-client/) and distributed under the Apache 2.0 License.

## Installation

To install the library from PyPI using `pip`, run:

```
pip install lollms-client
```

## Getting Started

The LollmsClient class is the gateway to interacting with the lollms API. Here's how you can instantiate it in various ways to suit your needs:

```python
from lollms_client import LollmsClient

# Default instantiation using the local lollms service - hosted at http://localhost:9600
lc = LollmsClient()

# Specify a custom host and port
lc = LollmsClient(host_address="http://some.server:9600")

# Use a specific model with a local or remote ollama server
from lollms_client import ELF_GENERATION_FORMAT
lc = LollmsClient(model_name="phi4:latest", default_generation_mode = ELF_GENERATION_FORMAT.OLLAMA)

# Use a specific model with a local or remote OpenAI server (you can either set your key as an environment variable or pass it here)
lc = LollmsClient(model_name="gpt-3.5-turbo-0125", default_generation_mode = ELF_GENERATION_FORMAT.OPENAI)

# Use a specific model with an Ollama binding on the server, with a context size of 32800
lc = LollmsClient(
    host_address="http://some.other.server:11434",
    model_name="phi4:latest",
    ctx_size=32800,
    default_generation_mode=ELF_GENERATION_FORMAT.OLLAMA
)
```

### Text Generation

Use `generate()` for generating text from the lollms API.

```python
response = lc.generate(prompt="Once upon a time", stream=False, temperature=0.5)
print(response)
```

### Code Generation

The `generate_code()` function allows you to generate code snippets based on your input. Here's how you can use it:

```python
# A generic case to generate a snippet in python
response = lc.generate_code(prompt="Create a function to add all numbers of a list", language='python')
print(response)

# generate_code can also be used to generate responses ready to be parsed - with json or yaml for instance
response = lc.generate_code(prompt="Mr Alex Brown presents himself to the pharmacist. He is 20 years old and seeks an appointment for the 12th of October. Fill out his application.", language='json', template="""
{
"name":"the first name of the person"
"family_name":"the family name of the person"
"age":"the age of the person"
"appointment_date":"the date of the appointment"
"reason":"the reason for the appointment. if not specified fill out with 'N/A'"
}
""")
data = json.loads(response)
print(data['name'], data['family_name'], "- Reason:", data['reason'])
```


### List Mounted Personalities (only on lollms)

List mounted personalities of the lollms API with the `listMountedPersonalities()` method.

```python
response = lc.listMountedPersonalities()
print(response)
```

### List Models

List available models of the lollms API with the `listModels()` method.

```python
response = lc.listModels()
print(response)
```

## Complete Example

```python
import json
from datetime import datetime

# Assuming LollmsClient is already imported and instantiated as lc
lc = LollmsClient()

# Generate code using the LollmsClient
response = lc.generate_code(
    prompt="Mr Alex Brown presents himself to the pharmacist. He is 20 years old and seeks an appointment for the 12th of October. Fill out his application.",
    language='json',
    template="""
{
    "name": "the first name of the person",
    "family_name": "the family name of the person",
    "age": "the age of the person",
    "appointment_date": "the date of the appointment in the format DD/MM/YYYY",
    "reason": "the reason for the appointment. if not specified fill out with 'N/A'"
}
"""
)

# Parse the JSON response
data = json.loads(response)

# Function to validate the data
def validate_data(data):
    try:
        # Validate age
        if not (0 < int(data['age']) < 120):
            raise ValueError("Invalid age provided.")

        # Validate appointment date
        appointment_date = datetime.strptime(data['appointment_date'], '%d/%m/%Y')
        if appointment_date < datetime.now():
            raise ValueError("Appointment date cannot be in the past.")

        # Validate name fields
        if not data['name'] or not data['family_name']:
            raise ValueError("Name fields cannot be empty.")

        return True
    except Exception as e:
        print(f"Validation Error: {e}")
        return False

# Function to simulate a response to the user
def simulate_response(data):
    if validate_data(data):
        print(f"Appointment confirmed for {data['name']} {data['family_name']}.")
        print(f"Date: {data['appointment_date']}")
        print(f"Reason: {data['reason']}")
    else:
        print("Failed to confirm appointment due to invalid data.")

# Execute the simulation
simulate_response(data)
```

Feel free to contribute to the project by submitting issues or pull requests. Follow [ParisNeo](https://github.com/ParisNeo) on [GitHub](https://github.com/ParisNeo), [Twitter](https://twitter.com/ParisNeo_AI), [Discord](https://discord.gg/BDxacQmv), [Sub-Reddit](r/lollms), and [Instagram](https://www.instagram.com/spacenerduino/) for updates and news.

Happy coding!
{lollms_client-0.8.2.dist-info → lollms_client-0.9.1.dist-info}/RECORD
CHANGED
@@ -1,6 +1,6 @@
 lollms_client/__init__.py,sha256=_1_zkzDrAs43mf6LEBVZUEYCGZ8KRmj-jd_vgkB8xSw,516
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=
+lollms_client/lollms_core.py,sha256=KjDXCiCZS_dI1DyEHRlw0Qhim9zJfVmmCd_Lxp9U0aU,86103
 lollms_client/lollms_discussion.py,sha256=9b83m0D894jwpgssWYTQHbVxp1gJoI-J947Ui_dRXII,2073
 lollms_client/lollms_functions.py,sha256=p8SFtmEPqvVCsIz2fZ5HxyOHaxjrAo5c12uTzJnb6m8,3594
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
@@ -13,8 +13,8 @@ lollms_client/lollms_tti.py,sha256=WznZ5ADhig-SFNmwlgviLZaAfl67NVqnZxYzhel3vxU,1
 lollms_client/lollms_tts.py,sha256=WznZ5ADhig-SFNmwlgviLZaAfl67NVqnZxYzhel3vxU,1287
 lollms_client/lollms_types.py,sha256=uuaADVVfi1sZucY7gT8v-EDN5xrMI3vy_4M7k7Uz3eU,2170
 lollms_client/lollms_utilities.py,sha256=YAgamfp0pBVApR68AHKjhp1lh6isMNF8iadwWLl63c0,7045
-lollms_client-0.
-lollms_client-0.
-lollms_client-0.
-lollms_client-0.
-lollms_client-0.
+lollms_client-0.9.1.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-0.9.1.dist-info/METADATA,sha256=gdd_TJNGajt13fJfPBaFPeyzulrc0ZWZWfTAK3VWTZw,6400
+lollms_client-0.9.1.dist-info/WHEEL,sha256=y4mX-SOX4fYIkonsAGA5N0Oy-8_gI4FXw5HNI1xqvWg,91
+lollms_client-0.9.1.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+lollms_client-0.9.1.dist-info/RECORD,,
lollms_client-0.8.2.dist-info/METADATA
DELETED
@@ -1,97 +0,0 @@

Metadata-Version: 2.1
Name: lollms_client
Version: 0.8.2
Summary: A client library for LoLLMs generate endpoint
Home-page: https://github.com/ParisNeo/lollms_client
Author: ParisNeo
Author-email: parisneoai@gmail.com
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: requests

# lollms_client

[](https://pypi.org/project/lollms-client/) [](https://pypi.org/project/lollms-client/) [](https://www.apache.org/licenses/LICENSE-2.0)

Welcome to the lollms_client repository! This library is built by [ParisNeo](https://github.com/ParisNeo) and provides a convenient way to interact with the lollms (Lord Of Large Language Models) API. It is available on [PyPI](https://pypi.org/project/lollms-client/) and distributed under the Apache 2.0 License.

## Installation

To install the library from PyPI using `pip`, run:

```
pip install lollms-client
```

## Usage

To use the lollms_client, first import the necessary classes:

```python
from lollms_client import LollmsClient

# Initialize the LollmsClient instance this uses the default lollms localhost service http://localhost:9600
lc = LollmsClient()
# You can also use a different host and port number if you please
lc = LollmsClient("http://some.other.server:9600")
# You can also use a local or remote ollama server
lc = LollmsClient(model_name="mistral-nemo:latest", default_generation_mode = ELF_GENERATION_FORMAT.OLLAMA)
# You can also use a local or remote openai server (you can either set your key as an environment variable or pass it here)
lc = LollmsClient(model_name="gpt-3.5-turbo-0125", default_generation_mode = ELF_GENERATION_FORMAT.OPENAI)
```

### Text Generation

Use `generate()` for generating text from the lollms API.

```python
response = lc.generate(prompt="Once upon a time", stream=False, temperature=0.5)
print(response)
```


### List Mounted Personalities (only on lollms)

List mounted personalities of the lollms API with the `listMountedPersonalities()` method.

```python
response = lc.listMountedPersonalities()
print(response)
```

### List Models

List available models of the lollms API with the `listModels()` method.

```python
response = lc.listModels()
print(response)
```

## Complete Example

```python
from lollms_client import LollmsClient

# Initialize the LollmsClient instance
lc = LollmsClient()

# Generate Text
response = lc.generate(prompt="Once upon a time", stream=False, temperature=0.5)
print(response)

# List Mounted Personalities
response = lc.listMountedPersonalities()
print(response)

# List Models
response = lc.listModels()
print(response)
```

Feel free to contribute to the project by submitting issues or pull requests. Follow [ParisNeo](https://github.com/ParisNeo) on [GitHub](https://github.com/ParisNeo), [Twitter](https://twitter.com/ParisNeo_AI), [Discord](https://discord.gg/BDxacQmv), [Sub-Reddit](r/lollms), and [Instagram](https://www.instagram.com/spacenerduino/) for updates and news.

Happy coding!
{lollms_client-0.8.2.dist-info → lollms_client-0.9.1.dist-info}/LICENSE: file without changes
{lollms_client-0.8.2.dist-info → lollms_client-0.9.1.dist-info}/WHEEL: file without changes
{lollms_client-0.8.2.dist-info → lollms_client-0.9.1.dist-info}/top_level.txt: file without changes