lollms-client 0.13.2__py3-none-any.whl → 0.14.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of lollms-client might be problematic.
- examples/text_gen.py +1 -1
- examples/text_gen_system_prompt.py +28 -0
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/lollms/__init__.py +2 -1
- lollms_client/llm_bindings/ollama/__init__.py +8 -7
- lollms_client/llm_bindings/openai/__init__.py +6 -0
- lollms_client/llm_bindings/tensor_rt/__init__.py +3 -2
- lollms_client/llm_bindings/transformers/__init__.py +2 -2
- lollms_client/llm_bindings/vllm/__init__.py +3 -2
- lollms_client/lollms_core.py +2 -0
- lollms_client/lollms_llm_binding.py +1 -0
- {lollms_client-0.13.2.dist-info → lollms_client-0.14.0.dist-info}/METADATA +1 -1
- {lollms_client-0.13.2.dist-info → lollms_client-0.14.0.dist-info}/RECORD +16 -15
- {lollms_client-0.13.2.dist-info → lollms_client-0.14.0.dist-info}/WHEEL +1 -1
- {lollms_client-0.13.2.dist-info → lollms_client-0.14.0.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-0.13.2.dist-info → lollms_client-0.14.0.dist-info}/top_level.txt +0 -0
examples/text_gen.py
CHANGED
@@ -1,7 +1,7 @@
 from lollms_client import LollmsClient
 
 # Initialize the LollmsClient instance
-lc = LollmsClient("
+lc = LollmsClient("lollms")
 # Generate Text
 # response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
 # print(response)
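In the new version the first positional argument of LollmsClient selects a binding by name ("lollms" targets a running lollms server; the old argument is cut off in this rendering). A minimal sketch of the updated example, assuming nothing beyond what the diff shows:

    from lollms_client import LollmsClient

    # "lollms" names the binding; this release also ships "ollama",
    # "openai", "transformers", "tensor_rt" and "vllm" bindings.
    lc = LollmsClient("lollms")
    response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
    print(response)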
examples/text_gen_system_prompt.py
ADDED
@@ -0,0 +1,28 @@
+from lollms_client import LollmsClient
+
+# Initialize the LollmsClient instance
+lc = LollmsClient("ollama",model_name="mistral-nemo:latest")
+# Generate Text
+# response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
+# print(response)
+
+# # Generate Completion
+# response = lc.generate_completion(prompt="What is the capital of France", stream=False, temperature=0.5)
+# print(response)
+
+def cb(chunk, type):
+    print(chunk,end="",flush=True)
+
+response = lc.generate_text(prompt="One plus one equals ", system_prompt="You are a playful dude who never really answers questions correctly. always answer with quirky style.", stream=False, temperature=0.5, streaming_callback=cb)
+print()
+print(response)
+print()
+
+
+# List Mounted Personalities
+response = lc.listMountedPersonalities()
+print(response)
+
+# List Models
+response = lc.listModels()
+print(response)
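The new example defines a two-argument callback cb(chunk, type) but passes it with stream=False; in the Ollama binding below the callback is only invoked inside the streaming loop. A streamed variant would look like the following sketch (assumption: stream=True is what routes chunks through streaming_callback, as the binding code in this diff suggests):

    def cb(chunk, type):
        # Print each chunk as it arrives, without buffering.
        print(chunk, end="", flush=True)

    response = lc.generate_text(
        prompt="One plus one equals ",
        system_prompt="You are a playful dude who never really answers questions correctly.",
        stream=True,
        streaming_callback=cb,
    )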
lollms_client/__init__.py
CHANGED
@@ -6,7 +6,7 @@ from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
 from lollms_client.lollms_utilities import PromptReshaper # Keep general utilities
 from lollms_client.lollms_functions import FunctionCalling_Library
 
-__version__ = "0.
+__version__ = "0.14.0"
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
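Only the version string changes here. A quick post-upgrade check:

    import lollms_client

    # The wheel's __init__.py now pins the version string shown in the hunk.
    assert lollms_client.__version__ == "0.14.0"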
lollms_client/llm_bindings/lollms/__init__.py
CHANGED
@@ -49,6 +49,7 @@ class LollmsLLMBinding(LollmsLLMBinding):
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None,
+                      system_prompt: str = "",
                       n_predict: Optional[int] = None,
                       stream: bool = False,
                       temperature: float = 0.1,
@@ -106,7 +107,7 @@ class LollmsLLMBinding(LollmsLLMBinding):
 
         # Prepare request data
         data = {
-            "prompt": prompt,
+            "prompt":"!@>system: "+system_prompt+"\n"+"!@>user: "+prompt if system_prompt else prompt,
             "model_name": self.model_name,
             "personality": self.personality,
             "n_predict": n_predict,
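The replacement wraps the prompt in lollms's "!@>system:"/"!@>user:" chat format only when a system prompt is supplied. Python's conditional expression binds looser than "+", so the entire concatenation is the "if" arm; a sketch confirming both branches:

    system_prompt = "Answer briefly."
    prompt = "What is 2+2?"

    # The conditional expression binds looser than "+", so the whole
    # concatenation is selected only when system_prompt is non-empty:
    full = "!@>system: "+system_prompt+"\n"+"!@>user: "+prompt if system_prompt else prompt
    assert full == "!@>system: Answer briefly.\n!@>user: What is 2+2?"

    # An empty system_prompt leaves the prompt untouched:
    system_prompt = ""
    full = "!@>system: "+system_prompt+"\n"+"!@>user: "+prompt if system_prompt else prompt
    assert full == "What is 2+2?"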
lollms_client/llm_bindings/ollama/__init__.py
CHANGED
@@ -113,9 +113,9 @@ class OllamaBinding(LollmsLLMBinding):
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None, # List of image file paths
+                      system_prompt: str = "",
                       n_predict: Optional[int] = None,
                       stream: bool = False,
-                      system_prompt = '',
                       temperature: float = 0.7, # Ollama default is 0.8, common default 0.7
                       top_k: int = 40, # Ollama default is 40
                       top_p: float = 0.9, # Ollama default is 0.9
@@ -201,15 +201,16 @@ class OllamaBinding(LollmsLLMBinding):
             )
             return response_dict.get('message', {}).get('content', '')
         else: # Text-only
+            messages = [{'role': 'system', 'content':system_prompt},{'role': 'user', 'content': prompt}]
             if stream:
-                response_stream = self.ollama_client.
+                response_stream = self.ollama_client.chat(
                     model=self.model_name,
-
+                    messages=messages,
                     stream=True,
                     options=options if options else None
                 )
                 for chunk_dict in response_stream:
-                    chunk_content = chunk_dict.
+                    chunk_content = chunk_dict.message.content
                     if chunk_content:
                         full_response_text += chunk_content
                         if streaming_callback:
@@ -217,13 +218,13 @@ class OllamaBinding(LollmsLLMBinding):
                             break
                 return full_response_text
             else: # Not streaming
-                response_dict = self.ollama_client.
+                response_dict = self.ollama_client.chat(
                     model=self.model_name,
-
+                    messages=messages,
                     stream=False,
                     options=options if options else None
                 )
-                return response_dict.
+                return response_dict.message.content
         except ollama.ResponseError as e:
             error_message = f"Ollama API ResponseError: {e.error or 'Unknown error'} (status code: {e.status_code})"
             ASCIIColors.error(error_message)
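The text-only path switches to ollama_client.chat with an explicit messages list, which is how the system prompt reaches the model as a proper 'system' role rather than inline text. A minimal sketch of the same call shape against the ollama Python package (client construction and model name are illustrative assumptions; the attribute access mirrors the diff):

    import ollama

    # Host defaults to the local Ollama server; model name is an example
    # taken from elsewhere in this diff.
    client = ollama.Client()
    messages = [
        {'role': 'system', 'content': 'You are terse.'},
        {'role': 'user', 'content': 'Name one prime number.'},
    ]

    # Non-streaming: the response exposes .message.content, matching the
    # binding's "return response_dict.message.content" above.
    reply = client.chat(model='mistral-nemo:latest', messages=messages, stream=False)
    print(reply.message.content)

    # Streaming: each chunk carries a partial .message.content.
    for chunk in client.chat(model='mistral-nemo:latest', messages=messages, stream=True):
        print(chunk.message.content, end='', flush=True)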
lollms_client/llm_bindings/openai/__init__.py
CHANGED
@@ -58,6 +58,7 @@ class OpenAIBinding(LollmsLLMBinding):
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None,
+                      system_prompt: str = "",
                       n_predict: Optional[int] = None,
                       stream: bool = False,
                       temperature: float = 0.1,
@@ -98,6 +99,11 @@ class OpenAIBinding(LollmsLLMBinding):
         if images:
             messages = [
                 {
+                    "role": "system",
+                    "content": system_prompt,
+                },
+
+                {
                     "role": "user",
                     "content": [
                         {
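The multimodal branch now opens the messages list with a system entry ahead of the user turn. A sketch of the resulting payload shape; the user-content items follow the OpenAI vision format built elsewhere in this file, so the text item below is an illustrative assumption:

    # Shape of the messages payload once a system prompt is prepended.
    messages = [
        {
            "role": "system",
            "content": "Describe images factually.",
        },
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this picture?"},
                # ...one image entry per item in `images`, built elsewhere in the file...
            ],
        },
    ]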
lollms_client/llm_bindings/tensor_rt/__init__.py
CHANGED
@@ -331,6 +331,7 @@ class VLLMBinding(LollmsLLMBinding):
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None,
+                      system_prompt: str = "",
                       n_predict: Optional[int] = 1024,
                       stream: bool = False, # vLLM's generate is blocking, stream is pseudo
                       temperature: float = 0.7,
@@ -381,7 +382,7 @@ class VLLMBinding(LollmsLLMBinding):
                 # If providing multi_modal_data, usually prompt_token_ids are also needed.
                 # This can get complex as it depends on how the model expects images to be interleaved.
                 # For a simple case where image comes first:
-                encoded_prompt_ids = self.tokenizer.encode(prompt)
+                encoded_prompt_ids = self.tokenizer.encode(system_prompt+"\n"+prompt if system_prompt else prompt)
                 gen_kwargs["prompt_token_ids"] = [encoded_prompt_ids] # List of lists
                 gen_kwargs["multi_modal_data"] = [{"image": mm_data_content}] # List of dicts
                 gen_kwargs["prompts"] = None # Don't use prompts if prompt_token_ids is used
@@ -389,7 +390,7 @@ class VLLMBinding(LollmsLLMBinding):
             except Exception as e_mm:
                 return {"status": False, "error": f"Multimodal prep error: {e_mm}"}
         else:
-            gen_kwargs["prompts"] = [prompt]
+            gen_kwargs["prompts"] = [system_prompt+"\n"+prompt if system_prompt else prompt]
 
         try:
             outputs = self.llm_engine.generate(**gen_kwargs, sampling_params=sampling_params)
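For engines driven through raw prompts there is no chat template: the system prompt is simply prepended on its own line. The same pair of hunks appears again below for the vllm binding, so one sketch covers both (build_prompt is a hypothetical helper mirroring the inline expression):

    def build_prompt(prompt: str, system_prompt: str = "") -> str:
        # Mirrors the binding's inline expression: the system prompt is
        # prepended on its own line only when one is provided.
        return system_prompt + "\n" + prompt if system_prompt else prompt

    assert build_prompt("hi") == "hi"
    assert build_prompt("hi", "be nice") == "be nice\nhi"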
lollms_client/llm_bindings/transformers/__init__.py
CHANGED
@@ -112,6 +112,7 @@ class TransformersBinding(LollmsLLMBinding):
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None,
+                      system_prompt: str = "",
                       n_predict: Optional[int] = None,
                       stream: bool = False,
                       temperature: float = 0.1,
@@ -123,8 +124,7 @@ class TransformersBinding(LollmsLLMBinding):
                       n_threads: int = 8,
                       ctx_size: int | None = None,
                       streaming_callback: Optional[Callable[[str, str], None]] = None,
-                      return_legacy_cache: bool = False,
-                      system_prompt: str = "You are a helpful assistant.") -> Union[str, dict]:
+                      return_legacy_cache: bool = False) -> Union[str, dict]:
         """
         Generate text using the Transformers model, with optional image support.
 
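Here system_prompt moves from the tail of the signature, where it defaulted to "You are a helpful assistant.", to the shared early slot with an empty default. Keyword call sites are unaffected; callers that relied on the old implicit persona must now pass it explicitly, as in this sketch (binding stands for any constructed TransformersBinding):

    # Keyword call sites survive the signature change unchanged; only the
    # default moved from "You are a helpful assistant." to "".
    result = binding.generate_text(
        prompt="Hello",
        system_prompt="You are a helpful assistant.",  # now explicit, no longer implied
    )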
lollms_client/llm_bindings/vllm/__init__.py
CHANGED
@@ -331,6 +331,7 @@ class VLLMBinding(LollmsLLMBinding):
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None,
+                      system_prompt: str = "",
                       n_predict: Optional[int] = 1024,
                       stream: bool = False, # vLLM's generate is blocking, stream is pseudo
                       temperature: float = 0.7,
@@ -381,7 +382,7 @@ class VLLMBinding(LollmsLLMBinding):
                 # If providing multi_modal_data, usually prompt_token_ids are also needed.
                 # This can get complex as it depends on how the model expects images to be interleaved.
                 # For a simple case where image comes first:
-                encoded_prompt_ids = self.tokenizer.encode(prompt)
+                encoded_prompt_ids = self.tokenizer.encode(system_prompt+"\n"+prompt if system_prompt else prompt)
                 gen_kwargs["prompt_token_ids"] = [encoded_prompt_ids] # List of lists
                 gen_kwargs["multi_modal_data"] = [{"image": mm_data_content}] # List of dicts
                 gen_kwargs["prompts"] = None # Don't use prompts if prompt_token_ids is used
@@ -389,7 +390,7 @@ class VLLMBinding(LollmsLLMBinding):
             except Exception as e_mm:
                 return {"status": False, "error": f"Multimodal prep error: {e_mm}"}
         else:
-            gen_kwargs["prompts"] = [prompt]
+            gen_kwargs["prompts"] = [system_prompt+"\n"+prompt if system_prompt else prompt]
 
         try:
             outputs = self.llm_engine.generate(**gen_kwargs, sampling_params=sampling_params)
lollms_client/lollms_core.py
CHANGED
@@ -329,6 +329,7 @@ class LollmsClient():
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None,
+                      system_prompt: str = "",
                       n_predict: Optional[int] = None,
                       stream: Optional[bool] = None,
                       temperature: Optional[float] = None,
@@ -365,6 +366,7 @@ class LollmsClient():
         return self.binding.generate_text(
             prompt=prompt,
             images=images,
+            system_prompt=system_prompt,
             n_predict=n_predict if n_predict is not None else self.default_n_predict,
             stream=stream if stream is not None else self.default_stream,
             temperature=temperature if temperature is not None else self.default_temperature,
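This is the public entry point: LollmsClient.generate_text now accepts system_prompt and forwards it verbatim to the active binding, while the None-guarded arguments keep resolving to client defaults. End to end, the new keyword works identically across bindings; a sketch reusing the example's setup:

    from lollms_client import LollmsClient

    lc = LollmsClient("ollama",model_name="mistral-nemo:latest")
    # system_prompt is passed straight through to the binding; n_predict,
    # stream and temperature fall back to the client defaults when omitted.
    print(lc.generate_text(prompt="One plus one equals ", system_prompt="Answer in rhyme."))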
{lollms_client-0.13.2.dist-info → lollms_client-0.14.0.dist-info}/RECORD
CHANGED
@@ -3,7 +3,8 @@ examples/simple_text_gen_with_image_test.py,sha256=Euv53jbKTVJDvs854lgJvA5F-iRnA
 examples/text_2_audio.py,sha256=MfL4AH_NNwl6m0I0ywl4BXRZJ0b9Y_9fRqDIe6O-Sbw,3523
 examples/text_2_image.py,sha256=Ri7lQ-GW54YWQh2eofcaN6LpwFoorbpJsJffrcXl3cg,6415
 examples/text_and_image_2_audio.py,sha256=QLvSsLff8VZZa7k7K1EFGlPpQWZy07zM4Fnli5btAl0,2074
-examples/text_gen.py,sha256=
+examples/text_gen.py,sha256=IejpNmIlsfz3WpJg8IRm5X6F06JKd7h_GuonUxTITx8,758
+examples/text_gen_system_prompt.py,sha256=wumwZ09WZkaK0tQ74KaZmfsYXcmjZIlsdim_P1aJmeA,910
 examples/article_summary/article_summary.py,sha256=CR8mCBNcZEVCR-q34uOmrJyMlG-xk4HkMbsV-TOZEnk,1978
 examples/deep_analyze/deep_analyse.py,sha256=fZNmDrfEAuxEAfdbjAgJYIh1k6wbiuZ4RvwHRvtyUs8,971
 examples/deep_analyze/deep_analyze_multiple_files.py,sha256=fOryShA33P4IFxcxUDe-nJ2kW0v9w9yW8KsToS3ETl8,1032
@@ -12,13 +13,13 @@ examples/personality_test/chat_test.py,sha256=o2jlpoddFc-T592iqAiA29xk3x27KsdK5D
 examples/personality_test/chat_with_aristotle.py,sha256=4X_fwubMpd0Eq2rCReS2bgVlUoAqJprjkLXk2Jz6pXU,1774
 examples/personality_test/tesks_test.py,sha256=7LIiwrEbva9WWZOLi34fsmCBN__RZbPpxoUOKA_AtYk,1924
 examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
-lollms_client/__init__.py,sha256=
+lollms_client/__init__.py,sha256=PHFRY4RskAaiectooBrSCrxd6UGpZkdTqMHmXM26VnQ,823
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=
+lollms_client/lollms_core.py,sha256=KkeKjQZVeUjdsQjxw2bygUxq1gXlPNnYiyxdWnwA4L8,78073
 lollms_client/lollms_discussion.py,sha256=9b83m0D894jwpgssWYTQHbVxp1gJoI-J947Ui_dRXII,2073
 lollms_client/lollms_functions.py,sha256=p8SFtmEPqvVCsIz2fZ5HxyOHaxjrAo5c12uTzJnb6m8,3594
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
-lollms_client/lollms_llm_binding.py,sha256=
+lollms_client/lollms_llm_binding.py,sha256=7xvtLsFQYqFKS7m0BQQMvVq0XXZWZeGlGuv30mi1dF8,7408
 lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
 lollms_client/lollms_stt_binding.py,sha256=ovmpFF0fnmPC9VNi1-rxAJA8xI4JZDUBh_YwdtoTx28,5818
 lollms_client/lollms_tasks.py,sha256=Tgqces03gPTHFJCcPaeN9vBCsil3SSJX7nQAjCQ2-yg,34393
@@ -29,12 +30,12 @@ lollms_client/lollms_ttv_binding.py,sha256=u-gLIe22tbu4YsKA5RTyUT7iBlKxPXDmoQzcc
 lollms_client/lollms_types.py,sha256=cfc1sremM8KR4avkYX99fIVkkdRvXErrCWKGjLrgv50,2723
 lollms_client/lollms_utilities.py,sha256=YAgamfp0pBVApR68AHKjhp1lh6isMNF8iadwWLl63c0,7045
 lollms_client/llm_bindings/__init__.py,sha256=9sWGpmWSSj6KQ8H4lKGCjpLYwhnVdL_2N7gXCphPqh4,14
-lollms_client/llm_bindings/lollms/__init__.py,sha256=
-lollms_client/llm_bindings/ollama/__init__.py,sha256=
-lollms_client/llm_bindings/openai/__init__.py,sha256=
-lollms_client/llm_bindings/tensor_rt/__init__.py,sha256=
-lollms_client/llm_bindings/transformers/__init__.py,sha256=
-lollms_client/llm_bindings/vllm/__init__.py,sha256=
+lollms_client/llm_bindings/lollms/__init__.py,sha256=a36AMPFEf3xK4zx1M_L9PC-3-b0iiDf7eyLkknPjgaY,12356
+lollms_client/llm_bindings/ollama/__init__.py,sha256=MemSA20Zivn-kfP11JPA9FHigC1U2CGsJ1FaDtUFUUM,26574
+lollms_client/llm_bindings/openai/__init__.py,sha256=NDZIdzW0pnHy9gPXSKfFyS6SPIOOxj9ZEzEE7gZT2NQ,12054
+lollms_client/llm_bindings/tensor_rt/__init__.py,sha256=IY4CrHVpHY77R1rzsl3iwcoarDjYD24n7bFKk_69PD8,31983
+lollms_client/llm_bindings/transformers/__init__.py,sha256=IWfAmBGqZEelt5Z_jYTqpz7LzzKMVsKWx5nv4zBgKCQ,12544
+lollms_client/llm_bindings/vllm/__init__.py,sha256=ZRCR7g3A2kHQ_07viNrNnVHoIGj5TNA4Q41rQWeTlxw,31967
 lollms_client/stt_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lollms_client/stt_bindings/lollms/__init__.py,sha256=7-IZkrsn15Vaz0oqkqCxMeNQfMkeilbgScLlrrywES4,6098
 lollms_client/tti_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -45,8 +46,8 @@ lollms_client/tts_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
 lollms_client/tts_bindings/lollms/__init__.py,sha256=8x2_T9XscvISw2TiaLoFxvrS7TIsVLdqbwSc04cX-wc,7164
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-0.
-lollms_client-0.
-lollms_client-0.
-lollms_client-0.
-lollms_client-0.
+lollms_client-0.14.0.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-0.14.0.dist-info/METADATA,sha256=gkDoZr-SYxtqgyzp339qwmNj1_iBiFurvPVD6TTvc2Q,7276
+lollms_client-0.14.0.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+lollms_client-0.14.0.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
+lollms_client-0.14.0.dist-info/RECORD,,

{lollms_client-0.13.2.dist-info → lollms_client-0.14.0.dist-info}/licenses/LICENSE
File without changes
{lollms_client-0.13.2.dist-info → lollms_client-0.14.0.dist-info}/top_level.txt
File without changes
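RECORD lines have the form path,sha256=<digest>,<size>, where the digest is the urlsafe-base64 SHA-256 of the file with "=" padding stripped (per the wheel spec). A small sketch for re-deriving an entry, useful when auditing a release like this one:

    import base64
    import hashlib
    from pathlib import Path

    def record_entry(path: str) -> str:
        # urlsafe-base64 SHA-256 digest, padding stripped, then size in bytes.
        data = Path(path).read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
        return f"{path},sha256={digest},{len(data)}"

    # For example, record_entry("examples/text_gen.py") run against the
    # 0.14.0 wheel contents should reproduce the line ending in
    # "sha256=IejpNmIlsfz3WpJg8IRm5X6F06JKd7h_GuonUxTITx8,758".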