lollms-client 0.13.2__tar.gz → 0.14.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of lollms-client might be problematic.

Files changed (58)
  1. {lollms_client-0.13.2 → lollms_client-0.14.0}/PKG-INFO +1 -1
  2. {lollms_client-0.13.2 → lollms_client-0.14.0}/examples/text_gen.py +1 -1
  3. lollms_client-0.14.0/examples/text_gen_system_prompt.py +28 -0
  4. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/__init__.py +1 -1
  5. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/llm_bindings/lollms/__init__.py +2 -1
  6. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/llm_bindings/ollama/__init__.py +8 -7
  7. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/llm_bindings/openai/__init__.py +6 -0
  8. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/llm_bindings/tensor_rt/__init__.py +3 -2
  9. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/llm_bindings/transformers/__init__.py +2 -2
  10. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/llm_bindings/vllm/__init__.py +3 -2
  11. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_core.py +2 -0
  12. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_llm_binding.py +1 -0
  13. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client.egg-info/PKG-INFO +1 -1
  14. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client.egg-info/SOURCES.txt +1 -0
  15. {lollms_client-0.13.2 → lollms_client-0.14.0}/LICENSE +0 -0
  16. {lollms_client-0.13.2 → lollms_client-0.14.0}/README.md +0 -0
  17. {lollms_client-0.13.2 → lollms_client-0.14.0}/examples/article_summary/article_summary.py +0 -0
  18. {lollms_client-0.13.2 → lollms_client-0.14.0}/examples/deep_analyze/deep_analyse.py +0 -0
  19. {lollms_client-0.13.2 → lollms_client-0.14.0}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
  20. {lollms_client-0.13.2 → lollms_client-0.14.0}/examples/function_call/functions_call_with images.py +0 -0
  21. {lollms_client-0.13.2 → lollms_client-0.14.0}/examples/personality_test/chat_test.py +0 -0
  22. {lollms_client-0.13.2 → lollms_client-0.14.0}/examples/personality_test/chat_with_aristotle.py +0 -0
  23. {lollms_client-0.13.2 → lollms_client-0.14.0}/examples/personality_test/tesks_test.py +0 -0
  24. {lollms_client-0.13.2 → lollms_client-0.14.0}/examples/simple_text_gen_test.py +0 -0
  25. {lollms_client-0.13.2 → lollms_client-0.14.0}/examples/simple_text_gen_with_image_test.py +0 -0
  26. {lollms_client-0.13.2 → lollms_client-0.14.0}/examples/test_local_models/local_chat.py +0 -0
  27. {lollms_client-0.13.2 → lollms_client-0.14.0}/examples/text_2_audio.py +0 -0
  28. {lollms_client-0.13.2 → lollms_client-0.14.0}/examples/text_2_image.py +0 -0
  29. {lollms_client-0.13.2 → lollms_client-0.14.0}/examples/text_and_image_2_audio.py +0 -0
  30. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/llm_bindings/__init__.py +0 -0
  31. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_config.py +0 -0
  32. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_discussion.py +0 -0
  33. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_functions.py +0 -0
  34. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_js_analyzer.py +0 -0
  35. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_python_analyzer.py +0 -0
  36. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_stt_binding.py +0 -0
  37. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_tasks.py +0 -0
  38. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_tti_binding.py +0 -0
  39. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_ttm_binding.py +0 -0
  40. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_tts_binding.py +0 -0
  41. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_ttv_binding.py +0 -0
  42. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_types.py +0 -0
  43. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_utilities.py +0 -0
  44. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/stt_bindings/__init__.py +0 -0
  45. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  46. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/tti_bindings/__init__.py +0 -0
  47. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  48. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/ttm_bindings/__init__.py +0 -0
  49. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  50. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/tts_bindings/__init__.py +0 -0
  51. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  52. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/ttv_bindings/__init__.py +0 -0
  53. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  54. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client.egg-info/dependency_links.txt +0 -0
  55. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client.egg-info/requires.txt +0 -0
  56. {lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client.egg-info/top_level.txt +0 -0
  57. {lollms_client-0.13.2 → lollms_client-0.14.0}/pyproject.toml +0 -0
  58. {lollms_client-0.13.2 → lollms_client-0.14.0}/setup.cfg +0 -0

{lollms_client-0.13.2 → lollms_client-0.14.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.13.2
+Version: 0.14.0
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License

{lollms_client-0.13.2 → lollms_client-0.14.0}/examples/text_gen.py

@@ -1,7 +1,7 @@
 from lollms_client import LollmsClient
 
 # Initialize the LollmsClient instance
-lc = LollmsClient("http://localhost:9600")
+lc = LollmsClient("lollms")
 # Generate Text
 # response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
 # print(response)

lollms_client-0.14.0/examples/text_gen_system_prompt.py

@@ -0,0 +1,28 @@
+from lollms_client import LollmsClient
+
+# Initialize the LollmsClient instance
+lc = LollmsClient("ollama",model_name="mistral-nemo:latest")
+# Generate Text
+# response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
+# print(response)
+
+# # Generate Completion
+# response = lc.generate_completion(prompt="What is the capital of France", stream=False, temperature=0.5)
+# print(response)
+
+def cb(chunk, type):
+    print(chunk,end="",flush=True)
+
+response = lc.generate_text(prompt="One plus one equals ", system_prompt="You are a playful dude who never really answers questions correctly. always answer with quirky style.", stream=False, temperature=0.5, streaming_callback=cb)
+print()
+print(response)
+print()
+
+
+# List Mounted Personalities
+response = lc.listMountedPersonalities()
+print(response)
+
+# List Models
+response = lc.listModels()
+print(response)

{lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/__init__.py

@@ -6,7 +6,7 @@ from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
 from lollms_client.lollms_utilities import PromptReshaper # Keep general utilities
 from lollms_client.lollms_functions import FunctionCalling_Library
 
-__version__ = "0.13.2"
+__version__ = "0.14.0"
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [

{lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/llm_bindings/lollms/__init__.py

@@ -49,6 +49,7 @@ class LollmsLLMBinding(LollmsLLMBinding):
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None,
+                      system_prompt: str = "",
                       n_predict: Optional[int] = None,
                       stream: bool = False,
                       temperature: float = 0.1,
@@ -106,7 +107,7 @@ class LollmsLLMBinding(LollmsLLMBinding):
 
         # Prepare request data
         data = {
-            "prompt": prompt,
+            "prompt":"!@>system: "+system_prompt+"\n"+"!@>user: "+prompt if system_prompt else prompt,
             "model_name": self.model_name,
             "personality": self.personality,
             "n_predict": n_predict,

{lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/llm_bindings/ollama/__init__.py

@@ -113,9 +113,9 @@ class OllamaBinding(LollmsLLMBinding):
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None, # List of image file paths
+                      system_prompt: str = "",
                       n_predict: Optional[int] = None,
                       stream: bool = False,
-                      system_prompt = '',
                       temperature: float = 0.7, # Ollama default is 0.8, common default 0.7
                       top_k: int = 40, # Ollama default is 40
                       top_p: float = 0.9, # Ollama default is 0.9
@@ -201,15 +201,16 @@ class OllamaBinding(LollmsLLMBinding):
                 )
                 return response_dict.get('message', {}).get('content', '')
             else: # Text-only
+                messages = [{'role': 'system', 'content':system_prompt},{'role': 'user', 'content': prompt}]
                 if stream:
-                    response_stream = self.ollama_client.generate(
+                    response_stream = self.ollama_client.chat(
                         model=self.model_name,
-                        prompt=prompt,
+                        messages=messages,
                         stream=True,
                         options=options if options else None
                     )
                     for chunk_dict in response_stream:
-                        chunk_content = chunk_dict.get('response', '')
+                        chunk_content = chunk_dict.message.content
                         if chunk_content:
                             full_response_text += chunk_content
                             if streaming_callback:
@@ -217,13 +218,13 @@ class OllamaBinding(LollmsLLMBinding):
                                 break
                     return full_response_text
                 else: # Not streaming
-                    response_dict = self.ollama_client.generate(
+                    response_dict = self.ollama_client.chat(
                         model=self.model_name,
-                        prompt=prompt,
+                        messages=messages,
                         stream=False,
                         options=options if options else None
                     )
-                    return response_dict.get('response', '')
+                    return response_dict.message.content
         except ollama.ResponseError as e:
             error_message = f"Ollama API ResponseError: {e.error or 'Unknown error'} (status code: {e.status_code})"
             ASCIIColors.error(error_message)
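
The net effect of these two hunks is that the text-only path now calls Ollama's chat endpoint instead of generate, so responses are read from message.content rather than a top-level response field. A standalone sketch of the new call pattern, assuming a local Ollama server and the ollama Python package (the model name is borrowed from the new example file):

    import ollama

    client = ollama.Client(host="http://localhost:11434")  # default Ollama address
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "One plus one equals "},
    ]
    # Streaming: each chunk exposes its text under message.content
    for chunk in client.chat(model="mistral-nemo:latest", messages=messages, stream=True):
        print(chunk.message.content, end="", flush=True)
    print()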

{lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/llm_bindings/openai/__init__.py

@@ -58,6 +58,7 @@ class OpenAIBinding(LollmsLLMBinding):
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None,
+                      system_prompt: str = "",
                       n_predict: Optional[int] = None,
                       stream: bool = False,
                       temperature: float = 0.1,
@@ -98,6 +99,11 @@ class OpenAIBinding(LollmsLLMBinding):
         if images:
             messages = [
                 {
+                    "role": "system",
+                    "content": system_prompt,
+                },
+
+                {
                     "role": "user",
                     "content": [
                         {
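
The new system entry is simply prepended to the messages list ahead of the multimodal user message. A hedged sketch of the resulting structure (the text and image placeholder are illustrative, not the binding's exact values):

    # Sketch: messages list with the leading system entry, as the binding now builds it
    system_prompt = "You are a helpful assistant."  # hypothetical sample value
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": [
            {"type": "text", "text": "Describe this image."},
            # image_url entries for each encoded image would follow here
        ]},
    ]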

{lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/llm_bindings/tensor_rt/__init__.py

@@ -331,6 +331,7 @@ class VLLMBinding(LollmsLLMBinding):
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None,
+                      system_prompt: str = "",
                       n_predict: Optional[int] = 1024,
                       stream: bool = False, # vLLM's generate is blocking, stream is pseudo
                       temperature: float = 0.7,
@@ -381,7 +382,7 @@ class VLLMBinding(LollmsLLMBinding):
                 # If providing multi_modal_data, usually prompt_token_ids are also needed.
                 # This can get complex as it depends on how the model expects images to be interleaved.
                 # For a simple case where image comes first:
-                encoded_prompt_ids = self.tokenizer.encode(prompt)
+                encoded_prompt_ids = self.tokenizer.encode(system_prompt+"\n"+prompt if system_prompt else prompt)
                 gen_kwargs["prompt_token_ids"] = [encoded_prompt_ids] # List of lists
                 gen_kwargs["multi_modal_data"] = [{"image": mm_data_content}] # List of dicts
                 gen_kwargs["prompts"] = None # Don't use prompts if prompt_token_ids is used
@@ -389,7 +390,7 @@ class VLLMBinding(LollmsLLMBinding):
             except Exception as e_mm:
                 return {"status": False, "error": f"Multimodal prep error: {e_mm}"}
         else:
-            gen_kwargs["prompts"] = [prompt]
+            gen_kwargs["prompts"] = [system_prompt+"\n"+prompt if system_prompt else prompt]
 
         try:
             outputs = self.llm_engine.generate(**gen_kwargs, sampling_params=sampling_params)
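
vLLM's engine consumes raw strings rather than chat messages, so here the system prompt is just prepended with a newline. A one-line sketch of that fallback (sample values are assumptions):

    # Sketch: plain-text fallback when the backend has no chat/message structure
    system_prompt = "You are a helpful assistant."  # hypothetical sample value
    prompt = "Once upon a time"                     # hypothetical sample value
    final_prompt = system_prompt + "\n" + prompt if system_prompt else prompt
    # -> "You are a helpful assistant.\nOnce upon a time"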

{lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/llm_bindings/transformers/__init__.py

@@ -112,6 +112,7 @@ class TransformersBinding(LollmsLLMBinding):
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None,
+                      system_prompt: str = "",
                       n_predict: Optional[int] = None,
                       stream: bool = False,
                       temperature: float = 0.1,
@@ -123,8 +124,7 @@ class TransformersBinding(LollmsLLMBinding):
                       n_threads: int = 8,
                       ctx_size: int | None = None,
                       streaming_callback: Optional[Callable[[str, str], None]] = None,
-                      return_legacy_cache: bool = False,
-                      system_prompt: str = "You are a helpful assistant.") -> Union[str, dict]:
+                      return_legacy_cache: bool = False) -> Union[str, dict]:
         """
         Generate text using the Transformers model, with optional image support.
 

{lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/llm_bindings/vllm/__init__.py

@@ -331,6 +331,7 @@ class VLLMBinding(LollmsLLMBinding):
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None,
+                      system_prompt: str = "",
                       n_predict: Optional[int] = 1024,
                       stream: bool = False, # vLLM's generate is blocking, stream is pseudo
                       temperature: float = 0.7,
@@ -381,7 +382,7 @@ class VLLMBinding(LollmsLLMBinding):
                 # If providing multi_modal_data, usually prompt_token_ids are also needed.
                 # This can get complex as it depends on how the model expects images to be interleaved.
                 # For a simple case where image comes first:
-                encoded_prompt_ids = self.tokenizer.encode(prompt)
+                encoded_prompt_ids = self.tokenizer.encode(system_prompt+"\n"+prompt if system_prompt else prompt)
                 gen_kwargs["prompt_token_ids"] = [encoded_prompt_ids] # List of lists
                 gen_kwargs["multi_modal_data"] = [{"image": mm_data_content}] # List of dicts
                 gen_kwargs["prompts"] = None # Don't use prompts if prompt_token_ids is used
@@ -389,7 +390,7 @@ class VLLMBinding(LollmsLLMBinding):
             except Exception as e_mm:
                 return {"status": False, "error": f"Multimodal prep error: {e_mm}"}
         else:
-            gen_kwargs["prompts"] = [prompt]
+            gen_kwargs["prompts"] = [system_prompt+"\n"+prompt if system_prompt else prompt]
 
         try:
             outputs = self.llm_engine.generate(**gen_kwargs, sampling_params=sampling_params)

{lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_core.py

@@ -329,6 +329,7 @@ class LollmsClient():
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None,
+                      system_prompt: str = "",
                       n_predict: Optional[int] = None,
                       stream: Optional[bool] = None,
                       temperature: Optional[float] = None,
@@ -365,6 +366,7 @@ class LollmsClient():
         return self.binding.generate_text(
             prompt=prompt,
             images=images,
+            system_prompt=system_prompt,
             n_predict=n_predict if n_predict is not None else self.default_n_predict,
             stream=stream if stream is not None else self.default_stream,
             temperature=temperature if temperature is not None else self.default_temperature,
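
End to end, the new keyword flows from LollmsClient.generate_text straight through to whichever binding is active. A minimal usage sketch, assuming a running Ollama server with the model from the bundled example pulled:

    from lollms_client import LollmsClient

    lc = LollmsClient("ollama", model_name="mistral-nemo:latest")
    response = lc.generate_text(
        prompt="One plus one equals ",
        system_prompt="You are a playful dude who never really answers questions correctly.",
        stream=False,
        temperature=0.5,
    )
    print(response)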

{lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client/lollms_llm_binding.py

@@ -32,6 +32,7 @@ class LollmsLLMBinding(ABC):
     def generate_text(self,
                       prompt: str,
                       images: Optional[List[str]] = None,
+                      system_prompt: str = "",
                       n_predict: Optional[int] = None,
                       stream: bool = False,
                       temperature: float = 0.1,
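
Because the abstract base class now declares the parameter, third-party bindings should accept it too. A skeletal subclass, purely illustrative (EchoBinding is hypothetical, and **kwargs absorbs the remaining keyword arguments of the real signature):

    from typing import Optional, List
    from lollms_client.lollms_llm_binding import LollmsLLMBinding

    class EchoBinding(LollmsLLMBinding):  # hypothetical binding for illustration
        def generate_text(self,
                          prompt: str,
                          images: Optional[List[str]] = None,
                          system_prompt: str = "",
                          n_predict: Optional[int] = None,
                          stream: bool = False,
                          **kwargs) -> str:
            # Honor the system prompt in whatever form the backend expects
            return (system_prompt + "\n" if system_prompt else "") + prompt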

{lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.13.2
+Version: 0.14.0
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License

{lollms_client-0.13.2 → lollms_client-0.14.0}/lollms_client.egg-info/SOURCES.txt

@@ -7,6 +7,7 @@ examples/text_2_audio.py
 examples/text_2_image.py
 examples/text_and_image_2_audio.py
 examples/text_gen.py
+examples/text_gen_system_prompt.py
 examples/article_summary/article_summary.py
 examples/deep_analyze/deep_analyse.py
 examples/deep_analyze/deep_analyze_multiple_files.py