lollms-client 0.9.1__py3-none-any.whl → 0.10.0__py3-none-any.whl

This diff represents the contents of publicly released package versions and reflects the changes between them as they appear in their public registry. It is provided for informational purposes only.

This version of lollms-client has been flagged as potentially problematic.

@@ -0,0 +1,210 @@
+ # lollms_binding.py
+ from abc import ABC, abstractmethod
+ import importlib
+ from pathlib import Path
+ from typing import Optional, Callable, List
+ from lollms_client.lollms_types import ELF_COMPLETION_FORMAT
+ from ascii_colors import trace_exception
+
+ class LollmsLLMBinding(ABC):
+     """Abstract base class for all LOLLMS LLM bindings"""
+
+     def __init__(self,
+                  host_address: Optional[str] = None,
+                  model_name: str = "",
+                  service_key: Optional[str] = None,
+                  verify_ssl_certificate: bool = True,
+                  default_completion_format: ELF_COMPLETION_FORMAT = ELF_COMPLETION_FORMAT.Chat
+                  ):
+         """
+         Initialize the LollmsLLMBinding base class.
+
+         Args:
+             host_address (Optional[str]): The host address for the service. Defaults to None.
+             model_name (str): The name of the model to use. Defaults to an empty string.
+             service_key (Optional[str]): Authentication key for the service. Defaults to None.
+             verify_ssl_certificate (bool): Whether to verify SSL certificates. Defaults to True.
+             default_completion_format (ELF_COMPLETION_FORMAT): The completion format (Chat or Instruct).
+         """
+         if host_address is not None:
+             self.host_address = host_address[:-1] if host_address.endswith("/") else host_address
+         else:
+             self.host_address = None
+         self.model_name = model_name
+         self.service_key = service_key
+         self.verify_ssl_certificate = verify_ssl_certificate
+         self.default_completion_format = default_completion_format
+
+     @abstractmethod
+     def generate_text(self,
+                       prompt: str,
+                       images: Optional[List[str]] = None,
+                       n_predict: Optional[int] = None,
+                       stream: bool = False,
+                       temperature: float = 0.1,
+                       top_k: int = 50,
+                       top_p: float = 0.95,
+                       repeat_penalty: float = 0.8,
+                       repeat_last_n: int = 40,
+                       seed: Optional[int] = None,
+                       n_threads: int = 8,
+                       streaming_callback: Optional[Callable[[str, str], None]] = None) -> str:
+         """
+         Generate text based on the provided prompt and parameters.
+
+         Args:
+             prompt (str): The input prompt for text generation.
+             images (Optional[List[str]]): List of image file paths for multimodal generation.
+             n_predict (Optional[int]): Maximum number of tokens to generate.
+             stream (bool): Whether to stream the output. Defaults to False.
+             temperature (float): Sampling temperature. Defaults to 0.1.
+             top_k (int): Top-k sampling parameter. Defaults to 50.
+             top_p (float): Top-p sampling parameter. Defaults to 0.95.
+             repeat_penalty (float): Penalty for repeated tokens. Defaults to 0.8.
+             repeat_last_n (int): Number of previous tokens to consider for the repeat penalty. Defaults to 40.
+             seed (Optional[int]): Random seed for generation.
+             n_threads (int): Number of threads to use. Defaults to 8.
+             streaming_callback (Optional[Callable[[str, str], None]]): Callback function for streaming output.
+                 - First parameter (str): The chunk of text received.
+                 - Second parameter (str): The message type (e.g., MSG_TYPE.MSG_TYPE_CHUNK).
+
+         Returns:
+             str: The generated text, or an error dictionary if generation failed.
+         """
+         pass
+
+     @abstractmethod
+     def tokenize(self, text: str) -> list:
+         """
+         Tokenize the input text into a list of tokens.
+
+         Args:
+             text (str): The text to tokenize.
+
+         Returns:
+             list: List of tokens.
+         """
+         pass
+
+     @abstractmethod
+     def detokenize(self, tokens: list) -> str:
+         """
+         Convert a list of tokens back to text.
+
+         Args:
+             tokens (list): List of tokens to detokenize.
+
+         Returns:
+             str: Detokenized text.
+         """
+         pass
+
+     @abstractmethod
+     def embed(self, text: str, **kwargs) -> list:
+         """
+         Get embeddings for the input text.
+
+         Args:
+             text (str): Input text to embed.
+             **kwargs: Additional binding-specific arguments (e.g., model, truncate, options, keep_alive).
+
+         Returns:
+             list: The embeddings of the input text.
+         """
+         pass
+
+     @abstractmethod
+     def get_model_info(self) -> dict:
+         """
+         Return information about the current model.
+
+         Returns:
+             dict: Model information dictionary.
+         """
+         pass
+
+     @abstractmethod
+     def listModels(self) -> list:
+         """Lists models"""
+         pass
+
+     @abstractmethod
+     def load_model(self, model_name: str) -> bool:
+         """
+         Load a specific model.
+
+         Args:
+             model_name (str): Name of the model to load.
+
+         Returns:
+             bool: True if the model loaded successfully, False otherwise.
+         """
+         pass
+
+
+ class LollmsLLMBindingManager:
+     """Manages binding discovery and instantiation"""
+
+     def __init__(self, llm_bindings_dir: str = "llm_bindings"):
+         """
+         Initialize the LollmsLLMBindingManager.
+
+         Args:
+             llm_bindings_dir (str): Directory containing binding implementations. Defaults to "llm_bindings".
+         """
+         self.llm_bindings_dir = Path(llm_bindings_dir)
+         self.available_bindings = {}
+
+     def _load_binding(self, binding_name: str):
+         """Dynamically load a specific binding implementation from the llm bindings directory."""
+         binding_dir = self.llm_bindings_dir / binding_name
+         if binding_dir.is_dir() and (binding_dir / "__init__.py").exists():
+             try:
+                 module = importlib.import_module(f"lollms_client.llm_bindings.{binding_name}")
+                 binding_class = getattr(module, module.BindingName)
+                 self.available_bindings[binding_name] = binding_class
+             except Exception as e:
+                 trace_exception(e)
+                 print(f"Failed to load binding {binding_name}: {str(e)}")
+
+     def create_binding(self,
+                        binding_name: str,
+                        host_address: Optional[str] = None,
+                        model_name: str = "",
+                        service_key: Optional[str] = None,
+                        verify_ssl_certificate: bool = True,
+                        personality: Optional[int] = None) -> Optional[LollmsLLMBinding]:
+         """
+         Create an instance of a specific binding.
+
+         Args:
+             binding_name (str): Name of the binding to create.
+             host_address (Optional[str]): Host address for the service.
+             model_name (str): Name of the model to use.
+             service_key (Optional[str]): Authentication key for the service.
+             verify_ssl_certificate (bool): Whether to verify SSL certificates.
+             personality (Optional[int]): Personality ID for the LOLLMS binding.
+
+         Returns:
+             Optional[LollmsLLMBinding]: Binding instance, or None if creation failed.
+         """
+         if binding_name not in self.available_bindings:
+             self._load_binding(binding_name)
+
+         binding_class = self.available_bindings.get(binding_name)
+         if binding_class:
+             return binding_class(host_address, model_name, service_key, verify_ssl_certificate, personality)
+         return None
+
+     def get_available_bindings(self) -> list[str]:
+         """
+         Return the list of available binding names.
+
+         Returns:
+             list[str]: List of binding names.
+         """
+         return [binding_dir.name for binding_dir in self.llm_bindings_dir.iterdir() if binding_dir.is_dir() and (binding_dir / "__init__.py").exists()]
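
For orientation, here is a minimal sketch of how a concrete binding could plug into the abstract base class above. The `EchoBinding` class is hypothetical and purely illustrative; only `LollmsLLMBinding` and the module path (taken from the RECORD listing later in this diff) come from the package itself.

```python
# Hypothetical toy binding: shows which abstract methods a subclass must override.
# Assumes lollms-client 0.10.0 is installed.
from lollms_client.lollms_llm_binding import LollmsLLMBinding

class EchoBinding(LollmsLLMBinding):
    """Echoes the prompt back instead of calling a model (illustrative only)."""
    def generate_text(self, prompt, images=None, n_predict=None, stream=False,
                      temperature=0.1, top_k=50, top_p=0.95, repeat_penalty=0.8,
                      repeat_last_n=40, seed=None, n_threads=8,
                      streaming_callback=None) -> str:
        if streaming_callback:
            streaming_callback(prompt, "MSG_TYPE_CHUNK")  # chunk + message type
        return prompt

    def tokenize(self, text: str) -> list:
        return text.split()            # naive whitespace tokenizer

    def detokenize(self, tokens: list) -> str:
        return " ".join(tokens)

    def embed(self, text: str, **kwargs) -> list:
        return [float(len(text))]      # placeholder embedding

    def get_model_info(self) -> dict:
        return {"name": self.model_name, "host": self.host_address}

    def listModels(self) -> list:
        return [self.model_name]

    def load_model(self, model_name: str) -> bool:
        self.model_name = model_name
        return True

binding = EchoBinding(host_address="http://localhost:9600/", model_name="echo")
print(binding.host_address)            # trailing "/" stripped by the base __init__
print(binding.generate_text("Hello"))
```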
@@ -57,23 +57,23 @@ class TasksLibrary:
          callback(text,message_type)
          self.bot_says = bot_says
          return True
-     def generate(self, prompt, max_size, temperature = None, top_k = None, top_p=None, repeat_penalty=None, repeat_last_n=None, callback=None, debug=False, show_progress=False, stream= False ):
+     def generate(self, prompt, n_predict, temperature = None, top_k = None, top_p=None, repeat_penalty=None, repeat_last_n=None, callback=None, debug=False, show_progress=False, stream= False ):
          ASCIIColors.info("Text generation started: Warming up")
          self.nb_received_tokens = 0
          self.bot_says = ""
          if debug:
              self.print_prompt("gen",prompt)
 
-         bot_says = self.lollms.generate(
-             prompt,
-             max_size,
+         bot_says = self.lollms.generate_text(
+             prompt=prompt,
+             n_predict = n_predict,
              stream=stream,
              streaming_callback=partial(self.process, callback=callback, show_progress=show_progress),
-             temperature= temperature if temperature is not None else self.lollms.temperature,
-             top_k= top_k if top_k is not None else self.lollms.top_k ,
-             top_p= top_p if top_p is not None else self.lollms.top_p ,
-             repeat_penalty= repeat_penalty if repeat_penalty is not None else self.lollms.repeat_penalty,
-             repeat_last_n= repeat_last_n if repeat_last_n is not None else self.lollms.repeat_last_n,
+             temperature= temperature,
+             top_k= top_k,
+             top_p= top_p,
+             repeat_penalty= repeat_penalty,
+             repeat_last_n= repeat_last_n,
              ).strip()
          return self.bot_says if stream else bot_says
 
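Net effect of this hunk for callers: `generate` now takes `n_predict` instead of `max_size`, routes through `generate_text`, and no longer substitutes the client's stored sampling settings when a parameter is `None`. A minimal before/after sketch (the `tasks` instance is assumed to be an already-initialized `TasksLibrary`):

```python
# 0.9.1 style: positional max_size; None sampling params fell back to
# self.lollms.temperature, self.lollms.top_k, and friends.
# text = tasks.generate("Tell me a joke", 128)

# 0.10.0 style: keyword n_predict; None is now forwarded as-is to
# generate_text, so pass sampling values explicitly if you need them.
text = tasks.generate(
    "Tell me a joke",
    n_predict=128,
    temperature=0.7,
    top_k=50,
)
```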
@@ -81,10 +81,11 @@ class TasksLibrary:
      def fast_gen(
                  self,
                  prompt: str,
-                 max_generation_size: int=None,
+                 n_predict: int=None,
                  placeholders: dict = {},
                  sacrifice: list = ["previous_discussion"],
                  debug: bool = False,
+                 stream: bool = False,
                  callback=None,
                  show_progress=False,
                  temperature = None,
@@ -109,106 +110,39 @@ class TasksLibrary:
          Returns:
          - str: The generated text after removing special tokens ("<s>" and "</s>") and stripping any leading/trailing whitespace.
          """
-         if max_generation_size is None:
+         if n_predict is None:
              prompt_size = self.lollms.tokenize(prompt)
-             max_generation_size = self.lollms.ctx_size - len(prompt_size)
+             n_predict = self.lollms.default_ctx_size - len(prompt_size)
 
          pr = PromptReshaper(prompt)
          prompt = pr.build(placeholders,
-                           self.lollms.tokenize,
-                           self.lollms.detokenize,
-                           self.lollms.ctx_size - max_generation_size,
+                           self.lollms.binding.tokenize,
+                           self.lollms.binding.detokenize,
+                           self.lollms.default_ctx_size - n_predict,
                            sacrifice
                            )
-         ntk = len(self.lollms.tokenize(prompt))
-         max_generation_size = min(self.lollms.ctx_size - ntk, max_generation_size)
+         ntk = len(self.lollms.binding.tokenize(prompt))
+         n_predict = min(self.lollms.default_ctx_size - ntk, n_predict)
          # TODO : add show progress
 
-         gen = self.generate(prompt, max_generation_size, temperature = temperature, top_k = top_k, top_p=top_p, repeat_penalty=repeat_penalty, repeat_last_n=repeat_last_n, callback=callback, show_progress=show_progress).strip().replace("</s>", "").replace("<s>", "")
+         gen = self.lollms.generate_text(
+             prompt=prompt,
+             n_predict = n_predict,
+             stream=stream,
+             streaming_callback=partial(self.process, callback=callback, show_progress=show_progress),
+             temperature= temperature,
+             top_k= top_k,
+             top_p= top_p,
+             repeat_penalty= repeat_penalty,
+             repeat_last_n= repeat_last_n
+             ).strip().replace("</s>", "").replace("<s>", "")
          if debug:
              self.print_prompt("prompt", prompt+gen)
 
          return gen
 
-     def generate_with_images(self, prompt, images, max_size, temperature = None, top_k = None, top_p=None, repeat_penalty=None, repeat_last_n=None, callback=None, debug=False, show_progress=False, stream=False ):
-         ASCIIColors.info("Text generation started: Warming up")
-         self.nb_received_tokens = 0
-         self.bot_says = ""
-         if debug:
-             self.print_prompt("gen",prompt)
-
-         bot_says = self.lollms.generate_with_images(
-             prompt,
-             images,
-             max_size,
-             stream=stream,
-             streaming_callback= partial(self.process, callback=callback, show_progress=show_progress),
-             temperature=self.lollms.temperature if temperature is None else temperature,
-             top_k=self.lollms.top_k if top_k is None else top_k,
-             top_p=self.lollms.top_p if top_p is None else top_p,
-             repeat_penalty=self.lollms.repeat_penalty if repeat_penalty is None else repeat_penalty,
-             repeat_last_n = self.lollms.repeat_last_n if repeat_last_n is None else repeat_last_n
-             ).strip()
-         return self.bot_says if stream else bot_says
 
 
-     def fast_gen_with_images(self, prompt: str, images:list, max_generation_size: int=None, placeholders: dict = {}, sacrifice: list = ["previous_discussion"], debug: bool = False, callback=None, show_progress=False) -> str:
-         """
-         Fast way to generate text from text and images
-
-         This method takes in a prompt, maximum generation size, optional placeholders, sacrifice list, and debug flag.
-         It reshapes the context before performing text generation by adjusting and cropping the number of tokens.
-
-         Parameters:
-         - prompt (str): The input prompt for text generation.
-         - max_generation_size (int): The maximum number of tokens to generate.
-         - placeholders (dict, optional): A dictionary of placeholders to be replaced in the prompt. Defaults to an empty dictionary.
-         - sacrifice (list, optional): A list of placeholders to sacrifice if the window is bigger than the context size minus the number of tokens to generate. Defaults to ["previous_discussion"].
-         - debug (bool, optional): Flag to enable/disable debug mode. Defaults to False.
-
-         Returns:
-         - str: The generated text after removing special tokens ("<s>" and "</s>") and stripping any leading/trailing whitespace.
-         """
-         prompt = "\n".join([
-             "!@>system: I am an AI assistant that can converse and analyze images. When asked to locate something in an image you send, I will reply with:",
-             "boundingbox(image_index, label, left, top, width, height)",
-             "Where:",
-             "image_index: 0-based index of the image",
-             "label: brief description of what is located",
-             "left, top: x,y coordinates of top-left box corner (0-1 scale)",
-             "width, height: box dimensions as fraction of image size",
-             "Coordinates have origin (0,0) at top-left, (1,1) at bottom-right.",
-             "For other queries, I will respond conversationally to the best of my abilities.",
-             prompt
-         ])
-
-         if max_generation_size is None:
-             prompt_size = self.lollms.tokenize(prompt)
-             max_generation_size = self.lollms.ctx_size - len(prompt_size)
-
-         pr = PromptReshaper(prompt)
-         prompt = pr.build(placeholders,
-                           self.lollms.tokenize,
-                           self.lollms.detokenize,
-                           self.lollms.ctx_size - max_generation_size,
-                           sacrifice
-                           )
-         ntk = len(self.lollms.tokenize(prompt))
-         max_generation_size = min(self.lollms.ctx_size - ntk, max_generation_size)
-         # TODO : add show progress
-
-         gen = self.generate_with_images(prompt, images, max_generation_size, callback=callback, show_progress=show_progress).strip().replace("</s>", "").replace("<s>", "")
-         try:
-             gen = process_ai_output(gen, images, "/discussions/")
-         except Exception as ex:
-             pass
-         if debug:
-             self.print_prompt("prompt", prompt+gen)
-
-         return gen
-
-
-
      def step_start(self, step_text, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
          """This triggers a step start
 
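The `n_predict` bookkeeping in `fast_gen` reduces to simple context arithmetic, worked through below with illustrative numbers (all values are examples, not library defaults):

```python
# Illustrative: a 4096-token context window and a 3000-token prompt.
default_ctx_size = 4096
prompt_tokens = 3000

# 1) If the caller gives no n_predict, use whatever the context leaves over:
n_predict = default_ctx_size - prompt_tokens                      # 1096

# 2) PromptReshaper rebuilds the prompt to fit ctx_size - n_predict tokens,
#    sacrificing placeholders such as "previous_discussion" if needed.
reshaped_tokens = 2900                                            # e.g. after cropping

# 3) Re-clamp so prompt + generation still fits the context window:
n_predict = min(default_ctx_size - reshaped_tokens, n_predict)    # still 1096
print(n_predict)
```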
@@ -294,7 +228,7 @@ class TasksLibrary:
              prompt_parts[sacrifice_id] = sacrifice_text
          return "\n".join([s for s in prompt_parts if s!=""])
 
-     def translate_text_chunk(self, text_chunk, output_language:str="french", host_address:str=None, model_name: str = None, temperature=0.1, max_generation_size=3000):
+     def translate_text_chunk(self, text_chunk, output_language:str="french", host_address:str=None, model_name: str = None, temperature=0.1, max_generation_size=3000, callback=None, show_progress:bool=False):
          """
          This function translates a given text chunk into a specified language.
 
@@ -310,22 +244,19 @@ class TasksLibrary:
          str: The translated text.
          """
          translated = self.lollms.generate_text(
-             "\n".join([
-                 f"!@>system:",
+             prompt= "\n".join([
+                 self.lollms.system_full_header,
                  f"Translate the following text to {output_language}.",
                  "Be faithful to the original text and do not add or remove any information.",
                  "Respond only with the translated text.",
                  "Do not add comments or explanations.",
-                 f"!@>text to translate:",
+                 self.lollms.system_custom_header("text to translate"),
                  f"{text_chunk}",
-                 f"!@>translation:",
+                 self.lollms.ai_custom_header("translation"),
              ]),
-             host_address,
-             model_name,
-             personality = -1,
-             n_predict=max_generation_size,
-             stream=False,
-             temperature=temperature
+             n_predict = max_generation_size,
+             streaming_callback=partial(self.process, callback=callback, show_progress=show_progress),
+             temperature= temperature
          )
          return translated
 
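A hedged usage sketch of the updated signature (the `tasks` instance is assumed to be an initialized TasksLibrary; note that `host_address` and `model_name` are still accepted but, per the hunk above, no longer forwarded to `generate_text`):

```python
translated = tasks.translate_text_chunk(
    "Bonjour tout le monde",
    output_language="english",
    max_generation_size=512,
)
print(translated)
```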
@@ -435,12 +366,12 @@ class TasksLibrary:
          choices = "\n".join([f"{i}. {possible_answer}" for i, possible_answer in enumerate(possible_answers)])
          elements = [conditionning] if conditionning!="" else []
          elements += [
-             "!@>system:",
+             self.lollms.system_full_header,
              "Answer this multi choices question.",
          ]
          if context!="":
              elements+=[
-                 "!@>Context:",
+                 self.lollms.system_custom_header("Context"),
                  f"{context}",
              ]
          elements +=[
@@ -450,14 +381,16 @@ class TasksLibrary:
              "the output should be an integer."
          ]
          elements += [
-             f"!@>question: {question}",
-             "!@>possible answers:",
+             f'{self.lollms.user_custom_header("question")} {question}',
+             f'{self.lollms.user_custom_header("possible answers")}',
              f"{choices}",
          ]
-         elements += ["!@>answer:"]
+         elements += [self.lollms.ai_custom_header("answer")]
          prompt = self.build_prompt(elements)
 
-         gen = self.lollms.generate(prompt, max_answer_length, temperature=0.1, top_k=50, top_p=0.9, repeat_penalty=1.0, repeat_last_n=50, streaming_callback=self.sink).strip().replace("</s>","").replace("<s>","")
+         gen = self.lollms.generate_text(
+             prompt=prompt,
+             streaming_callback=self.sink).strip().replace("</s>","").replace("<s>","")
          if len(gen)>0:
              selection = gen.strip().split()[0].replace(",","").replace(".","")
              self.print_prompt("Multi choice selection",prompt+gen)
@@ -499,8 +432,8 @@ class TasksLibrary:
              callback,
              chunk_summary_post_processing=chunk_summary_post_processing,
              summary_mode=summary_mode)
-         tk = self.lollms.tokenize(text)
-         tk = self.lollms.tokenize(text)
+         tk = self.lollms.binding.tokenize(text)
+         tk = self.lollms.binding.tokenize(text)
          dtk_ln=prev_len-len(tk)
          prev_len = len(tk)
          self.step(f"Current text size : {prev_len}, max summary size : {max_summary_size}")
@@ -2,7 +2,7 @@ import requests
  from pydantic import BaseModel
  from lollms_client.lollms_core import LollmsClient
  from typing import Optional
-
+ from ascii_colors import ASCIIColors
  class LollmsTTSRequest(BaseModel):
      text: str
      voice: str|None = None
@@ -10,7 +10,7 @@ class LollmsTTSRequest(BaseModel):
 
  class LollmsTTS:
      def __init__(self, lollmsClient:LollmsClient):
-         self.base_url = lollmsClient.host_address
+         self.base_url = lollmsClient.binding.host_address
 
      def text2Audio(self, text, voice=None, fn=None):
          endpoint = f"{self.base_url}/text2Audio"
@@ -28,7 +28,11 @@ class LollmsTTS:
          response = requests.get(endpoint)
          response.raise_for_status() # Raise an error for bad status codes
          voices = response.json() # Assuming the response is in JSON format
-         return voices["voices"]
+         if "error" in voices:
+             ASCIIColors.error(voices["error"])
+             return []
+         else:
+             return voices["voices"]
      except requests.exceptions.RequestException as e:
          print(f"Couldn't list voices: {e}")
          return ["main_voice"]
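
A short usage sketch of this helper (assumes a reachable LoLLMs server; the `fn` argument is presumed to be the output file name, which this diff does not confirm):

```python
from lollms_client import LollmsClient
from lollms_client.lollms_tts import LollmsTTS  # module path taken from the RECORD below

lc = LollmsClient()
tts = LollmsTTS(lc)   # base_url now comes from lc.binding.host_address, per the hunk above
tts.text2Audio("Hello from lollms-client", voice=None, fn="hello.wav")
```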
@@ -39,4 +39,22 @@ class SENDER_TYPES(Enum):
 
  class SUMMARY_MODE(Enum):
      SUMMARY_MODE_SEQUENCIAL = 0
-     SUMMARY_MODE_HIERARCHICAL = 0
+     SUMMARY_MODE_HIERARCHICAL = 0
+
+ class ELF_COMPLETION_FORMAT(Enum):
+     Instruct = 0
+     Chat = 1
+     @classmethod
+     def from_string(cls, format_string: str) -> 'ELF_COMPLETION_FORMAT':
+         format_mapping = {
+             "Instruct": cls.Instruct,
+             "Chat": cls.Chat,
+         }
+
+         try:
+             return format_mapping[format_string.upper()]
+         except KeyError:
+             raise ValueError(f"Invalid format string: {format_string}. Must be one of {list(format_mapping.keys())}.")
+
+     def __str__(self):
+         return self.name
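
Two observations on this hunk, plus a small usage sketch: SUMMARY_MODE_HIERARCHICAL still shares the value 0 with SUMMARY_MODE_SEQUENCIAL, and `from_string()` upper-cases its argument while the mapping keys stay capitalized ("Instruct", "Chat"), so as written every lookup falls through to the ValueError. Direct member access works as expected:

```python
from lollms_client.lollms_types import ELF_COMPLETION_FORMAT

fmt = ELF_COMPLETION_FORMAT.Chat
print(fmt)   # "Chat", via the __str__ override above

# fmt = ELF_COMPLETION_FORMAT.from_string("Chat")
# would raise ValueError: "Chat".upper() == "CHAT" is not a key of the mapping.
```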
8 files without changes
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lollms_client
- Version: 0.9.1
+ Version: 0.10.0
  Summary: A client library for LoLLMs generate endpoint
  Home-page: https://github.com/ParisNeo/lollms_client
  Author: ParisNeo
@@ -37,30 +37,34 @@ from lollms_client import LollmsClient
  lc = LollmsClient()
 
  # Specify a custom host and port
- lc = LollmsClient(host_address="http://some.server:9600")
-
- # Use a specific model with a local or remote ollama server
- from lollms_client import ELF_GENERATION_FORMAT
- lc = LollmsClient(model_name="phi4:latest", default_generation_mode = ELF_GENERATION_FORMAT.OLLAMA)
-
- # Use a specific model with a local or remote OpenAI server (you can either set your key as an environment variable or pass it here)
- lc = LollmsClient(model_name="gpt-3.5-turbo-0125", default_generation_mode = ELF_GENERATION_FORMAT.OPENAI)
+ lc = LollmsClient(host_address="http://some.lollms.server:9600")
 
+ # Use a specific model with a local ollama server
+ lc = LollmsClient("ollama", model_name="phi4:latest")
  # Use a specific model with an Ollama binding on the server, with a context size of 32800
  lc = LollmsClient(
+     "ollama",
      host_address="http://some.other.server:11434",
      model_name="phi4:latest",
      ctx_size=32800,
-     default_generation_mode=ELF_GENERATION_FORMAT.OLLAMA
  )
+ # Use a specific model with a local or remote OpenAI server (you can either set your key as an environment variable or pass it here)
+ lc = LollmsClient("openai", model_name="gpt-3.5-turbo-0125", service_key="Your key, or omit this if you already have it set as an environment variable")
+
+ # Use a specific model with another OpenAI-compatible server
+ lc = LollmsClient("openai", host_address="http://some.other.server", model_name="gpt-3.5-turbo-0125")
  ```
 
  ### Text Generation
 
- Use `generate()` for generating text from the lollms API.
+ Use `generate_text()` for generating text from the lollms API.
 
  ```python
- response = lc.generate(prompt="Once upon a time", stream=False, temperature=0.5)
+ response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
+ print(response)
+ ```
+ ```python
+ response = lc.generate_text(prompt="Once upon a time", images=["path to image1", "path to image 2"], stream=False, temperature=0.5)
  print(response)
  ```
 
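The binding signature earlier in this diff suggests streaming is driven by a two-argument callback receiving each chunk and its message type. A hedged sketch, assuming `lc` from the README examples above forwards these parameters to the binding:

```python
def on_chunk(chunk, msg_type):
    # chunk: the text fragment; msg_type: e.g. MSG_TYPE.MSG_TYPE_CHUNK
    print(chunk, end="", flush=True)

response = lc.generate_text(
    prompt="Once upon a time",
    stream=True,
    streaming_callback=on_chunk,
)
```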
@@ -0,0 +1,34 @@
+ lollms_client/__init__.py,sha256=tBrrYsu4fpi8ZR5ZzTjY88O0UMv9_tqG--VXGQxK2UU,493
+ lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
+ lollms_client/lollms_core.py,sha256=wdeKdxXckXTqpjA2pRvmBt019v6KHEkZHGABeOsjcAU,47283
+ lollms_client/lollms_discussion.py,sha256=9b83m0D894jwpgssWYTQHbVxp1gJoI-J947Ui_dRXII,2073
+ lollms_client/lollms_functions.py,sha256=p8SFtmEPqvVCsIz2fZ5HxyOHaxjrAo5c12uTzJnb6m8,3594
+ lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
+ lollms_client/lollms_llm_binding.py,sha256=aegUdAz_3DwJ1nDmq6lxD6x1bI4c4jVa6IbgdAVyaFE,8233
+ lollms_client/lollms_personality.py,sha256=gTOU7WJrxyNE88g-9-is5QxMf84s6xbEMAv--SD2P64,20313
+ lollms_client/lollms_personality_worker.py,sha256=rQbZg9Gn-R3b6x0Ryb4JPWJzBfn4fObDzj5IWYez_9o,65331
+ lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
+ lollms_client/lollms_stt.py,sha256=4knP8SSj2S7DAFGXpIHc-_J6pb9xjExutEBd0RNex5E,1282
+ lollms_client/lollms_tasks.py,sha256=Ur-Aaqr3Tm9htlRAI16NKwaVAGkY-_SENhUEikikZwA,34458
+ lollms_client/lollms_tti.py,sha256=WznZ5ADhig-SFNmwlgviLZaAfl67NVqnZxYzhel3vxU,1287
+ lollms_client/lollms_tts.py,sha256=KjFNkRmpM4kZpHm-IrPoYr4kPs2nOHNKPov0HXiinkg,1468
+ lollms_client/lollms_types.py,sha256=cfc1sremM8KR4avkYX99fIVkkdRvXErrCWKGjLrgv50,2723
+ lollms_client/lollms_utilities.py,sha256=YAgamfp0pBVApR68AHKjhp1lh6isMNF8iadwWLl63c0,7045
+ lollms_client/llm_bindings/__init__.py,sha256=9sWGpmWSSj6KQ8H4lKGCjpLYwhnVdL_2N7gXCphPqh4,14
+ lollms_client/llm_bindings/lollms/__init__.py,sha256=KGRMOSZGvlbAXiTLlaUio372jyfvt6AX8_j_DU0T-a4,11822
+ lollms_client/llm_bindings/ollama/__init__.py,sha256=f798oBx7d5OR9JVT3W1m9S_GP-eZEn-g4zSJ31tAPhU,12294
+ lollms_client/llm_bindings/openai/__init__.py,sha256=LeRMt1rZA4PHr3AGoa-oot7D0d62PNbduVCz5wOWyEY,10373
+ lollms_client/llm_bindings/transformers/__init__.py,sha256=FBUTryB-GIKV6SlSNyj59KdbJ19g6VYwc385OD2vtLQ,11684
+ lollms_client/stt_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lollms_client/stt_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lollms_client/tti_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lollms_client/tti_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lollms_client/tts_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lollms_client/tts_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lollms_client/ttv_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lollms_client-0.10.0.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+ lollms_client-0.10.0.dist-info/METADATA,sha256=ScADA2xlQdZwRXGAjwW2AvaeoZtcfOYbSbj0PlN4_-Q,6663
+ lollms_client-0.10.0.dist-info/WHEEL,sha256=y4mX-SOX4fYIkonsAGA5N0Oy-8_gI4FXw5HNI1xqvWg,91
+ lollms_client-0.10.0.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+ lollms_client-0.10.0.dist-info/RECORD,,
@@ -1,20 +0,0 @@
- lollms_client/__init__.py,sha256=_1_zkzDrAs43mf6LEBVZUEYCGZ8KRmj-jd_vgkB8xSw,516
- lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
- lollms_client/lollms_core.py,sha256=KjDXCiCZS_dI1DyEHRlw0Qhim9zJfVmmCd_Lxp9U0aU,86103
- lollms_client/lollms_discussion.py,sha256=9b83m0D894jwpgssWYTQHbVxp1gJoI-J947Ui_dRXII,2073
- lollms_client/lollms_functions.py,sha256=p8SFtmEPqvVCsIz2fZ5HxyOHaxjrAo5c12uTzJnb6m8,3594
- lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
- lollms_client/lollms_personality.py,sha256=gTOU7WJrxyNE88g-9-is5QxMf84s6xbEMAv--SD2P64,20313
- lollms_client/lollms_personality_worker.py,sha256=rQbZg9Gn-R3b6x0Ryb4JPWJzBfn4fObDzj5IWYez_9o,65331
- lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
- lollms_client/lollms_stt.py,sha256=4knP8SSj2S7DAFGXpIHc-_J6pb9xjExutEBd0RNex5E,1282
- lollms_client/lollms_tasks.py,sha256=Qy7BG4pqcwsjMLTCfRhG8M4PhJnRZhouV4blXHICqmI,38630
- lollms_client/lollms_tti.py,sha256=WznZ5ADhig-SFNmwlgviLZaAfl67NVqnZxYzhel3vxU,1287
- lollms_client/lollms_tts.py,sha256=WznZ5ADhig-SFNmwlgviLZaAfl67NVqnZxYzhel3vxU,1287
- lollms_client/lollms_types.py,sha256=uuaADVVfi1sZucY7gT8v-EDN5xrMI3vy_4M7k7Uz3eU,2170
- lollms_client/lollms_utilities.py,sha256=YAgamfp0pBVApR68AHKjhp1lh6isMNF8iadwWLl63c0,7045
- lollms_client-0.9.1.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
- lollms_client-0.9.1.dist-info/METADATA,sha256=gdd_TJNGajt13fJfPBaFPeyzulrc0ZWZWfTAK3VWTZw,6400
- lollms_client-0.9.1.dist-info/WHEEL,sha256=y4mX-SOX4fYIkonsAGA5N0Oy-8_gI4FXw5HNI1xqvWg,91
- lollms_client-0.9.1.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
- lollms_client-0.9.1.dist-info/RECORD,,
- lollms_client-0.9.1.dist-info/RECORD,,