lollms-client 0.29.3__py3-none-any.whl → 0.31.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

@@ -10,6 +10,7 @@ from typing import Optional, Dict, List
  from ascii_colors import trace_exception, ASCIIColors
  from lollms_client.lollms_types import MSG_TYPE
  from lollms_client.lollms_discussion import LollmsDiscussion
+ from lollms_client.lollms_utilities import ImageTokenizer
  import re
  class LollmsLLMBinding(ABC):
      """Abstract base class for all LOLLMS LLM bindings"""
@@ -197,6 +198,22 @@ class LollmsLLMBinding(ABC):
          """
          pass

+     def count_image_tokens(self, image: str) -> int:
+         """
+         Estimate the number of tokens for an image using ImageTokenizer based on self.model_name.
+
+         Args:
+             image (str): Image to count tokens from. Either base64 string, path to image file, or URL.
+
+         Returns:
+             int: Estimated number of tokens for the image. Returns -1 on error.
+         """
+         try:
+             # Delegate token counting to ImageTokenizer
+             return ImageTokenizer(self.model_name).count_image_tokens(image)
+         except Exception as e:
+             ASCIIColors.warning(f"Could not estimate image tokens: {e}")
+             return -1
      @abstractmethod
      def embed(self, text: str, **kwargs) -> list:
          """
@@ -354,3 +354,139 @@ def process_ai_output(output, images, output_folder):

      return output

+ import os
+ import base64
+ import requests
+ from PIL import Image
+ from io import BytesIO
+ import math
+
+ class ASCIIColors:
+     @staticmethod
+     def warning(message):
+         print(f"Warning: {message}")
+
+ class ImageTokenizer:
+     def __init__(self, model_name: str):
+         self.model_name = model_name.lower()
+
+     def count_image_tokens(self, image: str) -> int:
+         """
+         Estimate the number of tokens for an image based on the model specified in self.model_name.
+
+         Args:
+             image (str): Image to count tokens from. Either base64 string, path to image file, or URL.
+
+         Returns:
+             int: Estimated number of tokens for the image. Returns -1 on error.
+         """
+         try:
+             # Load image and get dimensions
+             img = self._load_image(image)
+             if img is None:
+                 return -1
+             width, height = img.size
+
+             # Model-specific token counting
+             if "phi-3-vision" in self.model_name:
+                 # Phi-3-vision: 144 tokens per 336x336 patch
+                 patch_size = 336
+                 patches_x = math.ceil(width / patch_size)
+                 patches_y = math.ceil(height / patch_size)
+                 total_patches = patches_x * patches_y
+                 return total_patches * 144
+
+             elif "gpt-4o" in self.model_name:
+                 # GPT-4o: Estimate ~100 tokens per 224x224 patch (based on industry standards)
+                 patch_size = 224
+                 patches_x = math.ceil(width / patch_size)
+                 patches_y = math.ceil(height / patch_size)
+                 total_patches = patches_x * patches_y
+                 return total_patches * 100
+
+             elif "claude" in self.model_name:
+                 # Claude: Estimate ~150 tokens per 336x336 patch (based on similar models)
+                 patch_size = 336
+                 patches_x = math.ceil(width / patch_size)
+                 patches_y = math.ceil(height / patch_size)
+                 total_patches = patches_x * patches_y
+                 return total_patches * 150
+
+             elif "gemini" in self.model_name:
+                 # Gemini: Estimate ~128 tokens per 256x256 patch (based on industry standards)
+                 patch_size = 256
+                 patches_x = math.ceil(width / patch_size)
+                 patches_y = math.ceil(height / patch_size)
+                 total_patches = patches_x * patches_y
+                 return total_patches * 128
+
+             elif "mistral" in self.model_name or "mixtral" in self.model_name:
+                 # Mistral: Estimate ~120 tokens per 336x336 patch (based on similar models)
+                 patch_size = 336
+                 patches_x = math.ceil(width / patch_size)
+                 patches_y = math.ceil(height / patch_size)
+                 total_patches = patches_x * patches_y
+                 return total_patches * 120
+
+             elif "llama" in self.model_name:
+                 # Llama multimodal: Estimate ~100 tokens per 224x224 patch (based on CLIP-like models)
+                 patch_size = 224
+                 patches_x = math.ceil(width / patch_size)
+                 patches_y = math.ceil(height / patch_size)
+                 total_patches = patches_x * patches_y
+                 return total_patches * 100
+             elif "gemma" in self.model_name:
+                 # Gemma: Fixed 256 tokens for images normalized to 896x896
+                 return 256
+
+             else:
+                 # Fallback: Original byte-based estimation for unsupported models
+                 CONSTANT = 128/(512*512)
+                 if image.startswith("http://") or image.startswith("https://"):
+                     response = requests.get(image)
+                     response.raise_for_status()
+                     img_bytes = response.content
+                     size = len(img_bytes)
+                 elif os.path.isfile(image):
+                     size = os.path.getsize(image)
+                 elif image.startswith("data:image"):
+                     header, b64data = image.split(",", 1)
+                     img_bytes = base64.b64decode(b64data)
+                     size = len(img_bytes)
+                 else:
+                     img_bytes = base64.b64decode(image)
+                     size = len(img_bytes)
+                 return int(size * CONSTANT)

+         except Exception as e:
+             ASCIIColors.warning(f"Could not estimate image tokens: {e}")
+             return -1
+
+     def _load_image(self, image: str) -> Image.Image:
+         """
+         Load an image from a URL, file path, or base64 string and return a PIL Image object.
+
+         Args:
+             image (str): Image source (URL, file path, or base64 string).
+
+         Returns:
+             Image.Image: PIL Image object, or None if loading fails.
+         """
+         try:
+             if image.startswith("http://") or image.startswith("https://"):
+                 response = requests.get(image)
+                 response.raise_for_status()
+                 img_bytes = response.content
+                 return Image.open(BytesIO(img_bytes))
+             elif os.path.isfile(image):
+                 return Image.open(image)
+             elif image.startswith("data:image"):
+                 header, b64data = image.split(",", 1)
+                 img_bytes = base64.b64decode(b64data)
+                 return Image.open(BytesIO(img_bytes))
+             else:
+                 img_bytes = base64.b64decode(image)
+                 return Image.open(BytesIO(img_bytes))
+         except Exception as e:
+             ASCIIColors.warning(f"Could not load image: {e}")
+             return None
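To make the patch arithmetic in these branches concrete, here is a small worked example mirroring the `gpt-4o` branch and the byte-based fallback; the image dimensions and byte size are illustrative:

```python
import math

# "gpt-4o" branch: 224x224 patches at ~100 tokens each.
width, height = 1024, 768
patches_x = math.ceil(width / 224)   # ceil(1024/224) = 5
patches_y = math.ceil(height / 224)  # ceil(768/224)  = 4
print(patches_x * patches_y * 100)   # 5 * 4 * 100 = 2000 tokens

# Fallback branch: tokens scale with raw byte size at 128 tokens per 512*512 bytes.
CONSTANT = 128 / (512 * 512)
size = 262144                        # e.g., a 256 KiB file
print(int(size * CONSTANT))          # 262144 * 128/262144 = 128 tokens
```

Note that the fallback scales with encoded file size rather than pixel dimensions, so a heavily compressed image yields a much lower estimate than the patch-based branches would.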
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 0.29.3
+ Version: 0.31.0
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
@@ -48,6 +48,7 @@ Whether you're connecting to a remote LoLLMs server, an Ollama instance, the Ope

  * 🔌 **Versatile Binding System:** Seamlessly switch between different LLM backends (LoLLMs, Ollama, OpenAI, Llama.cpp, Transformers, vLLM, OpenLLM) without major code changes.
  * 🗣️ **Multimodal Support:** Interact with models capable of processing images and generate various outputs like speech (TTS) and images (TTI).
+ * 🖼️ **Selective Image Activation:** Control which images in a message are active and sent to the model, allowing for fine-grained multimodal context management without deleting the original data.
  * 🤖 **Function Calling with MCP:** Empowers LLMs to use external tools and functions through the Model Context Protocol (MCP), with built-in support for local Python tool execution via `local_mcp` binding and its default tools (file I/O, internet search, Python interpreter, image generation).
  * 🎭 **Personalities as Agents:** Personalities can now define their own set of required tools (MCPs) and have access to static or dynamic knowledge bases (`data_source`), turning them into self-contained, ready-to-use agents.
  * 🚀 **Streaming & Callbacks:** Efficiently handle real-time text generation with customizable callback functions, including during MCP interactions.
@@ -408,6 +409,64 @@ print("------------------------------------------")
  3. **`memorize()`:** After the user mentions their favorite language, `memorize()` is called. The LLM analyzes the last turn, identifies this new, important fact, and appends it to the `discussion.memory` zone.
  4. **Recall:** In the final turn, when asked to recall the favorite language, the AI has access to the updated `memory` content within its system context and can correctly answer "Rust". This demonstrates true long-term, stateful memory.

+ ### Managing Multimodal Context: Activating and Deactivating Images
+
+ When working with multimodal models, you can now control which images in a message are active and sent to the model. This is useful for focusing the AI's attention, saving tokens on expensive vision models, or allowing a user to correct which images are relevant.
+
+ This is managed at the `LollmsMessage` level using the `toggle_image_activation()` method.
+
+ ```python
+ from lollms_client import LollmsClient, LollmsDiscussion, LollmsDataManager
+ from ascii_colors import ASCIIColors
+ import base64
+ from pathlib import Path
+
+ # Helper to create a dummy image b64 string
+ def create_dummy_image(text):
+     from PIL import Image, ImageDraw
+     img = Image.new('RGB', (100, 30), color = (73, 109, 137))
+     d = ImageDraw.Draw(img)
+     d.text((10,10), text, fill=(255,255,0))
+     buffer = Path("temp_img.png")
+     img.save(buffer, "PNG")
+     b64 = base64.b64encode(buffer.read_bytes()).decode('utf-8')
+     buffer.unlink()
+     return b64
+
+ # --- 1. Setup ---
+ lc = LollmsClient(binding_name="ollama", model_name="llava")
+ discussion = LollmsDiscussion.create_new(lollms_client=lc)
+
+ # --- 2. Add a message with multiple images ---
+ img1_b64 = create_dummy_image("Image 1")
+ img2_b64 = create_dummy_image("Image 2: Cat")
+ img3_b64 = create_dummy_image("Image 3")
+
+ discussion.add_message(
+     sender="user",
+     content="What is in the second image?",
+     images=[img1_b64, img2_b64, img3_b64]
+ )
+ user_message = discussion.get_messages()[-1]
+
+ # --- 3. Check the initial state ---
+ ASCIIColors.magenta("--- Initial State (All 3 Images Active) ---")
+ status_before = discussion.get_context_status()
+ print(f"Message History Text:\n{status_before['zones']['message_history']['content']}")
+
+ # --- 4. Deactivate irrelevant images ---
+ ASCIIColors.magenta("\n--- Deactivating images 1 and 3 ---")
+ user_message.toggle_image_activation(index=0, active=False) # Deactivate first image
+ user_message.toggle_image_activation(index=2, active=False) # Deactivate third image
+
+ # --- 5. Check the new state ---
+ ASCIIColors.magenta("\n--- New State (Only Image 2 is Active) ---")
+ status_after = discussion.get_context_status()
+ print(f"Message History Text:\n{status_after['zones']['message_history']['content']}")
+
+ ASCIIColors.green("\nNotice the message now says '(1 image(s) attached)' instead of 3.")
+ ASCIIColors.green("Only the active image will be sent to the multimodal LLM.")
+ ```
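Grounded only in the `toggle_image_activation(index=..., active=...)` signature shown in the example above, a deactivated image can presumably be re-enabled the same way. A follow-up sketch, assuming the objects from the example are still in scope:

```python
# Re-enable the first image and re-inspect the context; with images 1 and 2
# active, the message should now report two attached images.
user_message.toggle_image_activation(index=0, active=True)
status = discussion.get_context_status()
print(status['zones']['message_history']['content'])
```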

  ## Documentation

@@ -788,228 +847,8 @@ Contributions are welcome! Whether it's bug reports, feature suggestions, docume

  ## License

- This project is licensed under the **Apache 2.0 License**. See the [LICENSE](LICENSE) file for details (assuming you have a LICENSE file, if not, state "Apache 2.0 License").
+ This project is licensed under the **Apache 2.0 License**. See the [LICENSE](LICENSE) file for details.

  ## Changelog

  For a list of changes and updates, please refer to the [CHANGELOG.md](CHANGELOG.md) file.
- ```
-
- ---
- ### Phase 2: Update `docs/md/lollms_discussion.md`
-
- ```markdown
- # LollmsDiscussion Class
-
- The `LollmsDiscussion` class is a cornerstone of the `lollms-client` library, designed to represent and manage a single conversation. It provides a robust interface for handling message history, conversation branching, context formatting, and persistence.
-
- ## Overview
-
- A `LollmsDiscussion` can be either **in-memory** or **database-backed**, offering flexibility for different use cases.
-
- - **In-Memory:** Ideal for temporary or transient conversations. The discussion exists only for the duration of the application's runtime.
- - **Database-Backed:** Provides persistence by saving the entire conversation, including all branches and metadata, to a database file (e.g., SQLite). This is perfect for applications that need to retain user chat history.
-
- ## Key Features
-
- - **Message Management:** Add user and AI messages, which are automatically linked to form a conversation tree.
- - **Branching:** The conversation is a tree, not a simple list. This allows for exploring different conversational paths from any point. You can regenerate an AI response, and it will create a new branch.
- - **Context Exporting:** The `export()` method formats the conversation history for various LLM backends (`openai_chat`, `ollama_chat`, `lollms_text`, `markdown`), ensuring compatibility.
- - **Automatic Pruning:** To prevent exceeding the model's context window, it can automatically summarize older parts of the conversation without losing the original data.
- - **Sophisticated Context Layering:** Manage conversation state with multiple, distinct data zones (`user_data_zone`, `discussion_data_zone`, `personality_data_zone`) and a long-term `memory` field, allowing for rich and persistent context.
-
- ## Creating a Discussion
-
- The recommended way to create a discussion is using the `LollmsDiscussion.create_new()` class method.
-
- ```python
- from lollms_client import LollmsClient, LollmsDataManager, LollmsDiscussion
-
- # For an in-memory discussion (lost when the app closes)
- lc = LollmsClient(binding_name="ollama", model_name="llama3")
- discussion = LollmsDiscussion.create_new(lollms_client=lc, id="my-temp-discussion")
-
- # For a persistent, database-backed discussion
- # This will create a 'discussions.db' file if it doesn't exist
- db_manager = LollmsDataManager('sqlite:///discussions.db')
- discussion_db = LollmsDiscussion.create_new(
-     lollms_client=lc,
-     db_manager=db_manager,
-     discussion_metadata={"title": "My First DB Chat"}
- )
- ```
-
- ## Core Properties
-
- ### Data and Memory Zones
-
- `LollmsDiscussion` moves beyond a single `data_zone` to a more structured system of context layers. These string properties allow you to inject specific, persistent information into the AI's system prompt, separate from the main conversational flow. The content of all non-empty zones is automatically formatted and included in the prompt.
-
- #### `system_prompt`
- The main instruction set for the AI's persona and core task. It's the foundation of the prompt.
- - **Purpose:** Defines who the AI is and what its primary goal is.
- - **Example:** `"You are a helpful and friendly assistant."`
-
- #### `memory`
- A special zone for storing long-term, cross-discussion information about the user or topics. It is designed to be built up over time.
- - **Purpose:** To give the AI a persistent memory that survives across different chat sessions.
- - **Example:** `"User's name is Alex.\nUser's favorite programming language is Rust."`
-
- #### `user_data_zone`
- Holds information specific to the current user that might be relevant for the session.
- - **Purpose:** Storing user preferences, profile details, or session-specific goals.
- - **Example:** `"Current project: API development.\nUser is a beginner in Python."`
-
- #### `discussion_data_zone`
- Contains context relevant only to the current discussion.
- - **Purpose:** Holding summaries, state information, or data relevant to the current conversation topic that needs to be kept in front of the AI.
- - **Example:** `"The user has already tried libraries A and B and found them too complex."`
-
- #### `personality_data_zone`
- This is where static or dynamic knowledge from a `LollmsPersonality`'s `data_source` is loaded.
- - **Purpose:** To provide personalities with their own built-in knowledge bases or rulesets.
- - **Example:** `"Rule 1: All code must be documented.\nRule 2: Use type hints."`
-
- #### Example: How Zones are Combined
-
- The `export()` method intelligently combines these zones. If all zones were filled, the effective system prompt would look something like this:
-
- ```
- !@>system:
- You are a helpful and friendly assistant.
-
- -- Memory --
- User's name is Alex.
- User's favorite programming language is Rust.
-
- -- User Data Zone --
- Current project: API development.
- User is a beginner in Python.
-
- -- Discussion Data Zone --
- The user has already tried libraries A and B and found them too complex.
-
- -- Personality Data Zone --
- Rule 1: All code must be documented.
- Rule 2: Use type hints.
- ```
- ### Other Important Properties
-
- - `id`: The unique identifier for the discussion.
- - `metadata`: A dictionary for storing any custom metadata, like a title.
- - `active_branch_id`: The ID of the message at the "tip" of the current conversation branch.
- - `messages`: A list of all `LollmsMessage` objects in the discussion.
-
- ## Main Methods
-
- ### `chat()`
- The `chat()` method is the primary way to interact with the discussion. It handles a full user-to-AI turn, including invoking the advanced agentic capabilities of the `LollmsClient`.
-
- #### Personalities, Tools, and Data Sources
-
- The `chat` method intelligently handles tool activation and data loading when a `LollmsPersonality` is provided. This allows personalities to be configured as self-contained agents with their own default tools and knowledge bases.
-
- **Tool Activation (`use_mcps`):**
-
- 1. **Personality has tools, `use_mcps` is not set:** The agent will use the tools defined in `personality.active_mcps`.
- 2. **Personality has tools, `use_mcps` is also set:** The agent will use a *combination* of tools from both the personality and the `use_mcps` parameter for that specific turn. Duplicates are automatically handled. This allows you to augment a personality's default tools on the fly.
- 3. **Personality has no tools, `use_mcps` is set:** The agent will use only the tools specified in the `use_mcps` parameter.
- 4. **Neither are set:** The agentic turn is not triggered (unless a data store is used), and a simple chat generation occurs.
-
- **Knowledge Loading (`data_source`):**
-
- Before generation, the `chat` method checks for `personality.data_source`:
-
- - **If it's a `str` (static data):** The string is loaded into the `discussion.personality_data_zone`, making it part of the system context for the current turn.
- - **If it's a `Callable` (dynamic data):**
-   1. The AI first generates a query based on the current conversation.
-   2. The `chat` method calls your function with this query.
-   3. The returned string is loaded into the `discussion.personality_data_zone`.
-   4. The final response generation proceeds with this newly added context.
-
- This makes it easy to create powerful, reusable agents. For a complete, runnable example of building a **Python Coder Agent** that uses both `active_mcps` and a static `data_source`, **please see the "Putting It All Together" section in the main `README.md` file.**
-
- ### New Methods for State and Context Management
-
- #### `memorize()`
- This method empowers the AI to build its own long-term memory. It analyzes the current conversation, extracts key facts or preferences, and appends them to the `memory` data zone.
-
- - **How it works:** It uses the LLM itself to summarize the most important, long-term takeaways from the recent conversation.
- - **Use Case:** Perfect for creating assistants that learn about the user over time, remembering their name, preferences, or past projects without the user needing to repeat themselves.
-
- ```python
- # User has just said: "My company is called 'Innovatech'."
- discussion.chat("My company is called 'Innovatech'.")
-
- # Now, trigger memorization
- discussion.memorize()
- discussion.commit() # Save the updated memory to the database
-
- # The discussion.memory field might now contain:
- # "... previous memory ...
- #
- # --- Memory entry from 2024-06-27 10:30:00 UTC ---
- # - User's company is named 'Innovatech'."
- ```
-
- #### `get_context_status()`
-
- Provides a detailed, real-time breakdown of the current prompt context, showing exactly what will be sent to the model and how many tokens each major component occupies. This is crucial for debugging context issues and understanding token usage.
-
- The method accurately reflects the structure of the `lollms_text` format, where all system-level instructions (the main prompt, all data zones, and the pruning summary) are combined into a single system block.
-
- - **Return Value:** A dictionary containing:
-   - `max_tokens`: The configured maximum token limit for the discussion.
-   - `current_tokens`: The total, most accurate token count for the entire prompt, calculated using the same logic as the `chat()` method.
-   - `zones`: A dictionary with up to two keys:
-     - **`system_context`**: Present if there is any system-level content. It contains:
-       - `tokens`: The total token count for the **entire combined system block** (e.g., `!@>system:\n...\n`).
-       - `content`: The full string content of the system block, showing exactly how all zones are merged.
-       - `breakdown`: A sub-dictionary showing the raw text of each individual component (e.g., `system_prompt`, `memory`, `user_data_zone`) that was used to build the `content`.
-     - **`message_history`**: Present if there are messages in the branch. It contains:
-       - `tokens`: The total token count for the message history part of the prompt.
-       - `content`: The full string of the formatted message history.
-       - `message_count`: The number of messages included in the history.
-
- - **Use Case:** Essential for debugging context issues, visualizing how different data zones contribute to the final prompt, and monitoring token consumption.
-
- ```python
- import json
-
- # Assuming 'discussion' is an LollmsDiscussion object with some data
- discussion.system_prompt = "You are a helpful AI."
- discussion.user_data_zone = "User is named Bob."
- discussion.add_message(sender="user", content="Hello!")
- discussion.add_message(sender="assistant", content="Hi Bob!")
-
- status = discussion.get_context_status()
- print(json.dumps(status, indent=2))
-
- # Expected Output Structure:
- # {
- #   "max_tokens": null,
- #   "current_tokens": 46,
- #   "zones": {
- #     "system_context": {
- #       "content": "You are a helpful AI.\n\n-- User Data Zone --\nUser is named Bob.",
- #       "tokens": 25,
- #       "breakdown": {
- #         "system_prompt": "You are a helpful AI.",
- #         "user_data_zone": "User is named Bob."
- #       }
- #     },
- #     "message_history": {
- #       "content": "!@>user:\nHello!\n!@>assistant:\nHi Bob!\n",
- #       "tokens": 21,
- #       "message_count": 2
- #     }
- #   }
- # }
- ```
-
- ### Other Methods
- - `add_message(sender, content, ...)`: Adds a new message.
- - `export(format_type, ...)`: Exports the discussion to a specific format.
- - `commit()`: Saves changes to the database (if DB-backed).
- - `summarize_and_prune()`: Automatically handles context window limits.
- - `count_discussion_tokens()`: Counts the tokens for a given format.
@@ -29,12 +29,12 @@ examples/mcp_examples/openai_mcp.py,sha256=7IEnPGPXZgYZyiES_VaUbQ6viQjenpcUxGiHE
  examples/mcp_examples/run_remote_mcp_example_v2.py,sha256=bbNn93NO_lKcFzfIsdvJJijGx2ePFTYfknofqZxMuRM,14626
  examples/mcp_examples/run_standard_mcp_example.py,sha256=GSZpaACPf3mDPsjA8esBQVUsIi7owI39ca5avsmvCxA,9419
  examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
- lollms_client/__init__.py,sha256=5WRehZnsWKKGP_lLPJ_PCHk1NMQBGRcezXExtvvtKzg,1147
+ lollms_client/__init__.py,sha256=xGG08Mr3A0qFgHz0rb_LZpFFL8pH88U8JyZVYa6MCmA,1147
  lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
- lollms_client/lollms_core.py,sha256=ABfUq13P_zo_qpLwHNhtvzmiA1nHZyqbBLKoaVECNi4,171407
- lollms_client/lollms_discussion.py,sha256=zdm02lzd3cQNPaZfJ3zCa8yQTYw7mogqWk1cve3UOao,67697
+ lollms_client/lollms_core.py,sha256=MXTJgsAZ4eCeM-KbY7KJX-klryLX9MCdp8G6O-Y5mEE,176429
+ lollms_client/lollms_discussion.py,sha256=vaBJ9LJumTUgi2550toNOnEOYMN412OvPicMn8CNi64,85306
  lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
- lollms_client/lollms_llm_binding.py,sha256=cU0cmxZfIrp-ofutbRLx7W_59dxzPXpU-vO98MqVnQA,14788
+ lollms_client/lollms_llm_binding.py,sha256=_r5_bZfasJQlI84EfH_sKlVMlOuiIgMXL6wYznRT_GM,15526
  lollms_client/lollms_mcp_binding.py,sha256=0rK9HQCBEGryNc8ApBmtOlhKE1Yfn7X7xIQssXxS2Zc,8933
  lollms_client/lollms_personality.py,sha256=O-9nqZhazcITOkxjT24ENTxTmIoZLgqIsQ9WtWs0Id0,8719
  lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
@@ -44,7 +44,7 @@ lollms_client/lollms_ttm_binding.py,sha256=FjVVSNXOZXK1qvcKEfxdiX6l2b4XdGOSNnZ0u
  lollms_client/lollms_tts_binding.py,sha256=5cJYECj8PYLJAyB6SEH7_fhHYK3Om-Y3arkygCnZ24o,4342
  lollms_client/lollms_ttv_binding.py,sha256=KkTaHLBhEEdt4sSVBlbwr5i_g_TlhcrwrT-7DjOsjWQ,4131
  lollms_client/lollms_types.py,sha256=0iSH1QHRRD-ddBqoL9EEKJ8wWCuwDUlN_FrfbCdg7Lw,3522
- lollms_client/lollms_utilities.py,sha256=8vCCO1EKI457Mjddo3mRalKOvP-sDv7nIQthOBK8fPY,13725
+ lollms_client/lollms_utilities.py,sha256=3DAsII2X9uhRzRL-D0QlALcEdRg82y7OIL4yHVF32gY,19446
  lollms_client/llm_bindings/__init__.py,sha256=9sWGpmWSSj6KQ8H4lKGCjpLYwhnVdL_2N7gXCphPqh4,14
  lollms_client/llm_bindings/azure_openai/__init__.py,sha256=8C-gXoVa-OI9FmFM3PaMgrTfzqCLbs4f7CHJHxKuAR8,16675
  lollms_client/llm_bindings/claude/__init__.py,sha256=CsWILXAFytXtxp1ZAoNwq8KycW0POQ2MCmpT6Bz0Hd0,24877
@@ -57,7 +57,7 @@ lollms_client/llm_bindings/llamacpp/__init__.py,sha256=4cotP3cYhiA0501UnGVljlEBB
  lollms_client/llm_bindings/lollms/__init__.py,sha256=scGHEKzlGX5fw2XwefVicsf28GrwgN3wU5nl4EPJ_Sk,24424
  lollms_client/llm_bindings/lollms_webui/__init__.py,sha256=Thoq3PJR2e03Y2Kd_FBb-DULJK0zT5-2ID1YIJLcPlw,17864
  lollms_client/llm_bindings/mistral/__init__.py,sha256=624Gr462yBh52ttHFOapKgJOn8zZ1vZcTEcC3i4FYt8,12750
- lollms_client/llm_bindings/ollama/__init__.py,sha256=1yxw_JXye_8l1YaEznK5QhOZmLV_opY-FkYnwy530eo,36109
+ lollms_client/llm_bindings/ollama/__init__.py,sha256=_plx8cO3Bl9igmIEvTkJ6tkZ2imHS_L76hCHdJAGIhQ,36851
  lollms_client/llm_bindings/open_router/__init__.py,sha256=v91BpNcuQCbbA6r82gbgMP8UYhSrJUMOf4UtOzEo18Q,13235
  lollms_client/llm_bindings/openai/__init__.py,sha256=kLG0-FyLMoSbEay1hcK46XjEBaLbFE3U51lUjAZ8HoI,25663
  lollms_client/llm_bindings/openllm/__init__.py,sha256=xv2XDhJNCYe6NPnWBboDs24AQ1VJBOzsTuMcmuQ6xYY,29864
@@ -92,8 +92,9 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
  lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
  lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
  lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- lollms_client-0.29.3.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
- lollms_client-0.29.3.dist-info/METADATA,sha256=wWn-0CasMd51exqHdQXynjYTcPtYVSiycyCGMv7aTII,47847
- lollms_client-0.29.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- lollms_client-0.29.3.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
- lollms_client-0.29.3.dist-info/RECORD,,
+ lollms_client-0.31.0.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+ test/test_lollms_discussion.py,sha256=KxTsV1bPdNz8QqZd7tIof9kTWkeXLUtAMU08BQmoY6U,16829
+ lollms_client-0.31.0.dist-info/METADATA,sha256=9BCSndDpLDG8OigH4U8QZPl_puk0soNAdtg2pGcd1dI,38717
+ lollms_client-0.31.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ lollms_client-0.31.0.dist-info/top_level.txt,sha256=1jIpjTnOSGEGtIW2rGAFM6tVRzgsDdMOiox_SmDH_zw,28
+ lollms_client-0.31.0.dist-info/RECORD,,
@@ -1,2 +1,3 @@
  examples
  lollms_client
+ test