lollms-client 0.14.1__py3-none-any.whl → 0.15.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- examples/simple_text_gen_with_image_test.py +21 -9
- examples/text_gen.py +3 -1
- examples/text_gen_system_prompt.py +2 -1
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/llamacpp/__init__.py +1041 -0
- lollms_client/llm_bindings/ollama/__init__.py +3 -3
- lollms_client/llm_bindings/openllm/__init__.py +547 -0
- lollms_client/llm_bindings/pythonllamacpp/__init__.py +591 -0
- lollms_client/llm_bindings/transformers/__init__.py +660 -251
- lollms_client/lollms_core.py +5 -3
- lollms_client/lollms_llm_binding.py +1 -5
- {lollms_client-0.14.1.dist-info → lollms_client-0.15.1.dist-info}/METADATA +1 -1
- {lollms_client-0.14.1.dist-info → lollms_client-0.15.1.dist-info}/RECORD +16 -13
- {lollms_client-0.14.1.dist-info → lollms_client-0.15.1.dist-info}/WHEEL +0 -0
- {lollms_client-0.14.1.dist-info → lollms_client-0.15.1.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-0.14.1.dist-info → lollms_client-0.15.1.dist-info}/top_level.txt +0 -0
examples/simple_text_gen_with_image_test.py
CHANGED

@@ -10,10 +10,16 @@ from ascii_colors import ASCIIColors, trace_exception
 # MODEL_NAME = None # Server will use its default or last loaded model
 
 # Option 2: Ollama binding
-BINDING_NAME = "ollama"
-HOST_ADDRESS = "http://localhost:11434" # Default Ollama host
-MODEL_NAME = "llava:latest" # Or "llama3:latest", "phi3:latest", etc. - ensure it's pulled in Ollama
-
+# BINDING_NAME = "ollama"
+# HOST_ADDRESS = "http://localhost:11434" # Default Ollama host
+# MODEL_NAME = "llava:latest" # Or "llama3:latest", "phi3:latest", etc. - ensure it's pulled in Ollama
+
+# Option 2: llamacpp binding
+BINDING_NAME = "llamacpp"
+MODELS_PATH = r"E:\drumber" # Change to your own models folder
+MODEL_NAME = "llava-v1.6-mistral-7b.Q3_K_XS.gguf" # Change to your vision capable model (make sure you have a mmprj file with the gguf model with the same name but without the quantization name and with mmproj- prefix (mmproj-llava-v1.6-mistral-7b.gguf))
+# You can also add a clip_model_path parameter to your lc_params
+img = "E:\\drumber\\1711741182996.jpg"
 # Option 3: OpenAI binding (requires OPENAI_API_KEY environment variable or service_key)
 # BINDING_NAME = "openai"
 # HOST_ADDRESS = None # Defaults to OpenAI API

@@ -34,19 +40,25 @@ def simple_streaming_callback(chunk: str, msg_type: MSG_TYPE, params=None, metad
 
 def test_text_generation():
     ASCIIColors.cyan(f"\n--- Testing Text Generation with '{BINDING_NAME}' binding ---")
-    ASCIIColors.cyan(f"Host: {HOST_ADDRESS or 'Default'}, Model: {MODEL_NAME or 'Default'}")
 
+    if BINDING_NAME!="llamacpp":
+        ASCIIColors.cyan(f"Host: {HOST_ADDRESS or 'Default'}, Model: {MODEL_NAME or 'Default'}")
+    else:
+        ASCIIColors.cyan(f"Host: {MODELS_PATH or 'Default'}, Model: {MODEL_NAME or 'Default'}")
     try:
         # Initialize LollmsClient
         lc_params = {
            "binding_name": BINDING_NAME,
-           "host_address": HOST_ADDRESS,
            "model_name": MODEL_NAME,
            # "service_key": SERVICE_KEY, # Uncomment for OpenAI if needed
        }
-
-
-
+        if BINDING_NAME!="llamacpp":
+            lc_params["host_address"]= HOST_ADDRESS
+            # Remove None host_address for bindings that have internal defaults (like OpenAI)
+            if lc_params["host_address"] is None and BINDING_NAME in ["openai"]:
+                del lc_params["host_address"]
+        else:
+            lc_params["models_path"]= MODELS_PATH
 
 
        lc = LollmsClient(**lc_params)
examples/text_gen.py
CHANGED

@@ -1,7 +1,9 @@
 from lollms_client import LollmsClient
 
 # Initialize the LollmsClient instance
-lc = LollmsClient("lollms")
+#lc = LollmsClient("lollms")
+#lc = LollmsClient("ollama", model_name="mistral-nemo:latest")
+lc = LollmsClient("llamacpp", models_path=r"E:\drumber", model_name="llava-v1.6-mistral-7b.Q3_K_XS.gguf")
 # Generate Text
 # response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
 # print(response)
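Stripped of the commented-out alternatives, the updated text_gen.py boils down to a few lines. A runnable sketch with the generation call uncommented (models_path and the GGUF file name are placeholders for your own setup):

from lollms_client import LollmsClient

# Point the llamacpp binding at a local folder of GGUF models (placeholder path).
lc = LollmsClient(
    "llamacpp",
    models_path="/path/to/models",
    model_name="llava-v1.6-mistral-7b.Q3_K_XS.gguf",
)

# Same call the example keeps commented out.
response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
print(response)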
examples/text_gen_system_prompt.py
CHANGED

@@ -1,7 +1,8 @@
 from lollms_client import LollmsClient
 
 # Initialize the LollmsClient instance
-lc = LollmsClient("ollama",model_name="mistral-nemo:latest")
+#lc = LollmsClient("ollama",model_name="mistral-nemo:latest")
+lc = LollmsClient("llamacpp", models_path=r"E:\drumber", model_name="llava-v1.6-mistral-7b.Q3_K_XS.gguf")
 # Generate Text
 # response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
 # print(response)
lollms_client/__init__.py
CHANGED

@@ -6,7 +6,7 @@ from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
 from lollms_client.lollms_utilities import PromptReshaper # Keep general utilities
 from lollms_client.lollms_functions import FunctionCalling_Library
 
-__version__ = "0.14.1"
+__version__ = "0.15.1"
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [