lollms-client 0.27.1__tar.gz → 0.27.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- {lollms_client-0.27.1 → lollms_client-0.27.3}/PKG-INFO +1 -1
- lollms_client-0.27.3/examples/lollms_chat/calculator.py +59 -0
- lollms_client-0.27.3/examples/lollms_chat/derivative.py +48 -0
- lollms_client-0.27.3/examples/lollms_chat/test_openai_compatible_with_lollms_chat.py +12 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/__init__.py +1 -1
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/litellm/__init__.py +21 -2
- {lollms_client-0.27.1/lollms_client/llm_bindings/openai → lollms_client-0.27.3/lollms_client/llm_bindings/lollms_chat}/__init__.py +73 -4
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/ollama/__init__.py +70 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/open_router/__init__.py +1 -2
- lollms_client-0.27.3/lollms_client/llm_bindings/openai/__init__.py +577 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_core.py +87 -5
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_discussion.py +57 -1
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_llm_binding.py +44 -5
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client.egg-info/PKG-INFO +1 -1
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client.egg-info/SOURCES.txt +4 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/LICENSE +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/README.md +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/article_summary/article_summary.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/console_discussion/console_app.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/console_discussion.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/deep_analyze/deep_analyse.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/function_calling_with_local_custom_mcp.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/generate_a_benchmark_for_safe_store.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/generate_and_speak/generate_and_speak.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/generate_game_sfx/generate_game_fx.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/generate_text_with_multihop_rag_example.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/gradio_chat_app.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/gradio_lollms_chat.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/internet_search_with_rag.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/lollms_discussions_test.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/mcp_examples/external_mcp.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/mcp_examples/local_mcp.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/mcp_examples/openai_mcp.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/mcp_examples/run_remote_mcp_example_v2.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/mcp_examples/run_standard_mcp_example.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/simple_text_gen_test.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/simple_text_gen_with_image_test.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/test_local_models/local_chat.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/text_2_audio.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/text_2_image.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/text_2_image_diffusers.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/text_and_image_2_audio.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/text_gen.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/examples/text_gen_system_prompt.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/azure_openai/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/claude/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/gemini/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/grok/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/groq/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/mistral/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/openllm/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/transformers/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/vllm/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_config.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_js_analyzer.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_mcp_binding.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_personality.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_python_analyzer.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_stt_binding.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_tti_binding.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_ttm_binding.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_tts_binding.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_ttv_binding.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_types.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/lollms_utilities.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/stt_bindings/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/stt_bindings/whisper/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/tti_bindings/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/tti_bindings/dalle/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/tti_bindings/gemini/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/ttm_bindings/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/ttm_bindings/bark/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/tts_bindings/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/tts_bindings/bark/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/tts_bindings/xtts/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/ttv_bindings/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client.egg-info/dependency_links.txt +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client.egg-info/requires.txt +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client.egg-info/top_level.txt +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/pyproject.toml +0 -0
- {lollms_client-0.27.1 → lollms_client-0.27.3}/setup.cfg +0 -0
lollms_client-0.27.3/examples/lollms_chat/calculator.py
ADDED
@@ -0,0 +1,59 @@
+# This is a simple example of a calculator using lollms_chat. don't expect this to be perfect as LLMs are very bad in computing
+# this can be used for very simple calculations. don't expect it to be accurate to compute sqrt(35) or something
+# it is just for the purpose of experimentation as even the best models in the world are not capable of doing accurate calculations yet without a calculator
+
+from lollms_client import LollmsClient
+import json
+import math # Import the math module for calculations
+
+# Make sure you use your key
+lc = LollmsClient(
+    "openai",
+    "http://localhost:9642/v1/",
+    service_key="lollms_y-uyV-p2_AQGo5Ut6uHDmfIoRk6rKfmf0Rz6xQx-Zkl8cNyVUSFM" # make sure you generate your own key
+)
+
+# if you want to see what binding/model does the server support, use this:
+models = lc.listModels()
+print(f"Found models:\n{models}")
+
+lc.set_model_name("ollama/gemma3:27b") # Or your preferred binding/model
+
+expression = input("Give an expression to evaluate: ")
+
+# Construct a detailed prompt
+system_prompt = (
+    "You are a highly accurate calculator. You receive a mathematical expression "
+    "as input and return the result as a JSON object. "
+    "The expression can include numbers, basic arithmetic operators (+, -, *, /), "
+    "parentheses, and common mathematical functions like sin, cos, tan, pi, sqrt, and log. "
+    "Always evaluate the expression and return the final numeric result. If the expression is invalid, return 'Error'."
+)
+
+template = '{"result": the numeric result of the evaluated expression}'
+
+# Include the expression in the user prompt. This is important!
+user_prompt = f"Evaluate the following expression: {expression}"
+
+# Generate the code
+generation_output = lc.generate_code(
+    user_prompt,
+    system_prompt=system_prompt,
+    template=template
+)
+
+try:
+    # Attempt to parse the JSON response
+    generation_output = json.loads(generation_output)
+    result = generation_output["result"]
+
+    # Attempt to convert the result to a float
+    try:
+        result = float(result)
+        print(f"Result: {result}")
+    except ValueError:
+        print(f"Result: {result} (Could not convert to a number)") # Handles cases where the LLM returns non-numeric output
+except json.JSONDecodeError:
+    print(f"Error: Could not decode JSON response: {generation_output}")
+except KeyError:
+    print(f"Error: 'result' key not found in JSON response: {generation_output}")
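For clarity on the intended round trip: the template only describes the desired JSON shape, and a cooperative model is expected to reply with something like {"result": 14} for the input 2*(3+4); that reply is what the try/except block in the script parses. A minimal sketch of the parsing path, using a hypothetical reply string rather than a live model call:

import json

raw_reply = '{"result": 14}'       # hypothetical model reply for the expression 2*(3+4)
parsed = json.loads(raw_reply)     # json.JSONDecodeError is raised here on malformed replies
print(float(parsed["result"]))     # prints 14.0; KeyError/ValueError are handled as in the script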
lollms_client-0.27.3/examples/lollms_chat/derivative.py
ADDED
@@ -0,0 +1,48 @@
+from lollms_client import LollmsClient
+import json
+
+# Make sure you use your key
+lc = LollmsClient(
+    "openai",
+    "http://localhost:9642/v1/",
+    service_key="lollms_y-uyV-p2_AQGo5Ut6uHDmfIoRk6rKfmf0Rz6xQx-Zkl8cNyVUSFM" # make sure you generate your own key
+)
+
+# if you want to see what binding/model does the server support, use this:
+models = lc.listModels()
+print(f"Found models:\n{models}")
+
+lc.set_model_name("ollama/gemma3:27b") # Or your preferred binding/model
+
+function = input("Enter the function (e.g., x^2 + 2*x): ")
+parameter = input("Enter the parameter to differentiate with respect to (e.g., x): ")
+
+# Construct a detailed prompt
+system_prompt = (
+    "You are a symbolic differentiation engine. You receive a mathematical function "
+    "and a parameter as input, and you return the derivative of the function with respect to that parameter. "
+    "The function can include variables, numbers, and common mathematical operations. "
+    "Return the derivative as a string. If the function or parameter is invalid, return 'Error'."
+)
+
+template = '"{derivative}": the derivative of the function with respect to the parameter'
+
+# Include the function and parameter in the user prompt. This is important!
+user_prompt = f"Find the derivative of the function '{function}' with respect to '{parameter}'."
+
+# Generate the code
+generation_output = lc.generate_code(
+    user_prompt,
+    system_prompt=system_prompt,
+    template=template
+)
+
+try:
+    # Attempt to parse the JSON response
+    generation_output = json.loads(generation_output)
+    derivative = generation_output["derivative"]
+    print(f"Derivative: {derivative}")
+except json.JSONDecodeError:
+    print(f"Error: Could not decode JSON response: {generation_output}")
+except KeyError:
+    print(f"Error: 'derivative' key not found in JSON response: {generation_output}")
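As a worked example of the expected reply shape: for the inputs x^2 + 2*x and x, the derivative is 2*x + 2, so a well-behaved model should answer with a JSON object along the lines of {"derivative": "2*x + 2"}, which the try block above loads and prints. Note that the template here is a descriptive string rather than strict JSON, so parsing only succeeds if the model still emits a proper JSON object:

import json

raw_reply = '{"derivative": "2*x + 2"}'     # hypothetical model reply for d/dx of x^2 + 2*x
print(json.loads(raw_reply)["derivative"])  # prints: 2*x + 2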
lollms_client-0.27.3/examples/lollms_chat/test_openai_compatible_with_lollms_chat.py
ADDED
@@ -0,0 +1,12 @@
+from lollms_client import LollmsClient
+#make sure you use your key
+lc = LollmsClient("openai","http://localhost:9642/v1/", service_key="lollms_zXQdyvrP_ecMXm3UZ0D004x979aHpyF8iq4ki_b52q0WdFuiEfMo")
+models = lc.listModels()
+print(f"Found models:\n{models}")
+
+lc.set_model_name("ollama/gemma3:27b")
+
+res = lc.generate_text("Describe this image",images=[
+    r"C:\Users\parisneo\Pictures\me.jpg"
+])
+print(res)
{lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/__init__.py
RENAMED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utilities
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
 
-__version__ = "0.27.1"
+__version__ = "0.27.3" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
{lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/litellm/__init__.py
RENAMED
@@ -110,6 +110,25 @@ class LiteLLMBinding(LollmsLLMBinding):
 
         return self._perform_generation(messages, n_predict, is_streaming, temperature, top_p, repeat_penalty, seed, streaming_callback)
 
+    def generate_from_messages(self,
+                               messages: List[Dict],
+                               n_predict: Optional[int] = None,
+                               stream: Optional[bool] = None,
+                               temperature: Optional[float] = None,
+                               top_k: Optional[int] = None,
+                               top_p: Optional[float] = None,
+                               repeat_penalty: Optional[float] = None,
+                               repeat_last_n: Optional[int] = None,
+                               seed: Optional[int] = None,
+                               n_threads: Optional[int] = None,
+                               ctx_size: int | None = None,
+                               streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
+                               **kwargs
+                               ) -> Union[str, dict]:
+        is_streaming = stream if stream is not None else (streaming_callback is not None)
+        return self._perform_generation(messages, n_predict, is_streaming, temperature, top_p, repeat_penalty, seed, streaming_callback)
+
+
     def chat(self, discussion: LollmsDiscussion, branch_tip_id: Optional[str] = None, n_predict: Optional[int] = None, stream: Optional[bool] = None, temperature: float = 0.7, top_p: float = 0.9, repeat_penalty: float = 1.1, seed: Optional[int] = None, streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None, **kwargs) -> Union[str, dict]:
         is_streaming = stream if stream is not None else (streaming_callback is not None)
         messages = discussion.export("openai_chat", branch_tip_id)
@@ -178,9 +197,9 @@ class LiteLLMBinding(LollmsLLMBinding):
             entries.append({
                 "category": "api", "datasets": "unknown", "icon": get_icon_path(model_name),
                 "license": "unknown", "model_creator": model_info.get('owned_by', 'unknown'),
-                "
+                "model_name": model_name, "provider": "litellm", "rank": "1.0", "type": "api",
                 "variants": [{
-                    "
+                    "model_name": model_name, "size": context_size,
                     "input_cost_per_token": model_info.get('input_cost_per_token', 0),
                     "output_cost_per_token": model_info.get('output_cost_per_token', 0),
                     "max_output_tokens": model_info.get('max_output_tokens', 0),
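Across these bindings, the new generate_from_messages entry point accepts an OpenAI-style list of role/content dictionaries directly, instead of going through a LollmsDiscussion. Below is a minimal usage sketch; it assumes a binding instance (LiteLLMBinding, OllamaBinding, or the new LollmsChatBinding) has already been constructed elsewhere, which is why the binding is passed in as a parameter:

from typing import Dict, List, Union

def ask(binding, prompt: str) -> Union[str, dict]:
    # Single-turn call through any lollms-client LLM binding that implements
    # generate_from_messages(); returns the reply text, or an error dict on failure.
    messages: List[Dict] = [
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": prompt},
    ]
    # n_predict is translated per binding (max_tokens for the OpenAI-compatible path,
    # num_predict for Ollama); stream=False returns the whole reply at once.
    return binding.generate_from_messages(messages, n_predict=128, temperature=0.7, stream=False)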
{lollms_client-0.27.1/lollms_client/llm_bindings/openai → lollms_client-0.27.3/lollms_client/llm_bindings/lollms_chat}/__init__.py
RENAMED
@@ -1,4 +1,4 @@
-# bindings/
+# bindings/Lollms_chat/binding.py
 import requests
 import json
 from lollms_client.lollms_llm_binding import LollmsLLMBinding
@@ -18,11 +18,11 @@ import openai
 import tiktoken
 import os
 
-BindingName = "
+BindingName = "LollmsChatBinding"
 
 
-class 
-"""
+class LollmsChatBinding(LollmsLLMBinding):
+    """LollmsChat-specific binding implementation (open ai compatible with some extra parameters)"""
 
 
     def __init__(self,
@@ -229,6 +229,66 @@ class OpenAIBinding(LollmsLLMBinding):
 
         return output
 
+    def generate_from_messages(self,
+                               messages: List[Dict],
+                               n_predict: Optional[int] = None,
+                               stream: Optional[bool] = None,
+                               temperature: Optional[float] = None,
+                               top_k: Optional[int] = None,
+                               top_p: Optional[float] = None,
+                               repeat_penalty: Optional[float] = None,
+                               repeat_last_n: Optional[int] = None,
+                               seed: Optional[int] = None,
+                               n_threads: Optional[int] = None,
+                               ctx_size: int | None = None,
+                               streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
+                               **kwargs
+                               ) -> Union[str, dict]:
+        # Build the request parameters
+        params = {
+            "model": self.model_name,
+            "messages": messages,
+            "max_tokens": n_predict,
+            "n": 1,
+            "temperature": temperature,
+            "top_p": top_p,
+            "frequency_penalty": repeat_penalty,
+            "stream": stream
+        }
+        # Add seed if available, as it's supported by newer OpenAI models
+        if seed is not None:
+            params["seed"] = seed
+
+        # Remove None values, as the API expects them to be absent
+        params = {k: v for k, v in params.items() if v is not None}
+
+        output = ""
+        # 2. Call the API
+        try:
+            completion = self.client.chat.completions.create(**params)
+
+            if stream:
+                for chunk in completion:
+                    # The streaming response for chat has a different structure
+                    delta = chunk.choices[0].delta
+                    if delta.content:
+                        word = delta.content
+                        if streaming_callback is not None:
+                            if not streaming_callback(word, MSG_TYPE.MSG_TYPE_CHUNK):
+                                break
+                        output += word
+            else:
+                output = completion.choices[0].message.content
+
+        except Exception as e:
+            # Handle API errors gracefully
+            error_message = f"An error occurred with the OpenAI API: {e}"
+            if streaming_callback:
+                streaming_callback(error_message, MSG_TYPE.MSG_TYPE_EXCEPTION)
+            return {"status": "error", "message": error_message}
+
+        return output
+
     def chat(self,
              discussion: LollmsDiscussion,
              branch_tip_id: Optional[str] = None,
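The streaming branch above forwards each content delta to streaming_callback and stops generating as soon as the callback returns a falsy value. A minimal sketch of a compatible callback, assuming MSG_TYPE is importable from lollms_client.lollms_types as the bindings in this package use it:

from lollms_client.lollms_types import MSG_TYPE

def on_chunk(chunk: str, msg_type: MSG_TYPE) -> bool:
    # Print streamed text as it arrives; returning False asks the binding to stop early.
    if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
        print(chunk, end="", flush=True)
    return True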
@@ -457,6 +517,15 @@ class OpenAIBinding(LollmsLLMBinding):
                         "context_length": context_length,
                         "max_generation": max_generation,
                     })
+                else:
+                    models_info.append({
+                        "model_name": model_id,
+                        "owned_by": getattr(model, "owned_by", "N/A"),
+                        "created": getattr(model, "created", "N/A"),
+                        "context_length": None,
+                        "max_generation": None,
+                    })
+
         except Exception as e:
             print(f"Failed to list models: {e}")
 
{lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/ollama/__init__.py
RENAMED
@@ -258,7 +258,77 @@ class OllamaBinding(LollmsLLMBinding):
             error_message = f"An unexpected error occurred: {str(ex)}"
             trace_exception(ex)
             return {"status": False, "error": error_message}
+
+    def generate_from_messages(self,
+                               messages: List[Dict],
+                               n_predict: Optional[int] = None,
+                               stream: Optional[bool] = None,
+                               temperature: Optional[float] = None,
+                               top_k: Optional[int] = None,
+                               top_p: Optional[float] = None,
+                               repeat_penalty: Optional[float] = None,
+                               repeat_last_n: Optional[int] = None,
+                               seed: Optional[int] = None,
+                               n_threads: Optional[int] = None,
+                               ctx_size: int | None = None,
+                               streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
+                               **kwargs
+                               ) -> Union[str, dict]:
+        if not self.ollama_client:
+            return {"status": False, "error": "Ollama client not initialized."}
+
+        options = {}
+        if n_predict is not None: options['num_predict'] = n_predict
+        if temperature is not None: options['temperature'] = float(temperature)
+        if top_k is not None: options['top_k'] = top_k
+        if top_p is not None: options['top_p'] = top_p
+        if repeat_penalty is not None: options['repeat_penalty'] = repeat_penalty
+        if repeat_last_n is not None: options['repeat_last_n'] = repeat_last_n
+        if seed is not None: options['seed'] = seed
+        if n_threads is not None: options['num_thread'] = n_threads
+        if ctx_size is not None: options['num_ctx'] = ctx_size
+
+        full_response_text = ""
+
+        try:
+            if stream:
+                response_stream = self.ollama_client.chat(
+                    model=self.model_name,
+                    messages=messages,
+                    stream=True,
+                    options=options if options else None
+                )
+                for chunk_dict in response_stream:
+                    chunk_content = chunk_dict.get('message', {}).get('content', '')
+                    if chunk_content: # Ensure there is content to process
+                        full_response_text += chunk_content
+                        if streaming_callback:
+                            if not streaming_callback(chunk_content, MSG_TYPE.MSG_TYPE_CHUNK):
+                                break # Callback requested stop
+                return full_response_text
+            else: # Not streaming
+                response_dict = self.ollama_client.chat(
+                    model=self.model_name,
+                    messages=messages,
+                    stream=False,
+                    options=options if options else None
+                )
+                return response_dict.get('message', {}).get('content', '')
+
+        except ollama.ResponseError as e:
+            error_message = f"Ollama API ResponseError: {e.error or 'Unknown error'} (status code: {e.status_code})"
+            ASCIIColors.error(error_message)
+            return {"status": False, "error": error_message, "status_code": e.status_code}
+        except ollama.RequestError as e: # Covers connection errors, timeouts during request
+            error_message = f"Ollama API RequestError: {str(e)}"
+            ASCIIColors.error(error_message)
+            return {"status": False, "error": error_message}
+        except Exception as ex:
+            error_message = f"An unexpected error occurred: {str(ex)}"
+            trace_exception(ex)
+            return {"status": False, "error": error_message}
 
+
     def chat(self,
              discussion: LollmsDiscussion,
              branch_tip_id: Optional[str] = None,
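For quick reference, the Ollama variant above maps the generic lollms-client arguments onto Ollama option keys. Spelled out as a plain dictionary, with the names taken directly from the code in this hunk:

OLLAMA_OPTION_FOR_PARAM = {
    "n_predict": "num_predict",
    "temperature": "temperature",
    "top_k": "top_k",
    "top_p": "top_p",
    "repeat_penalty": "repeat_penalty",
    "repeat_last_n": "repeat_last_n",
    "seed": "seed",
    "n_threads": "num_thread",
    "ctx_size": "num_ctx",
}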
{lollms_client-0.27.1 → lollms_client-0.27.3}/lollms_client/llm_bindings/open_router/__init__.py
RENAMED
@@ -276,7 +276,6 @@ if __name__ == '__main__':
         binding.load_model("meta-llama/llama-3-8b-instruct:free") # Use the free tier on OpenRouter
         full_streamed_text = ""
         def stream_callback(chunk: str, msg_type: int):
-            nonlocal full_streamed_text
             ASCIIColors.green(chunk, end="", flush=True)
             full_streamed_text += chunk
             return True
@@ -301,4 +300,4 @@ if __name__ == '__main__':
         ASCIIColors.error(f"An error occurred during testing: {e}")
         trace_exception(e)
 
-    ASCIIColors.yellow("\nOpenRouterBinding test finished.")
+    ASCIIColors.yellow("\nOpenRouterBinding test finished.")

(In the last hunk the removed and added lines are textually identical; the change is apparently only the addition of a trailing newline at end of file.)