lollms-client 0.12.6__tar.gz → 0.13.0__tar.gz
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of lollms-client has been flagged as possibly problematic.
- {lollms_client-0.12.6 → lollms_client-0.13.0}/PKG-INFO +12 -12
- lollms_client-0.13.0/examples/article_summary/article_summary.py +58 -0
- lollms_client-0.13.0/examples/deep_analyze/deep_analyse.py +30 -0
- lollms_client-0.13.0/examples/deep_analyze/deep_analyze_multiple_files.py +32 -0
- lollms_client-0.13.0/examples/function_call/functions_call_with images.py +52 -0
- lollms_client-0.13.0/examples/personality_test/chat_test.py +37 -0
- lollms_client-0.13.0/examples/personality_test/chat_with_aristotle.py +42 -0
- lollms_client-0.13.0/examples/personality_test/tesks_test.py +62 -0
- lollms_client-0.13.0/examples/simple_text_gen_test.py +171 -0
- lollms_client-0.13.0/examples/simple_text_gen_with_image_test.py +166 -0
- lollms_client-0.13.0/examples/test_local_models/local_chat.py +9 -0
- lollms_client-0.13.0/examples/text_2_audio.py +77 -0
- lollms_client-0.13.0/examples/text_2_image.py +140 -0
- lollms_client-0.13.0/examples/text_and_image_2_audio.py +59 -0
- lollms_client-0.13.0/examples/text_gen.py +28 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/__init__.py +2 -1
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/llm_bindings/lollms/__init__.py +13 -11
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/llm_bindings/ollama/__init__.py +8 -7
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/llm_bindings/openai/__init__.py +69 -29
- lollms_client-0.13.0/lollms_client/llm_bindings/tensor_rt/__init__.py +603 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/llm_bindings/transformers/__init__.py +7 -11
- lollms_client-0.13.0/lollms_client/llm_bindings/vllm/__init__.py +603 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_core.py +0 -3
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_llm_binding.py +5 -25
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client.egg-info/PKG-INFO +12 -12
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client.egg-info/SOURCES.txt +17 -1
- lollms_client-0.13.0/lollms_client.egg-info/top_level.txt +4 -0
- lollms_client-0.13.0/pyproject.toml +40 -0
- lollms_client-0.12.6/lollms_client.egg-info/top_level.txt +0 -1
- lollms_client-0.12.6/setup.py +0 -27
- {lollms_client-0.12.6 → lollms_client-0.13.0}/LICENSE +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/README.md +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/llm_bindings/__init__.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_config.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_discussion.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_functions.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_js_analyzer.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_python_analyzer.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_stt_binding.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_tasks.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_tti_binding.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_ttm_binding.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_tts_binding.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_ttv_binding.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_types.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/lollms_utilities.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/stt_bindings/__init__.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/tti_bindings/__init__.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/ttm_bindings/__init__.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/tts_bindings/__init__.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/ttv_bindings/__init__.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client.egg-info/dependency_links.txt +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/lollms_client.egg-info/requires.txt +0 -0
- {lollms_client-0.12.6 → lollms_client-0.13.0}/setup.cfg +0 -0
PKG-INFO (package metadata):

```diff
@@ -1,25 +1,25 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.12.6
+Version: 0.13.0
 Summary: A client library for LoLLMs generate endpoint
-
-
-
+Author-email: ParisNeo <parisneoai@gmail.com>
+License: Apache Software License
+Project-URL: Homepage, https://github.com/ParisNeo/lollms_client
 Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Requires-Python: >=3.7
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: requests
-Dynamic: author
-Dynamic: author-email
-Dynamic: classifier
-Dynamic: description
-Dynamic: description-content-type
-Dynamic: home-page
 Dynamic: license-file
-Dynamic: requires-dist
-Dynamic: summary
 
 # lollms_client
 
```
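The metadata above can be inspected at runtime once the wheel is installed. The following snippet is an illustrative sketch only (it is not part of the release) and uses the standard-library importlib.metadata to read back the fields the 0.13.0 build now declares, such as Requires-Python and the expanded classifier list:

```python
# Illustrative only: inspect the installed lollms_client metadata shown in the diff above.
# Assumes lollms_client 0.13.0 is installed (e.g. `pip install lollms_client==0.13.0`).
from importlib.metadata import metadata, version

md = metadata("lollms_client")  # email.Message-like view of PKG-INFO
print("Version:        ", version("lollms_client"))
print("Requires-Python:", md["Requires-Python"])
print("Author-email:   ", md["Author-email"])
print("Classifiers:")
for classifier in md.get_all("Classifier", []):
    print("  -", classifier)
```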
examples/article_summary/article_summary.py (new file, +58 lines):

```python
from lollms_client import LollmsClient
import pipmaster as pm
from ascii_colors import ASCIIColors
if not pm.is_installed("docling"):
    pm.install("docling")
from docling.document_converter import DocumentConverter

ASCIIColors.set_log_file("log.log")

lc = LollmsClient()
# Convert the article to markdown text
article_url = "https://arxiv.org/pdf/2109.09572"
converter = DocumentConverter()
result = converter.convert(article_url)
article_text = result.document.export_to_markdown()

ASCIIColors.info("Article loaded successfully")

# Use the sequential_summarize method from lollms
summary = lc.sequential_summarize(
    article_text,
    """
Extract the following information if present in the chunk:

1. **Title**:
   - Found in text chunk number 1 at the beginning. It should be followed by # or ##
   - Copy exactly as presented; do not interpret.
   - Never alter this if already in the memory. This is important.

2. **Authors**:
   - Listed in text chunk number 1 at the beginning.
   - If you fail to find the authors, keep this empty.
   - Copy exactly as presented; do not interpret.
   - Never alter this if already in the memory. This is important.

3. **Summary**:
   - Provide a concise but detailed summary of the article by adding new information from the text chunk to the memory content.

4. **Results**:
   - Extract quantified results if available.

Ensure that any information already in memory is retained unless explicitly updated by the current chunk.
""",
    "markdown",
    """Write a final markdown with these sections:
## Title
## Authors
## Summary
## Results
""",
    ctx_size=128000,
    chunk_size=4096,
    bootstrap_chunk_size=1024,
    bootstrap_steps=1,
    debug=True
)

ASCIIColors.yellow(summary)
```
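The docling conversion in this example only serves to turn the PDF into text; sequential_summarize itself takes a plain string. Below is a minimal sketch, not code from the release, of the same call driven from a local text file; the file path and prompt texts are placeholders, and the argument layout is assumed to match the example above:

```python
# Minimal sketch: same sequential_summarize call as above, but fed from a local file.
# The file path and prompt strings are placeholders, not part of the 0.13.0 release.
from pathlib import Path
from lollms_client import LollmsClient

lc = LollmsClient()
document_text = Path("my_notes.txt").read_text(encoding="utf-8")

summary = lc.sequential_summarize(
    document_text,
    "Extract the key points of this chunk and merge them into the memory without losing earlier points.",
    "markdown",
    "Write a final markdown summary with a '## Key points' section.",
    ctx_size=128000,
    chunk_size=4096,
    bootstrap_chunk_size=1024,
    bootstrap_steps=1,
)
print(summary)
```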
examples/deep_analyze/deep_analyse.py (new file, +30 lines):

```python
from lollms_client import LollmsClient
import pipmaster as pm
from ascii_colors import ASCIIColors
if not pm.is_installed("docling"):
    pm.install("docling")
from docling.document_converter import DocumentConverter

ASCIIColors.set_log_file("log.log")

lc = LollmsClient()
# Convert the article to markdown text
article_url = "https://arxiv.org/pdf/2109.09572"
converter = DocumentConverter()
result = converter.convert(article_url)
article_text = result.document.export_to_markdown()

ASCIIColors.info("Article loaded successfully")

# Use the deep_analyze method from lollms
result = lc.deep_analyze(
    "Explain what is the difference between HGG and QGG",
    article_text,
    ctx_size=128000,
    chunk_size=1024,
    bootstrap_chunk_size=512,
    bootstrap_steps=1,
    debug=True
)

print(result)
```
examples/deep_analyze/deep_analyze_multiple_files.py (new file, +32 lines):

```python
from lollms_client import LollmsClient
from pathlib import Path
import pipmaster as pm
from ascii_colors import ASCIIColors

ASCIIColors.set_log_file("log.log")
def load_and_analyze_files():
    folder_path = Path('.')  # Change '.' to your desired directory
    allowed_extensions = {'.pdf', '.txt', '.md', '.docx', '.pptx', '.html'}

    matching_files = []
    for file in folder_path.rglob('*'):
        if file.suffix.lower() in allowed_extensions and file.is_file():
            matching_files.append(str(file.absolute()))

    # Now use these files with LollmsClient
    lc = LollmsClient()
    ASCIIColors.info(f"Loading {len(matching_files)} files for analysis")

    result = lc.deep_analyze(
        "Explain what is the difference between HGG and QGG",
        files=matching_files,
        ctx_size=128000,
        chunk_size=1024,
        bootstrap_chunk_size=512,
        bootstrap_steps=1,
        debug=True
    )

    print(result)

load_and_analyze_files()
```
examples/function_call/functions_call_with images.py (new file, +52 lines):

```python
import cv2
from lollms_client import LollmsClient, LollmsTTS, TasksLibrary, FunctionCalling_Library
import random

# Initialize the LollmsClient instance
lc = LollmsClient()
tl = TasksLibrary(lc)
tts = LollmsTTS(lc)
fcl = FunctionCalling_Library(tl)
voices = tts.get_voices()
if voices:
    # Pick a voice randomly
    random_voice = random.choice(voices)
    print(f"Selected voice: {random_voice}")

# File path to save the captured image
file_path = "captured_image.jpg"
images = []
# Capture image from webcam and save it to a file
def capture_image():
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise Exception("Could not open webcam")

    ret, frame = cap.read()
    if not ret:
        raise Exception("Failed to capture image")
    images.clear()
    images.append(file_path)
    cv2.imwrite(file_path, frame)
    cap.release()
    return "Image captured successfully"


fcl.register_function("capture_image", capture_image, "Captures an image from the user webcam", [])


# Function to handle streaming callback
def cb(chunk, type):
    print(chunk, end="", flush=True)

# Generate text with image
response, function_calls = fcl.generate_with_functions_and_images(prompt="user: take a look at me then tell me how I look.\nassistant: ", images=images, stream=False, temperature=0.5, streaming_callback=cb)
print(f"response: {response}")
if len(function_calls) > 0:
    results = fcl.execute_function_calls(function_calls)
    result = "\n".join(results)
    prompt = "user: take a look at me then tell me how I look.\nassistant: " + response + f"\nfunction execution result: {result}\nassistant: "
    response, function_calls = fcl.generate_with_functions_and_images(prompt, images=images, stream=False, temperature=0.5, streaming_callback=cb)
    print(f"response: {response}")
    tts.text2Audio(response, random_voice)
```
examples/personality_test/chat_test.py (new file, +37 lines):

```python
from lollms_client import LollmsClient, LollmsDiscussion
from lollms_client import LollmsPersonality
from lollms_client import MSG_TYPE, ELF_GENERATION_FORMAT
from ascii_colors import ASCIIColors
# Streaming callback
def cb(chunk, type: MSG_TYPE):
    print(chunk, end="", flush=True)

# Initialize the LollmsClient instance
lc = LollmsClient("http://localhost:9600", default_generation_mode=ELF_GENERATION_FORMAT.OPENAI)
# Build inline personality
p = LollmsPersonality(
    lc,
    "./personality/test/work_dir",
    "./personality/test/config_dir",
    cb,
    None,
    author="ParisNeo",
    name="test_persona",
    user_name="user",
    category="generic",
    category_desc="generic stuff",
    language="English",
    personality_conditioning="!@>system: Act as a helper to the user.",
    welcome_message="Hi, I'm your helper. Let me help you",
)
d = LollmsDiscussion(lc)
prompt = ""
ASCIIColors.green("To quit press q")
ASCIIColors.yellow(p.welcome_message)
while prompt != "q":
    prompt = input("user:")
    if prompt == "q":
        break
    p.generate(d, prompt, stream=True)
    print("")
```
examples/personality_test/chat_with_aristotle.py (new file, +42 lines):

```python
from lollms_client import LollmsClient, LollmsDiscussion
from lollms_client import LollmsPersonality
from lollms_client import MSG_TYPE, ELF_GENERATION_FORMAT
from ascii_colors import ASCIIColors
# Streaming callback
def cb(chunk, type: MSG_TYPE):
    print(chunk, end="", flush=True)

# Initialize the LollmsClient instance
lc = LollmsClient("http://localhost:9600", default_generation_mode=ELF_GENERATION_FORMAT.LOLLMS)
# Build inline personality
aristotle_personality = LollmsPersonality(
    lc,
    "./personality/test/work_dir",
    "./personality/test/config_dir",
    cb,
    None,
    author="ParisNeo",
    name="test_persona",
    user_name="user",
    category="generic",
    category_desc="generic stuff",
    language="English",
    personality_conditioning="!@>system: Act as the philosopher Aristotle, sharing wisdom and engaging in logical discussions.",
    welcome_message="Greetings, I am Aristotle, your guide in the pursuit of knowledge. How may I assist you in your philosophical inquiries?",
)
# Create a Discussion instance for Aristotle
aristotle_discussion = LollmsDiscussion(lc)

# Initialize user prompt
prompt = ""

# Print welcome message in yellow
ASCIIColors.yellow(aristotle_personality.welcome_message)

# Interaction loop
while prompt.lower() != "q":
    prompt = input("student: ")
    if prompt.lower() == "q":
        break
    aristotle_personality.generate(aristotle_discussion, prompt, stream=True)
    print("")
```
examples/personality_test/tesks_test.py (new file, +62 lines):

````python
from lollms_client import LollmsClient, LollmsDiscussion
from lollms_client import TasksLibrary
from ascii_colors import ASCIIColors

lc = LollmsClient("http://localhost:9600")
tl = TasksLibrary(lc)

# ======================================= Multichoice Q&A ==========================
# Define a multichoice question
question = "What is the capital city of France?"

# Define the possible answers
possible_answers = ["Paris", "Berlin", "London", "Madrid"]

# Call the multichoice_question function with the question and possible answers
selected_option = tl.multichoice_question(question, possible_answers)

ASCIIColors.yellow(question)
ASCIIColors.green(possible_answers[selected_option])

# ======================================= Yes no ==========================
# Define a yes or no question
question = "Is Paris the capital city of France?"

# Call the yes_no function with the question
answer = tl.yes_no(question)
ASCIIColors.yellow(question)
ASCIIColors.green("Yes" if answer else "No")


# ======================================= Code extraction ==========================
# Define a text with code blocks
text = """
Here is some text with a code block:
```python
def hello_world():
    print("Hello, world!")
```
And here is another code block:
```java
public class HelloWorld {
    public static void main(String[] args) {
        System.out.println("Hello, World!");
    }
}
```
"""

# Call the extract_code_blocks function with the text
code_blocks = tl.extract_code_blocks(text)

# Print the extracted code blocks
for i, code_block in enumerate(code_blocks):
    ASCIIColors.bold(f"Code block {i + 1}:")
    ASCIIColors.bold(f"Index: {code_block['index']}")
    ASCIIColors.bold(f"File name: {code_block['file_name']}")
    ASCIIColors.bold(f"Content: {code_block['content']}")
    ASCIIColors.bold(f"Type: {code_block['type']}")
    print()
````
examples/simple_text_gen_test.py (new file, +171 lines):

```python
from lollms_client import LollmsClient, ELF_COMPLETION_FORMAT
from lollms_client.lollms_types import MSG_TYPE  # For callback signature
from ascii_colors import ASCIIColors, trace_exception

# --- Configuration ---
# Choose your LLM binding and parameters here
# Option 1: Default LOLLMS server binding
BINDING_NAME = "lollms"
HOST_ADDRESS = "http://localhost:9600"
MODEL_NAME = None  # Server will use its default or last loaded model

# Option 2: Ollama binding
# ensure you have the right models
# BINDING_NAME = "ollama"
# HOST_ADDRESS = "http://localhost:11434"  # Default Ollama host
# MODEL_NAME = "mistral:latest"  # Or "llama3:latest", "phi3:latest", etc. - ensure it's pulled in Ollama

# Option 3: OpenAI binding (requires OPENAI_API_KEY environment variable or service_key)
# BINDING_NAME = "openai"
# HOST_ADDRESS = None  # Defaults to OpenAI API
# MODEL_NAME = "gpt-3.5-turbo"
# SERVICE_KEY = ""  # Optional, can use env var

# --- Callback for streaming ---
def simple_streaming_callback(chunk: str, msg_type: MSG_TYPE, params=None, metadata=None) -> bool:
    """
    Simple callback function to print streamed text chunks.
    """
    if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
        print(chunk, end="", flush=True)
    elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
        ASCIIColors.error(f"\nStreaming Error: {chunk}")
    # Return True to continue streaming, False to stop
    return True

def test_text_generation():
    ASCIIColors.cyan(f"\n--- Testing Text Generation with '{BINDING_NAME}' binding ---")
    ASCIIColors.cyan(f"Host: {HOST_ADDRESS or 'Default'}, Model: {MODEL_NAME or 'Default'}")

    try:
        # Initialize LollmsClient
        lc_params = {
            "binding_name": BINDING_NAME,
            "host_address": HOST_ADDRESS,
            "model_name": MODEL_NAME,
            # "service_key": SERVICE_KEY,  # Uncomment for OpenAI if needed
        }
        # Remove None host_address for bindings that have internal defaults (like OpenAI)
        if lc_params["host_address"] is None and BINDING_NAME in ["openai"]:
            del lc_params["host_address"]

        lc = LollmsClient(**lc_params)

        # 1. Test basic non-streaming generation
        ASCIIColors.magenta("\n1. Basic Non-Streaming Generation:")
        prompt_non_stream = "Tell me a short joke about a programmer."
        ASCIIColors.yellow(f"Prompt: {prompt_non_stream}")
        response_non_stream = lc.generate_text(
            prompt=prompt_non_stream,
            stream=False,
            temperature=0.7,
            n_predict=100  # Max tokens for the joke
        )

        if isinstance(response_non_stream, str):
            ASCIIColors.green("Response:")
            print(response_non_stream)
        elif isinstance(response_non_stream, dict) and "error" in response_non_stream:
            ASCIIColors.error(f"Error in non-streaming generation: {response_non_stream['error']}")
        else:
            ASCIIColors.warning(f"Unexpected response format: {response_non_stream}")

        # 2. Test streaming generation
        ASCIIColors.magenta("\n\n2. Streaming Generation:")
        prompt_stream = "Explain the concept of recursion in one sentence."
        ASCIIColors.yellow(f"Prompt: {prompt_stream}")
        ASCIIColors.green("Response (streaming):")
        response_stream = lc.generate_text(
            prompt=prompt_stream,
            stream=True,
            streaming_callback=simple_streaming_callback,
            temperature=0.5,
            n_predict=150
        )
        print()  # Newline after streaming

        # The 'response_stream' variable will contain the full concatenated text if streaming_callback returns True throughout,
        # or an error dictionary if generation failed.
        if isinstance(response_stream, str):
            ASCIIColors.cyan(f"\n(Full streamed text was: {response_stream[:100]}...)")  # Show a snippet of full text
        elif isinstance(response_stream, dict) and "error" in response_stream:
            ASCIIColors.error(f"Error in streaming generation: {response_stream['error']}")

        print("Testing embedding")
        emb = lc.embed("hello")
        print(emb)

        # else: if callback returns False early, response_stream might be partial.

        # 3. Test generation with a specific model (if applicable and different from default)
        # This tests the switch_model or model loading mechanism of the binding.
        # For 'lollms' binding, this would set the model on the server.
        # For 'ollama' or 'openai', it means the next generate_text will use this model.
        ASCIIColors.magenta("\n\n3. List Available Models & Generate with Specific Model:")
        available_models = lc.listModels()
        if isinstance(available_models, list) and available_models:
            ASCIIColors.green("Available models:")
            for i, model_info in enumerate(available_models[:5]):  # Print first 5
                model_id = model_info.get('model_name', model_info.get('id', str(model_info)))
                print(f" - {model_id}")

            # Try to use the first available model (or a known one if list is too generic)
            target_model = None
            if BINDING_NAME == "ollama":
                # For Ollama, try using a different small model if available, or the same one
                if "phi3:latest" in [m.get('name') for m in available_models if isinstance(m, dict)]:
                    target_model = "phi3:latest"
                elif available_models:  # Fallback to first model in list if phi3 not present
                    first_model_entry = available_models[0]
                    target_model = first_model_entry.get('name', first_model_entry.get('model_name'))

            elif BINDING_NAME == "lollms":
                # For lollms, this would typically be a path or server-recognized name
                # This part is harder to make generic without knowing server's models
                ASCIIColors.yellow("For 'lollms' binding, ensure the target model is known to the server.")
                if available_models and isinstance(available_models[0], str):  # e.g. gptq model paths
                    target_model = available_models[0]

            if target_model and target_model != lc.binding.model_name:  # Only if different and valid
                ASCIIColors.info(f"\nSwitching to model (or using for next gen): {target_model}")
                # For bindings like ollama/openai, setting model_name on binding directly works.
                # For 'lollms' server binding, LollmsClient doesn't have a direct 'switch_model_on_server'
                # but setting lc.binding.model_name will make the next generate_text request it.
                lc.binding.model_name = target_model  # Update the binding's current model_name

                prompt_specific_model = f"What is the main capability of the {target_model.split(':')[0]} language model?"
                ASCIIColors.yellow(f"Prompt (for {target_model}): {prompt_specific_model}")
                ASCIIColors.green("Response:")
                response_specific = lc.generate_text(
                    prompt=prompt_specific_model,
                    stream=True,  # Keep it streaming for responsiveness
                    streaming_callback=simple_streaming_callback,
                    n_predict=200
                )
                print()
            elif target_model == lc.binding.model_name:
                ASCIIColors.yellow(f"Target model '{target_model}' is already the current model. Skipping specific model test.")
            else:
                ASCIIColors.yellow("Could not determine a different target model from the list to test specific model generation.")

        elif isinstance(available_models, dict) and "error" in available_models:
            ASCIIColors.error(f"Error listing models: {available_models['error']}")
        else:
            ASCIIColors.yellow("No models listed by the binding or format not recognized.")

    except ValueError as ve:
        ASCIIColors.error(f"Initialization Error: {ve}")
        trace_exception(ve)
    except RuntimeError as re:
        ASCIIColors.error(f"Runtime Error (binding likely not initialized): {re}")
        trace_exception(re)
    except Exception as e:
        ASCIIColors.error(f"An unexpected error occurred: {e}")
        trace_exception(e)

if __name__ == "__main__":
    test_text_generation()
```
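The release also adds two new binding packages, lollms_client/llm_bindings/tensor_rt and lollms_client/llm_bindings/vllm (603 lines each), whose diffs are not reproduced in this section. Assuming they register under the binding names suggested by their directory names, they should be selectable through the same constructor pattern used in the test script above. The snippet below is a hedged sketch, not code from the release: the binding names, host address and model identifier are all assumptions.

```python
# Hedged sketch only: selecting one of the newly added bindings with the same
# LollmsClient constructor pattern used in examples/simple_text_gen_test.py.
# "vllm" / "tensor_rt" as binding names, the host address and the model name
# are assumptions based on the new directories listed in this diff.
from lollms_client import LollmsClient

lc = LollmsClient(
    binding_name="vllm",                   # or "tensor_rt"
    host_address="http://localhost:8000",  # assumed server endpoint
    model_name="mistralai/Mistral-7B-Instruct-v0.2",  # assumed model identifier
)

response = lc.generate_text(
    prompt="Say hello from the new binding.",
    stream=False,
    n_predict=64,
)
print(response)
```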