lollms-client 0.32.1__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

Files changed (73)
  1. lollms_client/__init__.py +1 -1
  2. lollms_client/llm_bindings/azure_openai/__init__.py +6 -10
  3. lollms_client/llm_bindings/claude/__init__.py +4 -7
  4. lollms_client/llm_bindings/gemini/__init__.py +3 -7
  5. lollms_client/llm_bindings/grok/__init__.py +3 -7
  6. lollms_client/llm_bindings/groq/__init__.py +4 -7
  7. lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +4 -6
  8. lollms_client/llm_bindings/litellm/__init__.py +15 -6
  9. lollms_client/llm_bindings/llamacpp/__init__.py +214 -388
  10. lollms_client/llm_bindings/lollms/__init__.py +24 -14
  11. lollms_client/llm_bindings/lollms_webui/__init__.py +6 -12
  12. lollms_client/llm_bindings/mistral/__init__.py +58 -29
  13. lollms_client/llm_bindings/ollama/__init__.py +6 -11
  14. lollms_client/llm_bindings/open_router/__init__.py +45 -14
  15. lollms_client/llm_bindings/openai/__init__.py +7 -14
  16. lollms_client/llm_bindings/openllm/__init__.py +12 -12
  17. lollms_client/llm_bindings/pythonllamacpp/__init__.py +1 -1
  18. lollms_client/llm_bindings/tensor_rt/__init__.py +8 -13
  19. lollms_client/llm_bindings/transformers/__init__.py +14 -6
  20. lollms_client/llm_bindings/vllm/__init__.py +16 -12
  21. lollms_client/lollms_core.py +296 -487
  22. lollms_client/lollms_discussion.py +436 -78
  23. lollms_client/lollms_llm_binding.py +223 -11
  24. lollms_client/lollms_mcp_binding.py +33 -2
  25. lollms_client/mcp_bindings/local_mcp/__init__.py +3 -2
  26. lollms_client/mcp_bindings/remote_mcp/__init__.py +6 -5
  27. lollms_client/mcp_bindings/standard_mcp/__init__.py +3 -5
  28. lollms_client/stt_bindings/lollms/__init__.py +6 -8
  29. lollms_client/stt_bindings/whisper/__init__.py +2 -4
  30. lollms_client/stt_bindings/whispercpp/__init__.py +15 -16
  31. lollms_client/tti_bindings/dalle/__init__.py +29 -28
  32. lollms_client/tti_bindings/diffusers/__init__.py +25 -21
  33. lollms_client/tti_bindings/gemini/__init__.py +215 -0
  34. lollms_client/tti_bindings/lollms/__init__.py +8 -9
  35. lollms_client-1.0.0.dist-info/METADATA +1214 -0
  36. lollms_client-1.0.0.dist-info/RECORD +69 -0
  37. {lollms_client-0.32.1.dist-info → lollms_client-1.0.0.dist-info}/top_level.txt +0 -2
  38. examples/article_summary/article_summary.py +0 -58
  39. examples/console_discussion/console_app.py +0 -266
  40. examples/console_discussion.py +0 -448
  41. examples/deep_analyze/deep_analyse.py +0 -30
  42. examples/deep_analyze/deep_analyze_multiple_files.py +0 -32
  43. examples/function_calling_with_local_custom_mcp.py +0 -250
  44. examples/generate_a_benchmark_for_safe_store.py +0 -89
  45. examples/generate_and_speak/generate_and_speak.py +0 -251
  46. examples/generate_game_sfx/generate_game_fx.py +0 -240
  47. examples/generate_text_with_multihop_rag_example.py +0 -210
  48. examples/gradio_chat_app.py +0 -228
  49. examples/gradio_lollms_chat.py +0 -259
  50. examples/internet_search_with_rag.py +0 -226
  51. examples/lollms_chat/calculator.py +0 -59
  52. examples/lollms_chat/derivative.py +0 -48
  53. examples/lollms_chat/test_openai_compatible_with_lollms_chat.py +0 -12
  54. examples/lollms_discussions_test.py +0 -155
  55. examples/mcp_examples/external_mcp.py +0 -267
  56. examples/mcp_examples/local_mcp.py +0 -171
  57. examples/mcp_examples/openai_mcp.py +0 -203
  58. examples/mcp_examples/run_remote_mcp_example_v2.py +0 -290
  59. examples/mcp_examples/run_standard_mcp_example.py +0 -204
  60. examples/simple_text_gen_test.py +0 -173
  61. examples/simple_text_gen_with_image_test.py +0 -178
  62. examples/test_local_models/local_chat.py +0 -9
  63. examples/text_2_audio.py +0 -77
  64. examples/text_2_image.py +0 -144
  65. examples/text_2_image_diffusers.py +0 -274
  66. examples/text_and_image_2_audio.py +0 -59
  67. examples/text_gen.py +0 -30
  68. examples/text_gen_system_prompt.py +0 -29
  69. lollms_client-0.32.1.dist-info/METADATA +0 -854
  70. lollms_client-0.32.1.dist-info/RECORD +0 -101
  71. test/test_lollms_discussion.py +0 -368
  72. {lollms_client-0.32.1.dist-info → lollms_client-1.0.0.dist-info}/WHEEL +0 -0
  73. {lollms_client-0.32.1.dist-info → lollms_client-1.0.0.dist-info}/licenses/LICENSE +0 -0
examples/console_discussion.py
@@ -1,448 +0,0 @@
- import os
- import re
- import yaml
- import json
- from pathlib import Path
- from collections import defaultdict
- from typing import Dict, Optional
-
- # --- Mock RAG Backend (for demonstration purposes) ---
- # In a real app, this would be a proper vector database (ChromaDB, FAISS, etc.)
- MOCK_VECTOR_DB_PATH = Path("./rag_db")
- MOCK_VECTOR_DB_PATH.mkdir(exist_ok=True)
-
- def mock_vectorize_chunk(chunk_text: str, chunk_id: str):
-     # Simulate vectorization by just saving the chunk text to a file.
-     # A real implementation would convert chunk_text to a vector and store it.
-     (MOCK_VECTOR_DB_PATH / f"{chunk_id}.json").write_text(json.dumps({
-         "id": chunk_id,
-         "text": chunk_text
-     }, indent=2))
-
- def mock_is_vectorized(chunk_id: str) -> bool:
-     return (MOCK_VECTOR_DB_PATH / f"{chunk_id}.json").exists()
-
- def mock_query_rag(user_query: str) -> str:
-     # Simulate RAG by doing a simple keyword search across all chunk files.
-     # A real implementation would do a vector similarity search.
-     relevant_chunks = []
-     query_words = set(user_query.lower().split())
-     if not query_words:
-         return ""
-
-     for file in MOCK_VECTOR_DB_PATH.glob("*.json"):
-         data = json.loads(file.read_text(encoding='utf-8'))
-         if any(word in data["text"].lower() for word in query_words):
-             relevant_chunks.append(data["text"])
-
-     if not relevant_chunks:
-         return ""
-
-     return "\n---\n".join(relevant_chunks)
-
- # --- Library Imports ---
- # Assumes lollms_client.py, lollms_discussion.py, and lollms_personality.py are in the same directory or accessible in PYTHONPATH
- from lollms_client import LollmsClient, MSG_TYPE
- from lollms_client.lollms_discussion import LollmsDiscussion, LollmsDataManager
- from lollms_client.lollms_personality import LollmsPersonality
- from ascii_colors import ASCIIColors
- from sqlalchemy import Column, String
- from sqlalchemy.exc import IntegrityError
-
- # --- Application-Specific Schema ---
- class ResearchDiscussionMixin:
-     project_name = Column(String(100), index=True, nullable=False, unique=True)
-
- class ResearchMessageMixin:
-     pass  # No custom fields needed for this demo
-
- # --- Personality Management ---
- def load_personalities(personalities_path: Path) -> Dict[str, LollmsPersonality]:
-     """Loads all personalities from a directory of YAML files."""
-     personalities = {}
-     if not personalities_path.is_dir():
-         return {}
-
-     for file_path in personalities_path.glob("*.yaml"):
-         try:
-             config = yaml.safe_load(file_path.read_text(encoding='utf-8'))
-
-             script_content = None
-             script_path = file_path.with_suffix(".py")
-             if script_path.exists():
-                 script_content = script_path.read_text(encoding='utf-8')
-
-             # Make data file paths relative to the personalities folder
-             data_files = [personalities_path / f for f in config.get("data_files", [])]
-
-             personality = LollmsPersonality(
-                 name=config.get("name", file_path.stem),
-                 author=config.get("author", "Unknown"),
-                 category=config.get("category", "General"),
-                 description=config.get("description", ""),
-                 system_prompt=config.get("system_prompt", "You are a helpful AI."),
-                 data_files=data_files,
-                 script=script_content,
-                 vectorize_chunk_callback=mock_vectorize_chunk,
-                 is_vectorized_callback=mock_is_vectorized,
-                 query_rag_callback=mock_query_rag
-             )
-             personalities[personality.personality_id] = personality
-         except Exception as e:
-             ASCIIColors.red(f"Failed to load personality from {file_path.name}: {e}")
-
-     return personalities
-
- def select_personality(personalities: Dict[str, LollmsPersonality]) -> Optional[LollmsPersonality]:
-     """UI for selecting a personality."""
-     if not personalities:
-         ASCIIColors.yellow("No personalities found.")
-         return None
-
-     print("\n--- Select a Personality ---")
-     sorted_p = sorted(personalities.values(), key=lambda p: (p.category, p.name))
-     for i, p in enumerate(sorted_p):
-         print(f"{i+1}. {p.category}/{p.name} (by {p.author})")
-     print("0. Deselect Personality")
-
-     while True:
-         try:
-             choice_str = input("> ")
-             if not choice_str: return None
-             choice = int(choice_str)
-             if choice == 0:
-                 return None
-             if 1 <= choice <= len(sorted_p):
-                 return sorted_p[choice - 1]
-             else:
-                 ASCIIColors.red("Invalid number.")
-         except ValueError:
-             ASCIIColors.red("Please enter a number.")
-
- # --- Main Application Logic ---
- def main():
-     print("--- LOLLMS Advanced Agentic Framework ---")
-     try:
-         lc = LollmsClient("ollama", model_name="qwen3:4b")
-         print("LollmsClient connected successfully to Ollama.")
-     except Exception as e:
-         print(f"\nFATAL: Could not connect to LLM binding. Is the service running?\nError: {e}")
-         return
-
-     DB_PATH = "sqlite:///research_projects_final.db"
-     ENCRYPTION_KEY = "a-very-secure-password-for-the-database"
-
-     try:
-         db_manager = LollmsDataManager(
-             db_path=DB_PATH,
-             discussion_mixin=ResearchDiscussionMixin,
-             message_mixin=ResearchMessageMixin,
-             encryption_key=ENCRYPTION_KEY
-         )
-         print(f"Database setup complete. Encryption is ENABLED.")
-     except Exception as e:
-         print(f"\nFATAL: Could not initialize database. Error: {e}")
-         return
-
-     personalities_path = Path("./personalities")
-     personalities = load_personalities(personalities_path)
-     print(f"Loaded {len(personalities)} personalities.")
-
-     discussion: Optional[LollmsDiscussion] = None
-     personality: Optional[LollmsPersonality] = None
-
-     while True:
-         print("\n" + "="*20 + " Main Menu " + "="*20)
-         if discussion:
-             p_name = f" with '{personality.name}'" if personality else ""
-             ASCIIColors.cyan(f"Current Project: '{discussion.project_name}'{p_name}")
-             print("c. Chat in current project")
-             print("r. Regenerate last AI response")
-         print("l. List all projects")
-         print("s. Search for a project")
-         print("n. Start a new project")
-         print("o. Open an existing project")
-         print("d. Delete a project")
-         print("p. Select a Personality")
-         print("e. Exit")
-
-         choice = input("> ").lower().strip()
-
-         if choice == 'c' and discussion:
-             chat_loop(discussion, personality)
-         elif choice == 'r' and discussion:
-             regenerate_response(discussion, personality)
-         elif choice == 'l':
-             list_all_projects(db_manager)
-         elif choice == 's':
-             search_for_project(db_manager)
-         elif choice == 'n':
-             new_discussion = start_new_project(lc, db_manager)
-             if new_discussion: discussion = new_discussion
-         elif choice == 'o':
-             new_discussion = open_project(lc, db_manager)
-             if new_discussion: discussion = new_discussion
-         elif choice == 'd':
-             delete_project(db_manager)
-             if discussion and not db_manager.get_discussion(lc, discussion.id):
-                 discussion = None
-         elif choice == 'p':
-             personality = select_personality(personalities)
-             if personality:
-                 ASCIIColors.green(f"Personality '{personality.name}' selected.")
-             else:
-                 ASCIIColors.yellow("No personality selected.")
-         elif choice == 'e':
-             if discussion: discussion.close()
-             break
-         else:
-             ASCIIColors.red("Invalid choice.")
-
-     print("\n--- Demo complete. Database and RAG files are preserved. ---")
-
- # --- UI Functions ---
- def list_all_projects(db_manager: LollmsDataManager):
-     projects = db_manager.list_discussions()
-     if not projects:
-         ASCIIColors.yellow("No projects found.")
-         return
-     print("\n--- All Projects ---")
-     for p in projects:
-         print(f"- Name: {p['project_name']:<30} | ID: {p['id']}")
-
- def search_for_project(db_manager: LollmsDataManager):
-     term = input("Enter search term for project name: ").strip()
-     if not term: return
-     projects = db_manager.search_discussions(project_name=term)
-     if not projects:
-         ASCIIColors.yellow(f"No projects found matching '{term}'.")
-         return
-     print(f"\n--- Search Results for '{term}' ---")
-     for p in projects:
-         print(f"- Name: {p['project_name']:<30} | ID: {p['id']}")
-
- def start_new_project(lc: LollmsClient, db_manager: LollmsDataManager) -> Optional[LollmsDiscussion]:
-     name = input("Enter new project name: ").strip()
-     if not name:
-         ASCIIColors.red("Project name cannot be empty.")
-         return None
-     try:
-         discussion = LollmsDiscussion.create_new(
-             lollms_client=lc, db_manager=db_manager,
-             autosave=True, project_name=name
-         )
-         ASCIIColors.green(f"Project '{name}' created successfully.")
-         return discussion
-     except IntegrityError:
-         ASCIIColors.red(f"Failed to create project. A project named '{name}' already exists.")
-         return None
-     except Exception as e:
-         ASCIIColors.red(f"An unexpected error occurred while creating the project: {e}")
-         return None
-
- def open_project(lc: LollmsClient, db_manager: LollmsDataManager) -> Optional[LollmsDiscussion]:
-     list_all_projects(db_manager)
-     disc_id = input("Enter project ID to open: ").strip()
-     if not disc_id: return None
-     discussion = db_manager.get_discussion(lollms_client=lc, discussion_id=disc_id, autosave=True)
-     if not discussion:
-         ASCIIColors.red("Project not found."); return None
-     ASCIIColors.green(f"Opened project '{discussion.project_name}'.")
-     return discussion
-
- def delete_project(db_manager: LollmsDataManager):
-     list_all_projects(db_manager)
-     disc_id = input("Enter project ID to DELETE: ").strip()
-     if not disc_id: return
-     confirm = input(f"Are you sure you want to permanently delete project {disc_id}? (y/N): ").lower()
-     if confirm == 'y':
-         db_manager.delete_discussion(disc_id)
-         ASCIIColors.green("Project deleted.")
-     else:
-         ASCIIColors.yellow("Deletion cancelled.")
-
- def display_branch_history(discussion: LollmsDiscussion):
-     current_branch = discussion.get_branch(discussion.active_branch_id)
-     if not current_branch: return
-     ASCIIColors.cyan("\n--- Current Conversation History (Active Branch) ---")
-     for msg in current_branch:
-         if msg.sender_type == 'user':
-             ASCIIColors.green(f"\nYou: {msg.content}")
-         else:
-             ASCIIColors.blue(f"\nAI: {msg.content}")
-             speed_str = f"{msg.generation_speed:.1f} t/s" if msg.generation_speed is not None else "N/A"
-             ASCIIColors.dim(f"  [Model: {msg.model_name}, Tokens: {msg.tokens}, Speed: {speed_str}]")
-             if msg.thoughts:
-                 ASCIIColors.dark_gray(f"  [Thoughts: {msg.thoughts[:100]}...]")
-             if msg.scratchpad:
-                 ASCIIColors.yellow(f"  [Scratchpad: {msg.scratchpad[:100]}...]")
-     ASCIIColors.cyan("-----------------------------------------------------")
-
- def display_message_tree(discussion: LollmsDiscussion):
-     print("\n--- Project Message Tree ---")
-     messages_by_id = {msg.id: msg for msg in discussion.messages}
-     children_map = defaultdict(list)
-     root_ids = []
-     for msg in messages_by_id.values():
-         if msg.parent_id and msg.parent_id in messages_by_id:
-             children_map[msg.parent_id].append(msg.id)
-         else:
-             root_ids.append(msg.id)
-     def print_node(msg_id, indent=""):
-         msg = messages_by_id.get(msg_id)
-         if not msg: return
-         is_active = " (*)" if msg.id == discussion.active_branch_id else ""
-         color = ASCIIColors.green if msg.sender_type == "user" else ASCIIColors.blue
-         content_preview = re.sub(r'\s+', ' ', msg.content).strip()[:50] + "..."
-         color(f"{indent}├─ {msg.id[-8:]}{is_active} ({msg.sender}): {content_preview}")
-         for child_id in children_map.get(msg_id, []):
-             print_node(child_id, indent + "  ")
-     for root_id in root_ids:
-         print_node(root_id)
-     print("----------------------------")
-
- def handle_config_command(discussion: LollmsDiscussion):
-     while True:
-         ASCIIColors.cyan("\n--- Thought Configuration ---")
-         ASCIIColors.yellow(f"1. Show Thoughts during generation : {'ON' if discussion.show_thoughts else 'OFF'}")
-         ASCIIColors.yellow(f"2. Include Thoughts in AI context  : {'ON' if discussion.include_thoughts_in_context else 'OFF'}")
-         ASCIIColors.yellow(f"3. Thought Placeholder text        : '{discussion.thought_placeholder}'")
-         print("Enter number to toggle, 3 to set text, or 'back'.")
-         choice = input("> ").lower().strip()
-         if choice == '1': discussion.show_thoughts = not discussion.show_thoughts
-         elif choice == '2': discussion.include_thoughts_in_context = not discussion.include_thoughts_in_context
-         elif choice == '3': discussion.thought_placeholder = input("Enter new placeholder text: ")
-         elif choice == 'back': break
-         else: ASCIIColors.red("Invalid choice.")
-
- def handle_info_command(discussion: LollmsDiscussion):
-     ASCIIColors.cyan("\n--- Discussion Info ---")
-     rem_tokens = discussion.remaining_tokens
-     if rem_tokens is not None:
-         max_ctx = discussion.lollmsClient.binding.ctx_size
-         ASCIIColors.yellow(f"Context Window: {rem_tokens} / {max_ctx} tokens remaining")
-     else:
-         ASCIIColors.yellow("Context Window: Max size not available from binding.")
-     handle_config_command(discussion)
-
- def chat_loop(discussion: LollmsDiscussion, personality: Optional[LollmsPersonality]):
-     display_branch_history(discussion)
-
-     print("\n--- Entering Chat ---")
-     p_name = f" (with '{personality.name}')" if personality else ""
-     ASCIIColors.cyan(f"Commands: /back, /tree, /switch <id>, /process, /history, /config, /info{p_name}")
-
-     def stream_to_console(token: str, msg_type: MSG_TYPE):
-         if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: print(token, end="", flush=True)
-         elif msg_type == MSG_TYPE.MSG_TYPE_THOUGHT_CHUNK: ASCIIColors.magenta(token, end="", flush=True)
-         return True
-
-     while True:
-         user_input = input("\nYou > ").strip()
-         if not user_input: continue
-         if user_input.lower() == '/back': break
-         if user_input.lower() == '/history': display_branch_history(discussion); continue
-         if user_input.lower() == '/tree': display_message_tree(discussion); continue
-         if user_input.lower() == '/config': handle_config_command(discussion); continue
-         if user_input.lower() == '/info': handle_info_command(discussion); continue
-
-         if user_input.lower().startswith('/switch '):
-             try:
-                 msg_id_part = user_input.split(' ', 1)[1]
-                 # Find the full message ID from the partial one
-                 full_id = next((mid for mid in discussion._message_index if mid.endswith(msg_id_part)), None)
-                 if not full_id: raise ValueError(f"No message found ending with '{msg_id_part}'")
-                 discussion.switch_to_branch(full_id)
-                 ASCIIColors.green(f"Switched to branch ending at message {full_id}.")
-                 display_branch_history(discussion)
-             except IndexError: ASCIIColors.red("Usage: /switch <last_8_chars_of_id>")
-             except ValueError as e: ASCIIColors.red(f"Error: {e}")
-             continue
-
-         if user_input.lower() == '/process':
-             try:
-                 file_path_str = input("Enter path to text file: ").strip()
-                 chunk_size_str = input("Enter chunk size in characters [4096]: ").strip() or "4096"
-                 file_path = Path(file_path_str)
-                 if not file_path.exists():
-                     ASCIIColors.red(f"File not found: {file_path}"); continue
-                 large_text = file_path.read_text(encoding='utf-8')
-                 ASCIIColors.yellow(f"Read {len(large_text)} characters from file.")
-                 user_prompt = input("What should I do with this text? > ").strip()
-                 if not user_prompt:
-                     ASCIIColors.red("Prompt cannot be empty."); continue
-
-                 ASCIIColors.blue("AI is processing the document...")
-                 ai_message = discussion.process_and_summarize(large_text, user_prompt, chunk_size=int(chunk_size_str))
-                 ASCIIColors.blue(f"\nAI: {ai_message.content}")
-                 if ai_message.scratchpad:
-                     ASCIIColors.yellow(f"  [AI's Scratchpad: {ai_message.scratchpad[:150]}...]")
-             except Exception as e: ASCIIColors.red(f"An error occurred during processing: {e}")
-             continue
-
-         print("AI > ", end="", flush=True)
-         discussion.chat(user_input, personality=personality, streaming_callback=stream_to_console)
-         print()
-
- def regenerate_response(discussion: LollmsDiscussion, personality: Optional[LollmsPersonality]):
-     try:
-         ASCIIColors.yellow("\nRegenerating last AI response (new branch will be created)...")
-         print("New AI > ", end="", flush=True)
-         def stream_to_console(token: str, msg_type: MSG_TYPE):
-             if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: print(token, end="", flush=True)
-             elif msg_type == MSG_TYPE.MSG_TYPE_THOUGHT_CHUNK: ASCIIColors.magenta(token, end="", flush=True)
-             return True
-         discussion.regenerate_branch(personality=personality, streaming_callback=stream_to_console)
-         print()
-         ASCIIColors.green(f"New branch created. Active message is now {discussion.active_branch_id}")
-         ASCIIColors.cyan("Use '/tree' to see the branching structure.")
-     except (ValueError, AttributeError) as e:
-         ASCIIColors.red(f"Could not regenerate: {e}")
-
- if __name__ == "__main__":
-     # --- Create dummy personalities and data for first-time run ---
-     personalities_folder = Path("./personalities")
-     personalities_folder.mkdir(exist_ok=True)
-
-     lollms_facts_file = personalities_folder / "lollms_facts.txt"
-     if not lollms_facts_file.exists():
-         lollms_facts_file.write_text(
-             "LoLLMs is a project created by ParisNeo. It stands for Lord of Large Language Models. It aims to provide a unified interface for all LLMs. The client library allows for advanced discussion and agentic features."
-         )
-
-     lollms_expert_yaml = personalities_folder / "lollms_expert.yaml"
-     if not lollms_expert_yaml.exists():
-         lollms_expert_yaml.write_text("""
- name: LoLLMs Expert
- author: Manual
- category: AI Tools
- description: An expert on the LoLLMs project.
- system_prompt: You are an expert on the LoLLMs project. Answer questions based on the provided information. Be concise.
- data_files:
-   - lollms_facts.txt
- """)
-
-     parrot_yaml = personalities_folder / "parrot.yaml"
-     if not parrot_yaml.exists():
-         parrot_yaml.write_text("""
- name: Parrot
- author: Manual
- category: Fun
- description: A personality that just repeats what you say.
- system_prompt: You are a parrot. You must start every sentence with 'Squawk!'.
- """)
-
-     parrot_py = personalities_folder / "parrot.py"
-     if not parrot_py.exists():
-         parrot_py.write_text("""
- def run(discussion, on_chunk_callback):
-     # This script overrides the normal chat flow.
-     user_message = discussion.get_branch(discussion.active_branch_id)[-1].content
-     response = f"Squawk! {user_message}! Squawk!"
-     if on_chunk_callback:
-         # We need to simulate the message type for the callback
-         from lollms_client import MSG_TYPE
-         on_chunk_callback(response, MSG_TYPE.MSG_TYPE_CHUNK)
-     return response  # Return the full raw response
- """)
-     main()
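The removed example stubs its RAG callbacks with a keyword search over JSON files, and its own comment notes that a real application would use a proper vector database (ChromaDB, FAISS, etc.). Below is a minimal sketch of the same three callbacks backed by ChromaDB; the persistence path, collection name, and reliance on ChromaDB's default embedding function are assumptions, not part of the original example.

import chromadb

# Hypothetical replacements for mock_vectorize_chunk, mock_is_vectorized,
# and mock_query_rag above; same signatures, so they could be passed to
# LollmsPersonality unchanged.
client = chromadb.PersistentClient(path="./rag_db")  # assumed storage path
collection = client.get_or_create_collection("personality_chunks")  # assumed name

def vectorize_chunk(chunk_text: str, chunk_id: str):
    # ChromaDB embeds the document with its default embedding function.
    collection.add(ids=[chunk_id], documents=[chunk_text])

def is_vectorized(chunk_id: str) -> bool:
    return len(collection.get(ids=[chunk_id])["ids"]) > 0

def query_rag(user_query: str) -> str:
    # Vector similarity search instead of the mock keyword match.
    results = collection.query(query_texts=[user_query], n_results=3)
    docs = results["documents"][0] if results["documents"] else []
    return "\n---\n".join(docs)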
examples/deep_analyze/deep_analyse.py
@@ -1,30 +0,0 @@
- from lollms_client import LollmsClient
- import pipmaster as pm
- from ascii_colors import ASCIIColors
- if not pm.is_installed("docling"):
-     pm.install("docling")
- from docling.document_converter import DocumentConverter
-
- ASCIIColors.set_log_file("log.log")
-
- lc = LollmsClient()
- # Fetch the article from arXiv and convert it to markdown
- article_url = "https://arxiv.org/pdf/2109.09572"
- converter = DocumentConverter()
- result = converter.convert(article_url)
- article_text = result.document.export_to_markdown()
-
- ASCIIColors.info("Article loaded successfully")
-
- # Use the deep_analyze method from lollms_client
- result = lc.deep_analyze(
-     "Explain what is the difference between HGG and QGG",
-     article_text,
-     ctx_size=128000,
-     chunk_size=1024,
-     bootstrap_chunk_size=512,
-     bootstrap_steps=1,
-     debug=True
- )
-
- print(result)
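The docling dependency above is only needed to fetch and convert the PDF; deep_analyze itself takes plain text, so any local file can be analyzed directly. A minimal sketch with a placeholder file name and prompt; the parameter comments describe assumed semantics inferred from the names, not documented behavior.

from pathlib import Path
from lollms_client import LollmsClient

lc = LollmsClient()
text = Path("notes.txt").read_text(encoding="utf-8")  # placeholder local file

answer = lc.deep_analyze(
    "Summarize the key claims of this document",  # placeholder prompt
    text,
    ctx_size=128000,           # same knobs as the removed example
    chunk_size=1024,           # presumably the per-chunk analysis size
    bootstrap_chunk_size=512,  # presumably a smaller size for the first passes
    bootstrap_steps=1,
    debug=True,
)
print(answer)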
examples/deep_analyze/deep_analyze_multiple_files.py
@@ -1,32 +0,0 @@
- from lollms_client import LollmsClient
- from pathlib import Path
- import pipmaster as pm
- from ascii_colors import ASCIIColors
-
- ASCIIColors.set_log_file("log.log")
- def load_and_analyze_files():
-     folder_path = Path('.')  # Change '.' to your desired directory
-     allowed_extensions = {'.pdf', '.txt', '.md', '.docx', '.pptx', '.html'}
-
-     matching_files = []
-     for file in folder_path.rglob('*'):
-         if file.suffix.lower() in allowed_extensions and file.is_file():
-             matching_files.append(str(file.absolute()))
-
-     # Now use these files with LollmsClient
-     lc = LollmsClient()
-     ASCIIColors.info(f"Loading {len(matching_files)} files for analysis")
-
-     result = lc.deep_analyze(
-         "Explain what is the difference between HGG and QGG",
-         files=matching_files,
-         ctx_size=128000,
-         chunk_size=1024,
-         bootstrap_chunk_size=512,
-         bootstrap_steps=1,
-         debug=True
-     )
-
-     print(result)
-
- load_and_analyze_files()