lollms-client 0.32.1__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published.

Potentially problematic release.

This version of lollms-client might be problematic.

Files changed (73)
  1. lollms_client/__init__.py +1 -1
  2. lollms_client/llm_bindings/azure_openai/__init__.py +6 -10
  3. lollms_client/llm_bindings/claude/__init__.py +4 -7
  4. lollms_client/llm_bindings/gemini/__init__.py +3 -7
  5. lollms_client/llm_bindings/grok/__init__.py +3 -7
  6. lollms_client/llm_bindings/groq/__init__.py +4 -7
  7. lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +4 -6
  8. lollms_client/llm_bindings/litellm/__init__.py +15 -6
  9. lollms_client/llm_bindings/llamacpp/__init__.py +214 -388
  10. lollms_client/llm_bindings/lollms/__init__.py +24 -14
  11. lollms_client/llm_bindings/lollms_webui/__init__.py +6 -12
  12. lollms_client/llm_bindings/mistral/__init__.py +58 -29
  13. lollms_client/llm_bindings/ollama/__init__.py +6 -11
  14. lollms_client/llm_bindings/open_router/__init__.py +45 -14
  15. lollms_client/llm_bindings/openai/__init__.py +7 -14
  16. lollms_client/llm_bindings/openllm/__init__.py +12 -12
  17. lollms_client/llm_bindings/pythonllamacpp/__init__.py +1 -1
  18. lollms_client/llm_bindings/tensor_rt/__init__.py +8 -13
  19. lollms_client/llm_bindings/transformers/__init__.py +14 -6
  20. lollms_client/llm_bindings/vllm/__init__.py +16 -12
  21. lollms_client/lollms_core.py +296 -487
  22. lollms_client/lollms_discussion.py +436 -78
  23. lollms_client/lollms_llm_binding.py +223 -11
  24. lollms_client/lollms_mcp_binding.py +33 -2
  25. lollms_client/mcp_bindings/local_mcp/__init__.py +3 -2
  26. lollms_client/mcp_bindings/remote_mcp/__init__.py +6 -5
  27. lollms_client/mcp_bindings/standard_mcp/__init__.py +3 -5
  28. lollms_client/stt_bindings/lollms/__init__.py +6 -8
  29. lollms_client/stt_bindings/whisper/__init__.py +2 -4
  30. lollms_client/stt_bindings/whispercpp/__init__.py +15 -16
  31. lollms_client/tti_bindings/dalle/__init__.py +29 -28
  32. lollms_client/tti_bindings/diffusers/__init__.py +25 -21
  33. lollms_client/tti_bindings/gemini/__init__.py +215 -0
  34. lollms_client/tti_bindings/lollms/__init__.py +8 -9
  35. lollms_client-1.0.0.dist-info/METADATA +1214 -0
  36. lollms_client-1.0.0.dist-info/RECORD +69 -0
  37. {lollms_client-0.32.1.dist-info → lollms_client-1.0.0.dist-info}/top_level.txt +0 -2
  38. examples/article_summary/article_summary.py +0 -58
  39. examples/console_discussion/console_app.py +0 -266
  40. examples/console_discussion.py +0 -448
  41. examples/deep_analyze/deep_analyse.py +0 -30
  42. examples/deep_analyze/deep_analyze_multiple_files.py +0 -32
  43. examples/function_calling_with_local_custom_mcp.py +0 -250
  44. examples/generate_a_benchmark_for_safe_store.py +0 -89
  45. examples/generate_and_speak/generate_and_speak.py +0 -251
  46. examples/generate_game_sfx/generate_game_fx.py +0 -240
  47. examples/generate_text_with_multihop_rag_example.py +0 -210
  48. examples/gradio_chat_app.py +0 -228
  49. examples/gradio_lollms_chat.py +0 -259
  50. examples/internet_search_with_rag.py +0 -226
  51. examples/lollms_chat/calculator.py +0 -59
  52. examples/lollms_chat/derivative.py +0 -48
  53. examples/lollms_chat/test_openai_compatible_with_lollms_chat.py +0 -12
  54. examples/lollms_discussions_test.py +0 -155
  55. examples/mcp_examples/external_mcp.py +0 -267
  56. examples/mcp_examples/local_mcp.py +0 -171
  57. examples/mcp_examples/openai_mcp.py +0 -203
  58. examples/mcp_examples/run_remote_mcp_example_v2.py +0 -290
  59. examples/mcp_examples/run_standard_mcp_example.py +0 -204
  60. examples/simple_text_gen_test.py +0 -173
  61. examples/simple_text_gen_with_image_test.py +0 -178
  62. examples/test_local_models/local_chat.py +0 -9
  63. examples/text_2_audio.py +0 -77
  64. examples/text_2_image.py +0 -144
  65. examples/text_2_image_diffusers.py +0 -274
  66. examples/text_and_image_2_audio.py +0 -59
  67. examples/text_gen.py +0 -30
  68. examples/text_gen_system_prompt.py +0 -29
  69. lollms_client-0.32.1.dist-info/METADATA +0 -854
  70. lollms_client-0.32.1.dist-info/RECORD +0 -101
  71. test/test_lollms_discussion.py +0 -368
  72. {lollms_client-0.32.1.dist-info → lollms_client-1.0.0.dist-info}/WHEEL +0 -0
  73. {lollms_client-0.32.1.dist-info → lollms_client-1.0.0.dist-info}/licenses/LICENSE +0 -0
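The headline change in 1.0.0 is packaging hygiene: the `examples/` and `test/` trees (items 38–68 and 71) no longer ship inside the wheel, a new `gemini` TTI binding is added (item 33), and `llamacpp`, `lollms_core`, and `lollms_discussion` carry the largest code changes. To reproduce this file-level comparison locally, a minimal sketch; it assumes both wheels were fetched with `pip download` and carry the standard wheel filenames:

```python
import zipfile

# Fetch the two releases first (in a scratch directory):
#   pip download --no-deps lollms-client==0.32.1
#   pip download --no-deps lollms-client==1.0.0
old = set(zipfile.ZipFile("lollms_client-0.32.1-py3-none-any.whl").namelist())
new = set(zipfile.ZipFile("lollms_client-1.0.0-py3-none-any.whl").namelist())

for path in sorted(old - new):
    print("-", path)  # removed in 1.0.0: the examples/ and test/ files
for path in sorted(new - old):
    print("+", path)  # added in 1.0.0, e.g. the gemini TTI binding
```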
@@ -0,0 +1,69 @@
+ lollms_client/__init__.py,sha256=TB277pr7BJ2eH1ezWZ-B0_8lxMBqjQYPhc2jUJLOASg,1146
+ lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
+ lollms_client/lollms_core.py,sha256=68EFgqGyw0cpJQOhJZrRzogd20j1zSghwLBie6kyJGI,167221
+ lollms_client/lollms_discussion.py,sha256=wkadV6qiegxOzukMVn5vukdeJivnlyygSzZBkzOi9Gc,106714
+ lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
+ lollms_client/lollms_llm_binding.py,sha256=TBdFNNktpIUSUd4mlUHeNUPUQeLRWKBj80UTJ-YdwBg,24940
+ lollms_client/lollms_mcp_binding.py,sha256=psb27A23VFWDfZsR2WUbQXQxiZDW5yfOak6ZtbMfszI,10222
+ lollms_client/lollms_mcp_security.py,sha256=FhVTDhSBjksGEZnopVnjFmEF5dv7D8bBTqoaj4BiF0E,3562
+ lollms_client/lollms_personality.py,sha256=O-9nqZhazcITOkxjT24ENTxTmIoZLgqIsQ9WtWs0Id0,8719
+ lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
+ lollms_client/lollms_stt_binding.py,sha256=jAUhLouEhh2hmm1bK76ianfw_6B59EHfY3FmLv6DU-g,5111
+ lollms_client/lollms_tti_binding.py,sha256=afO0-d-Kqsmh8UHTijTvy6dZAt-XDB6R-IHmdbf-_fs,5928
+ lollms_client/lollms_ttm_binding.py,sha256=FjVVSNXOZXK1qvcKEfxdiX6l2b4XdGOSNnZ0utAsbDg,4167
+ lollms_client/lollms_tts_binding.py,sha256=5cJYECj8PYLJAyB6SEH7_fhHYK3Om-Y3arkygCnZ24o,4342
+ lollms_client/lollms_ttv_binding.py,sha256=KkTaHLBhEEdt4sSVBlbwr5i_g_TlhcrwrT-7DjOsjWQ,4131
+ lollms_client/lollms_types.py,sha256=0iSH1QHRRD-ddBqoL9EEKJ8wWCuwDUlN_FrfbCdg7Lw,3522
+ lollms_client/lollms_utilities.py,sha256=3DAsII2X9uhRzRL-D0QlALcEdRg82y7OIL4yHVF32gY,19446
+ lollms_client/llm_bindings/__init__.py,sha256=9sWGpmWSSj6KQ8H4lKGCjpLYwhnVdL_2N7gXCphPqh4,14
+ lollms_client/llm_bindings/azure_openai/__init__.py,sha256=XBDwct0nkvWfpo1J9J9lTOszH_c_4IiCYxEsG6aJLo0,16501
+ lollms_client/llm_bindings/claude/__init__.py,sha256=tzt9sR-9WlkgTgDBOtV708ZmuBjMm55fEYhurMnfXO4,24669
+ lollms_client/llm_bindings/gemini/__init__.py,sha256=bMzjVo_LU5uC493q4DVjq-WVotzgTZN1T_w0Qqv5aDg,21386
+ lollms_client/llm_bindings/grok/__init__.py,sha256=tVIIl2uXpBYD7ia3k8JqYM8uvAVYlp-mtG-8D3LFVS8,22929
+ lollms_client/llm_bindings/groq/__init__.py,sha256=EGrMh9vuCoM4pskDw8ydfsAWYgEb423e9HBwqdO2JQc,12120
+ lollms_client/llm_bindings/hugging_face_inference_api/__init__.py,sha256=SFcj5XQTDmN9eR4of82IgQa9iRYZaGlF6rMlF5S5wWg,13938
+ lollms_client/llm_bindings/litellm/__init__.py,sha256=lRH4VfZMUG5JCCj6a7hk2PTfSyDowAu-ujLOM-XPl-8,12756
+ lollms_client/llm_bindings/llamacpp/__init__.py,sha256=4CbNYpfquVEgfsxuLsxQta_dZRSpbSBL-VWhyDMdBAc,59379
+ lollms_client/llm_bindings/lollms/__init__.py,sha256=a4gNH4axiDgsri8NGAcq0OitgYdnzBDLNkzUMhkFArA,24781
+ lollms_client/llm_bindings/lollms_webui/__init__.py,sha256=iuDfhZZoLC-PDEPLHrcjk5-962S5c7OeCI7PMdJxI_A,17753
+ lollms_client/llm_bindings/mistral/__init__.py,sha256=cddz9xIj8NRFLKHe2JMxzstpUrNIu5s9juci3mhiHfo,14133
+ lollms_client/llm_bindings/ollama/__init__.py,sha256=d61pSEWlJ2KOvnaztji2wblvadu0oTelEJeHG4IcL9I,41193
+ lollms_client/llm_bindings/open_router/__init__.py,sha256=cAFWtCWJx0WjIe1w2JReCf6WlAZjrXYA4jZ8l3zqxMs,14915
+ lollms_client/llm_bindings/openai/__init__.py,sha256=J8v7XU9TrvXJd1ffwhYkya5YeXxWnNiFuNBAwRfoHDk,26066
+ lollms_client/llm_bindings/openllm/__init__.py,sha256=RC9dVeopslS-zXTsSJ7VC4iVsKgZCBwfmccmr_LCHA0,29971
+ lollms_client/llm_bindings/pythonllamacpp/__init__.py,sha256=ZTuVa5ngu9GPVImjs_g8ArV7Bx7a1Rze518Tz8AFJ3U,31807
+ lollms_client/llm_bindings/tensor_rt/__init__.py,sha256=xiT-JAyNI_jo6CE0nle9Xoc7U8-UHAfEHrnCwmDTiOE,32023
+ lollms_client/llm_bindings/transformers/__init__.py,sha256=hEonNvmrVSc9YWg_1uVwxe31rC-fsjVGh6QvyBc0TEE,37598
+ lollms_client/llm_bindings/vllm/__init__.py,sha256=pZGzCuBos1dzkbqfkAnSAIC2XUX-2BT25c15Yeh8Nwo,32721
+ lollms_client/mcp_bindings/local_mcp/__init__.py,sha256=S_hEilM3WLt0u2uluhuJXMNAzTAnWC8FWteBP92V_2g,14336
+ lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py,sha256=2pkt1JcEKj61lIA5zuW3s6qkdpQN5rKfooo7bnebx24,3061
+ lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py,sha256=THtZsMxNnXZiBdkwoBlfbWY2C5hhDdmPtnM-8cSKN6s,9488
+ lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py,sha256=PLC31-D04QKTOTb1uuCHnrAlpysQjsk89yIJngK0VGc,4586
+ lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py,sha256=McDCBVoVrMDYgU7EYtyOY7mCk1uEeTea0PSD69QqDsQ,6228
+ lollms_client/mcp_bindings/remote_mcp/__init__.py,sha256=YpSclbNJDYVUe2W0H5Xki4gs_qqAqE95uCl_RYZsxPA,20406
+ lollms_client/mcp_bindings/standard_mcp/__init__.py,sha256=wJQofr4zS5RIS9V5_WuMMFsJxSDJgXDW3PQPX1hlx6g,31519
+ lollms_client/stt_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lollms_client/stt_bindings/lollms/__init__.py,sha256=9Vmn1sQQZKLGLe7nZnc-0LnNeSY8r9xw3pYZF-wVtPo,5889
+ lollms_client/stt_bindings/whisper/__init__.py,sha256=1Ej67GdRKBy1bba14jMaYDYHiZkxJASkWm5eF07ztDQ,15363
+ lollms_client/stt_bindings/whispercpp/__init__.py,sha256=xSAQRjAhljak3vWCpkP0Vmdb6WmwTzPjXyaIB85KLGU,21439
+ lollms_client/tti_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lollms_client/tti_bindings/dalle/__init__.py,sha256=KWUow9z-xR_am_gHg3kGZ_5u_nnF88BB_0JvyfuOG_s,23456
+ lollms_client/tti_bindings/diffusers/__init__.py,sha256=vjzuJfOAIhoiwomuIZvWpxmcM5UxpTwqTbD04E-zi3Y,37786
+ lollms_client/tti_bindings/gemini/__init__.py,sha256=iFJMAR944nRDhXA7SdFjsCOqJ1dPSqyTG4eMFNg6Vwc,9734
+ lollms_client/tti_bindings/lollms/__init__.py,sha256=5Tnsn4b17djvieQkcjtIDBm3qf0pg5ZWWov-4_2wmo0,8762
+ lollms_client/ttm_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lollms_client/ttm_bindings/audiocraft/__init__.py,sha256=a0k6wTrHth6GaVOiNnVboeFY3oKVvCQPbQlqO38XEyc,14328
+ lollms_client/ttm_bindings/bark/__init__.py,sha256=Pr3ou2a-7hNYDqbkxrAbghZpO5HvGUhz7e-7VGXIHHA,18976
+ lollms_client/ttm_bindings/lollms/__init__.py,sha256=DU3WLmJaWNM1NAMtJsnaFo4Y9wlfc675M8aUiaLnojA,3143
+ lollms_client/tts_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lollms_client/tts_bindings/bark/__init__.py,sha256=cpnmr6rmXcNdy4ib_5UHAbUP5oGoMJwB931_vU6VI14,19480
+ lollms_client/tts_bindings/lollms/__init__.py,sha256=8x2_T9XscvISw2TiaLoFxvrS7TIsVLdqbwSc04cX-wc,7164
+ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5Lvhgp5Gs2-2GN51MTjA,18930
+ lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
+ lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
+ lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lollms_client-1.0.0.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+ lollms_client-1.0.0.dist-info/METADATA,sha256=qqfmnRtnMRYdgvQTzwjlh8v_lAaOGPJ5H0Sf-X3j1dk,58549
+ lollms_client-1.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ lollms_client-1.0.0.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+ lollms_client-1.0.0.dist-info/RECORD,,
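Each row of the new RECORD is the standard wheel manifest triple `path,algorithm=urlsafe-b64-digest,size`, with the RECORD file itself listed hash-less. A minimal integrity-check sketch against an installed copy (the `dist-info` path below is a placeholder):

```python
import base64
import csv
import hashlib
from pathlib import Path

def verify_record(dist_info: Path) -> None:
    # Wheel RECORD digests are unpadded urlsafe base64 of the raw hash bytes.
    site_packages = dist_info.parent
    with open(dist_info / "RECORD", newline="") as f:
        for path, digest, _size in csv.reader(f):
            if not digest:  # RECORD lists itself with empty hash and size
                continue
            algo, _, expected = digest.partition("=")
            actual = base64.urlsafe_b64encode(
                hashlib.new(algo, (site_packages / path).read_bytes()).digest()
            ).rstrip(b"=").decode()
            print("OK " if actual == expected else "BAD", path)

verify_record(Path("/path/to/site-packages/lollms_client-1.0.0.dist-info"))
```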
@@ -1,3 +1 @@
- examples
  lollms_client
- test
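This `top_level.txt` change means the 0.32.1 wheel installed `examples` and `test` as importable top-level packages in site-packages (an easy source of name collisions); 1.0.0 ships only `lollms_client`. A quick probe, as a sketch (`test` is skipped because CPython's own stdlib `test` package would mask the result):

```python
import importlib.util

for name in ("lollms_client", "examples"):
    spec = importlib.util.find_spec(name)
    print(name, "importable:", spec is not None)
# Under 0.32.1 both names resolve from site-packages;
# under 1.0.0 `examples` should no longer be importable.
```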
@@ -1,58 +0,0 @@
- from lollms_client import LollmsClient
- import pipmaster as pm
- from ascii_colors import ASCIIColors
- if not pm.is_installed("docling"):
-     pm.install("docling")
- from docling.document_converter import DocumentConverter
-
- ASCIIColors.set_log_file("log.log")
-
- lc = LollmsClient()
- # Create prompts for each section
- article_url = "https://arxiv.org/pdf/2109.09572"
- converter = DocumentConverter()
- result = converter.convert(article_url)
- article_text = result.document.export_to_markdown()
-
- ASCIIColors.info("Article loaded successfully")
-
- # Use the sequential_summarize method from lollms
- summary = lc.sequential_summarize(
-     article_text,
-     """
-     Extract the following information if present in the chunk:
-
-     1. **Title**:
-        - Found in text chunk number 1 at the beginning. It should be followed by # or ##
-        - Copy exactly as presented; do not interpret.
-        - Never alter this if already in the memory. This is important
-
-     2. **Authors**:
-        - Listed in text chunk number 1 at the beginning.
-        - If you fail to find the authors keep this empty.
-        - Copy exactly as presented; do not interpret.
-        - Never alter this if already in the memory. This is important
-
-     3. **Summary**:
-        - Provide a concise but detailed summary of the article by adding new information from the text chunk to the memory content.
-
-     4. **Results**:
-        - Extract quantified results if available.
-
-     Ensure that any information already in memory is retained unless explicitly updated by the current chunk.
-     """,
-     "markdown",
-     """Write a final markdown with these sections:
-     ## Title
-     ## Authors
-     ## Summary
-     ## Results
-     """,
-     ctx_size=128000,
-     chunk_size=4096,
-     bootstrap_chunk_size=1024,
-     bootstrap_steps=1,
-     debug=True
- )
-
- ASCIIColors.yellow(summary)
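Since this script no longer ships with the package, the call pattern it demonstrated is worth keeping at hand. A condensed sketch, assuming `sequential_summarize` keeps the signature used above (text, per-chunk extraction prompt, output format, final formatting prompt, plus chunking keywords):

```python
from lollms_client import LollmsClient

lc = LollmsClient()  # assumes a reachable default binding

article_text = open("article.md", encoding="utf-8").read()  # any long document

summary = lc.sequential_summarize(
    article_text,
    "Extract the title, authors, a running summary, and any quantified "
    "results from this chunk; keep existing memory unless the chunk updates it.",
    "markdown",
    "Write a final markdown with sections: ## Title, ## Authors, ## Summary, ## Results",
    ctx_size=128000,            # total context budget
    chunk_size=4096,            # size of each chunk processed per step
    bootstrap_chunk_size=1024,  # smaller chunks while memory is still empty
    bootstrap_steps=1,          # number of bootstrap steps
)
print(summary)
```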
@@ -1,266 +0,0 @@
- import os
- import shutil
- import subprocess
- import sys
- import json
- from pathlib import Path
- from typing import Optional, List, Dict, Any
-
- # Correctly import all necessary classes from the lollms_client package
- from lollms_client import LollmsClient, LollmsDataManager, LollmsDiscussion, LollmsPersonality
- from ascii_colors import ASCIIColors, trace_exception
-
- # --- Configuration ---
- MAX_CONTEXT_SIZE_FOR_TEST = 2048  # Increased for agentic turns
-
- # Database and workspace configuration
- WORKSPACE_DIR = Path("./test_workspace_agentic")
- DATABASE_PATH = f"sqlite:///{WORKSPACE_DIR / 'test_discussion_agentic.db'}"
- DISCUSSION_ID = "console-agentic-test-1"  # Use a fixed ID for easy resumption
-
- # --- MOCK KNOWLEDGE BASE for RAG ---
- MOCK_KNOWLEDGE_BASE = {
-     "python_basics.md": [
-         {"chunk_id": 1, "text": "Python is a high-level, interpreted programming language known for its readability. It was created by Guido van Rossum and released in 1991."},
-         {"chunk_id": 2, "text": "Key features of Python include dynamic typing, garbage collection, and a large standard library. It supports procedural, object-oriented, and functional programming."},
-     ],
-     "javascript_info.js": [
-         {"chunk_id": 1, "text": "JavaScript is a scripting language for front-end web development. It is also used in back-end development (Node.js)."},
-         {"chunk_id": 2, "text": "Popular JavaScript frameworks include React, Angular, and Vue.js."},
-     ],
-     "ai_concepts.txt": [
-         {"chunk_id": 1, "text": "Retrieval Augmented Generation (RAG) is an AI framework for improving LLM responses by grounding the model on external knowledge sources."},
-     ]
- }
-
- # --- Dummy MCP Server Scripts ---
- TIME_SERVER_PY = """
- import asyncio
- from datetime import datetime
- from mcp.server.fastmcp import FastMCP
- mcp_server = FastMCP("TimeMCP", description="A server that provides the current time.", host="localhost",
-                      port=9624,
-                      log_level="DEBUG")
- @mcp_server.tool()
- def get_current_time(user_id: str = "unknown"):
-     return {"time": datetime.now().isoformat(), "user_id": user_id}
- if __name__ == "__main__": mcp_server.run(transport="streamable-http")
- """
- CALCULATOR_SERVER_PY = """
- import asyncio
- from typing import List, Union
- from mcp.server.fastmcp import FastMCP
- mcp_server = FastMCP("CalculatorMCP", description="A server that adds lists of numbers.", host="localhost",
-                      port=9625,
-                      log_level="DEBUG")
- @mcp_server.tool()
- def add_numbers(numbers: List[Union[int, float]]):
-     if not isinstance(numbers, list): return {"error": "Input must be a list"}
-     return {"sum": sum(numbers)}
- if __name__ == "__main__": mcp_server.run(transport="streamable-http")
- """
-
- # --- RAG Mock Function ---
- def mock_rag_query_function(query_text: str, top_k: int = 3, **kwargs) -> List[Dict[str, Any]]:
-     ASCIIColors.magenta(f"\n  [MOCK RAG] Querying knowledge base for: '{query_text}'")
-     results = []
-     query_lower = query_text.lower()
-     for file_path, chunks in MOCK_KNOWLEDGE_BASE.items():
-         for chunk in chunks:
-             if any(word in chunk["text"].lower() for word in query_lower.split() if len(word) > 2):
-                 results.append({"file_path": file_path, "chunk_text": chunk["text"]})
-     ASCIIColors.magenta(f"  [MOCK RAG] Found {len(results[:top_k])} relevant chunks.")
-     return results[:top_k]
-
- def start_mcp_servers():
-     """Starts the dummy MCP servers in the background."""
-     ASCIIColors.yellow("--- Starting background MCP servers ---")
-     server_dir = WORKSPACE_DIR / "mcp_servers"
-     server_dir.mkdir(exist_ok=True, parents=True)
-
-     (server_dir / "time_server.py").write_text(TIME_SERVER_PY)
-     (server_dir / "calculator_server.py").write_text(CALCULATOR_SERVER_PY)
-
-     procs = []
-     procs.append(subprocess.Popen([sys.executable, str(server_dir / "time_server.py")], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL))
-     procs.append(subprocess.Popen([sys.executable, str(server_dir / "calculator_server.py")], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL))
-     ASCIIColors.yellow("--- MCP servers launched ---")
-     return procs
-
- def setup_client_and_discussion() -> LollmsDiscussion:
-     """Sets up the LollmsClient with MCP, the DB manager, and the discussion."""
-     print("--- Setting up Lollms Environment ---")
-     WORKSPACE_DIR.mkdir(exist_ok=True)
-
-     mcp_config = {
-         "servers_infos": {
-             "time_machine": {"server_url": "http://localhost:9624/mcp"},
-             "calc_unit": {"server_url": "http://localhost:9625/mcp"},
-         }
-     }
-
-     try:
-         client = LollmsClient(
-             "ollama",
-             model_name="mistral-nemo:latest",
-             mcp_binding_name="remote_mcp",
-             mcp_binding_config=mcp_config
-         )
-     except Exception as e:
-         trace_exception(e)
-         print("\n---FATAL ERROR---")
-         print("Could not initialize LollmsClient. Ensure Ollama is running and mcp is installed.")
-         exit()
-
-     print(f"-> Using model: {client.binding.model_name}")
-     print(f"-> Using MCP binding: {client.mcp.binding_name}")
-
-     db_manager = LollmsDataManager(db_path=DATABASE_PATH)
-     discussion = db_manager.get_discussion(client, DISCUSSION_ID)
-
-     if discussion:
-         print(f"-> Resuming discussion (ID: {DISCUSSION_ID})")
-         discussion.max_context_size = MAX_CONTEXT_SIZE_FOR_TEST
-     else:
-         print(f"-> Creating new discussion (ID: {DISCUSSION_ID})")
-         discussion = LollmsDiscussion.create_new(
-             lollms_client=client,
-             db_manager=db_manager,
-             id=DISCUSSION_ID,
-             title="Console Agentic Test",
-             max_context_size=MAX_CONTEXT_SIZE_FOR_TEST
-         )
-
-     print("--- Setup Complete. Ready to chat! ---\n")
-     return discussion
-
- def print_help():
-     print("\n--- Commands ---")
-     print("!agent <prompt> - Run a prompt using all available tools (MCP).")
-     print("!rag <prompt>   - Run a prompt using the mock knowledge base (RAG).")
-     print("!both <prompt>  - Run a prompt using both MCP tools and RAG.")
-     print("!status         - Show current discussion state (pruning, message count).")
-     print("!regen          - Regenerate the last AI response.")
-     print("!exit           - Exit the application.")
-     print("----------------\n")
-
- def print_agentic_results(response_dict):
-     """Renders a beautiful report of the agent's turn."""
-     ai_message = response_dict.get('ai_message')
-     if not ai_message:
-         return
-
-     ASCIIColors.cyan("\n" + "="*22 + " Agentic Turn Report " + "="*22)
-
-     # --- Final Answer ---
-     ASCIIColors.blue("\nFinal Answer:")
-     ASCIIColors.green(f"  {ai_message.content}")
-
-     # --- Agent's Internal Monologue (The Scratchpad) ---
-     if ai_message.scratchpad:
-         ASCIIColors.blue("\nAgent's Reasoning Log (Scratchpad):")
-         # Print scratchpad line by line for better color coding
-         for line in ai_message.scratchpad.split('\n'):
-             if line.startswith("### Step"):
-                 ASCIIColors.yellow(line)
-             elif line.startswith("- **Action**:") or line.startswith("- **Result**:") or line.startswith("- **Error**:"):
-                 ASCIIColors.magenta(line)
-             else:
-                 print(line)
-
-     # --- Sources Used (from metadata) ---
-     if ai_message.metadata and "sources" in ai_message.metadata:
-         sources = ai_message.metadata.get("sources", [])
-         if sources:
-             ASCIIColors.blue("\nSources Consulted (RAG):")
-             for i, source in enumerate(sources):
-                 print(f"  [{i+1}] Path: {source.get('file_path', 'N/A')}")
-                 # Indent the content for readability
-                 content = source.get('chunk_text', 'N/A').replace('\n', '\n      ')
-                 print(f"      Content: \"{content}\"")
-
-     ASCIIColors.cyan("\n" + "="*61 + "\n")
-
- def run_chat_console(discussion: LollmsDiscussion):
-     print_help()
-     while True:
-         user_input = input("You: ")
-         if not user_input:
-             continue
-
-         use_mcps_flag = False
-         use_data_store_flag = False
-         prompt = user_input
-
-         # --- Command Handling ---
-         if user_input.lower().startswith("!exit"):
-             break
-         elif user_input.lower().startswith("!help"):
-             print_help()
-             continue
-         elif user_input.lower().startswith("!status"):
-             # Assuming a print_status function exists
-             # print_status(discussion)
-             continue
-         elif user_input.lower().startswith("!regen"):
-             # Assuming a regenerate_branch method exists
-             # discussion.regenerate_branch(...)
-             continue
-         elif user_input.lower().startswith("!agent "):
-             use_mcps_flag = True
-             prompt = user_input[7:].strip()
-             ASCIIColors.yellow(f"Agentic MCP turn initiated for: '{prompt}'")
-         elif user_input.lower().startswith("!rag "):
-             use_data_store_flag = True
-             prompt = user_input[5:].strip()
-             ASCIIColors.yellow(f"Agentic RAG turn initiated for: '{prompt}'")
-         elif user_input.lower().startswith("!both "):
-             use_mcps_flag = True
-             use_data_store_flag = True
-             prompt = user_input[6:].strip()
-             ASCIIColors.yellow(f"Agentic MCP+RAG turn initiated for: '{prompt}'")
-
-         # --- Streaming Callback ---
-         def stream_callback(chunk, msg_type, metadata={}, **kwargs):
-             # Render steps and thoughts in real-time
-             if msg_type == 12:  # MSG_TYPE.MSG_TYPE_STEP_START
-                 ASCIIColors.cyan(f"\n> Starting: {chunk}")
-             elif msg_type == 13:  # MSG_TYPE.MSG_TYPE_STEP_END
-                 ASCIIColors.cyan(f"> Finished: {chunk}")
-             elif msg_type == 2:  # MSG_TYPE.MSG_TYPE_INFO (for thoughts)
-                 ASCIIColors.yellow(f"\n  (Thought): {chunk}")
-             else:  # Final answer chunks are printed by the main loop
-                 pass  # The final answer is printed after the full report
-             return True
-
-         # --- Main Chat Logic ---
-         try:
-             #print("\nAI: ", end="", flush=True)
-
-             response_dict = discussion.chat(
-                 user_message=prompt,
-                 use_mcps=use_mcps_flag,
-                 use_data_store={"coding_store": mock_rag_query_function} if use_data_store_flag else None,
-                 streaming_callback=stream_callback
-             )
-             if use_mcps_flag or use_data_store_flag:
-                 print_agentic_results(response_dict)
-             print("\nAI: ", end="")
-             ASCIIColors.green(response_dict['ai_message'].content)
-
-         except Exception as e:
-             trace_exception(e)
-             print("\nAn error occurred during generation.")
-
- if __name__ == "__main__":
-     mcp_procs = start_mcp_servers()
-     try:
-         discussion_session = setup_client_and_discussion()
-         run_chat_console(discussion_session)
-     finally:
-         ASCIIColors.red("\n--- Shutting down MCP servers ---")
-         for proc in mcp_procs:
-             proc.terminate()
-             proc.wait()
-         shutil.rmtree(WORKSPACE_DIR, ignore_errors=True)
-         print("Cleanup complete. Goodbye!")