lollms-client 0.25.5__tar.gz → 0.26.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (100)
  1. {lollms_client-0.25.5 → lollms_client-0.26.0}/PKG-INFO +366 -1
  2. lollms_client-0.26.0/README.md +573 -0
  3. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/__init__.py +1 -1
  4. lollms_client-0.26.0/lollms_client/llm_bindings/azure_openai/__init__.py +364 -0
  5. lollms_client-0.26.0/lollms_client/llm_bindings/claude/__init__.py +549 -0
  6. lollms_client-0.26.0/lollms_client/llm_bindings/groq/__init__.py +292 -0
  7. lollms_client-0.26.0/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +307 -0
  8. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/llm_bindings/lollms/__init__.py +1 -0
  9. lollms_client-0.26.0/lollms_client/llm_bindings/mistral/__init__.py +298 -0
  10. lollms_client-0.26.0/lollms_client/llm_bindings/open_router/__init__.py +304 -0
  11. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_core.py +2 -2
  12. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_discussion.py +16 -20
  13. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client.egg-info/PKG-INFO +366 -1
  14. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client.egg-info/SOURCES.txt +6 -0
  15. lollms_client-0.25.5/README.md +0 -208
  16. {lollms_client-0.25.5 → lollms_client-0.26.0}/LICENSE +0 -0
  17. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/article_summary/article_summary.py +0 -0
  18. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/console_discussion/console_app.py +0 -0
  19. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/console_discussion.py +0 -0
  20. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/deep_analyze/deep_analyse.py +0 -0
  21. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
  22. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/function_calling_with_local_custom_mcp.py +0 -0
  23. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/generate_a_benchmark_for_safe_store.py +0 -0
  24. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/generate_and_speak/generate_and_speak.py +0 -0
  25. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/generate_game_sfx/generate_game_fx.py +0 -0
  26. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/generate_text_with_multihop_rag_example.py +0 -0
  27. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/gradio_chat_app.py +0 -0
  28. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/gradio_lollms_chat.py +0 -0
  29. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/internet_search_with_rag.py +0 -0
  30. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/lollms_discussions_test.py +0 -0
  31. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/mcp_examples/external_mcp.py +0 -0
  32. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/mcp_examples/local_mcp.py +0 -0
  33. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/mcp_examples/openai_mcp.py +0 -0
  34. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/mcp_examples/run_remote_mcp_example_v2.py +0 -0
  35. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/mcp_examples/run_standard_mcp_example.py +0 -0
  36. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/simple_text_gen_test.py +0 -0
  37. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/simple_text_gen_with_image_test.py +0 -0
  38. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/test_local_models/local_chat.py +0 -0
  39. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/text_2_audio.py +0 -0
  40. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/text_2_image.py +0 -0
  41. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/text_2_image_diffusers.py +0 -0
  42. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/text_and_image_2_audio.py +0 -0
  43. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/text_gen.py +0 -0
  44. {lollms_client-0.25.5 → lollms_client-0.26.0}/examples/text_gen_system_prompt.py +0 -0
  45. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/llm_bindings/__init__.py +0 -0
  46. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/llm_bindings/gemini/__init__.py +0 -0
  47. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/llm_bindings/litellm/__init__.py +0 -0
  48. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  49. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/llm_bindings/ollama/__init__.py +0 -0
  50. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/llm_bindings/openai/__init__.py +0 -0
  51. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  52. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  53. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  54. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  55. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  56. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_config.py +0 -0
  57. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_js_analyzer.py +0 -0
  58. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_llm_binding.py +0 -0
  59. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_mcp_binding.py +0 -0
  60. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_personality.py +0 -0
  61. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_python_analyzer.py +0 -0
  62. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_stt_binding.py +0 -0
  63. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_tti_binding.py +0 -0
  64. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_ttm_binding.py +0 -0
  65. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_tts_binding.py +0 -0
  66. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_ttv_binding.py +0 -0
  67. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_types.py +0 -0
  68. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/lollms_utilities.py +0 -0
  69. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
  70. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
  71. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
  72. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
  73. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
  74. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
  75. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
  76. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/stt_bindings/__init__.py +0 -0
  77. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  78. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  79. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  80. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/tti_bindings/__init__.py +0 -0
  81. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/tti_bindings/dalle/__init__.py +0 -0
  82. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
  83. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/tti_bindings/gemini/__init__.py +0 -0
  84. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  85. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/ttm_bindings/__init__.py +0 -0
  86. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  87. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/ttm_bindings/bark/__init__.py +0 -0
  88. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  89. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/tts_bindings/__init__.py +0 -0
  90. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/tts_bindings/bark/__init__.py +0 -0
  91. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  92. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  93. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  94. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/ttv_bindings/__init__.py +0 -0
  95. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  96. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client.egg-info/dependency_links.txt +0 -0
  97. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client.egg-info/requires.txt +0 -0
  98. {lollms_client-0.25.5 → lollms_client-0.26.0}/lollms_client.egg-info/top_level.txt +0 -0
  99. {lollms_client-0.25.5 → lollms_client-0.26.0}/pyproject.toml +0 -0
  100. {lollms_client-0.25.5 → lollms_client-0.26.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 0.25.5
+ Version: 0.26.0
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
@@ -169,6 +169,107 @@ except Exception as e:
  ```
  For a comprehensive guide on function calling and setting up tools, please refer to the [Usage Guide (DOC_USE.md)](DOC_USE.md).

+ ### 🤖 Advanced Agentic Generation with RAG: `generate_with_mcp_rag`
+
+ For more complex tasks, `generate_with_mcp_rag` provides a powerful, built-in agent that uses a ReAct-style (Reason, Act) loop. This agent can reason about a user's request, use tools (MCP), retrieve information from knowledge bases (RAG), and adapt its plan based on the results of its actions.
+
+ **Key Agent Capabilities:**
+
+ * **Observe-Think-Act Loop:** The agent iteratively reviews its progress, thinks about the next logical step, and takes an action (like calling a tool).
+ * **Tool Integration (MCP):** Can use any available MCP tools, such as searching the web or executing code.
+ * **Retrieval-Augmented Generation (RAG):** You can provide one or more "data stores" (knowledge bases). The agent gains a `research::{store_name}` tool to query these stores for relevant information.
+ * **In-Memory Code Generation:** The agent has a special `generate_code` tool. This allows it to first write a piece of code (e.g., a complex Python script) and then pass that code to another tool (e.g., `python_code_interpreter`) in a subsequent step.
+ * **Stateful Progress Tracking:** Designed for rich UI experiences, it emits `step_start` and `step_end` events with unique IDs via the streaming callback. This allows an application to track the agent's individual thoughts and long-running tool calls in real time.
+ * **Self-Correction:** Includes a `refactor_scratchpad` tool that lets the agent clean up its own thought process if it becomes cluttered.
+
+ Here is an example of using the agent to answer a question by first performing RAG on a custom knowledge base and then using the retrieved information to generate and execute code.
+
+ ```python
+ import json
+ from lollms_client import LollmsClient, MSG_TYPE
+ from ascii_colors import ASCIIColors
+
+ # 1. Define a mock RAG data store and retrieval function
+ project_notes = {
+     "project_phoenix_details": "Project Phoenix has a current budget of $500,000 and an expected quarterly growth rate of 15%."
+ }
+
+ def retrieve_from_notes(query: str, top_k: int = 1, min_similarity: float = 0.5):
+     """A simple keyword-based retriever for our mock data store."""
+     # top_k and min_similarity mirror the signature the agent calls with;
+     # this simple mock only uses top_k.
+     results = []
+     for key, text in project_notes.items():
+         if query.lower() in text.lower():
+             results.append({"source": key, "content": text})
+     return results[:top_k]
+
+ # 2. Define a detailed streaming callback to visualize the agent's process
+ def agent_streaming_callback(chunk: str, msg_type: MSG_TYPE, params: dict = None, metadata: list = None) -> bool:
+     if not params: params = {}
+     msg_id = params.get("id", "")
+
+     if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
+         ASCIIColors.yellow(f"\n>> Agent Step Start [ID: {msg_id}]: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
+         ASCIIColors.green(f"<< Agent Step End [ID: {msg_id}]: {chunk}")
+         if params.get('result'):
+             ASCIIColors.cyan(f"   Result: {json.dumps(params['result'], indent=2)}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_THOUGHT_CONTENT:
+         ASCIIColors.magenta(f"\n🤔 Agent Thought: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_TOOL_CALL:
+         ASCIIColors.blue(f"\n🛠️ Agent Action: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_OBSERVATION:
+         ASCIIColors.cyan(f"\n👀 Agent Observation: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
+         print(chunk, end="", flush=True)  # Final answer stream
+     return True
+
+ try:
+     # 3. Initialize LollmsClient with an LLM and local tools enabled
+     lc = LollmsClient(
+         binding_name="ollama",        # Use Ollama
+         model_name="llama3",          # Or any capable model like mistral, gemma, etc.
+         mcp_binding_name="local_mcp"  # Enable local tools like python_code_interpreter
+     )
+
+     # 4. Define the user prompt and the RAG data store
+     prompt = "Based on my notes about Project Phoenix, write and run a Python script to calculate its projected budget after two quarters."
+
+     rag_data_store = {
+         "project_notes": {"callable": retrieve_from_notes}
+     }
+
+     ASCIIColors.yellow(f"User Prompt: {prompt}")
+     print("\n" + "="*50 + "\nAgent is now running...\n" + "="*50)
+
+     # 5. Run the agent
+     agent_output = lc.generate_with_mcp_rag(
+         prompt=prompt,
+         use_data_store=rag_data_store,
+         use_mcps=["python_code_interpreter"],  # Make specific tools available
+         streaming_callback=agent_streaming_callback,
+         max_reasoning_steps=5
+     )
+
+     print("\n" + "="*50 + "\nAgent finished.\n" + "="*50)
+
+     # 6. Print the final results
+     if agent_output.get("error"):
+         ASCIIColors.error(f"\nAgent Error: {agent_output['error']}")
+     else:
+         ASCIIColors.green("\n--- Final Answer ---")
+         print(agent_output.get("final_answer"))
+
+         ASCIIColors.magenta("\n--- Tool Calls ---")
+         print(json.dumps(agent_output.get("tool_calls", []), indent=2))
+
+         ASCIIColors.cyan("\n--- RAG Sources ---")
+         print(json.dumps(agent_output.get("sources", []), indent=2))
+
+ except Exception as e:
+     ASCIIColors.red(f"\nAn unexpected error occurred: {e}")
+ ```
+
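+ The mock retriever above matches exact keywords and quietly ignores `min_similarity`. As a slightly more realistic drop-in, here is a minimal sketch of a word-overlap retriever that honors both parameters; it reuses the `project_notes` dictionary from the example, and the scoring scheme is purely illustrative, not part of `lollms-client`.
+
+ ```python
+ def retrieve_from_notes_fuzzy(query: str, top_k: int = 1, min_similarity: float = 0.5):
+     """Rank notes by word overlap with the query, honoring min_similarity."""
+     query_words = set(query.lower().split())
+     scored = []
+     for key, text in project_notes.items():
+         text_words = set(text.lower().split())
+         # Fraction of query words that appear in this note
+         overlap = len(query_words & text_words) / max(len(query_words), 1)
+         if overlap >= min_similarity:
+             scored.append({"source": key, "content": text, "similarity": overlap})
+     scored.sort(key=lambda r: r["similarity"], reverse=True)
+     return scored[:top_k]
+ ```
+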
  ## Documentation

  For more in-depth information, please refer to:
@@ -226,6 +327,270 @@ The `examples/` directory in this repository contains a rich set of scripts demo

  Explore these examples to see `lollms-client` in action!

+ ## Using LoLLMs Client with Different Bindings
+
+ `lollms-client` supports a wide range of LLM backends through its binding system. This section provides practical examples of how to initialize `LollmsClient` for each of the major supported bindings.
+
+ ### A Note on Configuration
+
+ The recommended way to provide credentials and other binding-specific settings is through the `llm_binding_config` dictionary during `LollmsClient` initialization. While many bindings can fall back to reading environment variables (e.g., `OPENAI_API_KEY`), passing them explicitly in the config is clearer and less error-prone.
+
+ ```python
+ # General configuration pattern
+ lc = LollmsClient(
+     binding_name="your_binding_name",
+     model_name="a_model_name",
+     llm_binding_config={
+         "specific_api_key_param": "your_api_key_here",
+         "another_specific_param": "some_value"
+     }
+ )
+ ```
+
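+ If you prefer keeping secrets out of your source files, one pattern is to read the key from the environment yourself and still pass it explicitly. A minimal sketch, assuming the `openai` binding's `service_key` parameter shown later in this section:
+
+ ```python
+ import os
+ from lollms_client import LollmsClient
+
+ # The key comes from the environment, but the binding still receives it explicitly
+ lc = LollmsClient(
+     binding_name="openai",
+     model_name="gpt-4o",
+     llm_binding_config={"service_key": os.environ["OPENAI_API_KEY"]},
+ )
+ ```
+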
+ ---
+
+ ### 1. Local Bindings
+
+ These bindings run models directly on your local machine, giving you full control and privacy.
+
+ #### **Ollama**
+
+ The `ollama` binding connects to a running Ollama server instance on your machine or network.
+
+ **Prerequisites:**
+ * [Ollama installed and running](https://ollama.com/).
+ * Models pulled, e.g., `ollama pull llama3`.
+
+ **Usage:**
+
+ ```python
+ from lollms_client import LollmsClient
+
+ # Configuration for a local Ollama server
+ lc = LollmsClient(
+     binding_name="ollama",
+     model_name="llama3",                   # Or any other model you have pulled
+     host_address="http://localhost:11434"  # Default Ollama address
+ )
+
+ # Now you can use lc.generate_text(), lc.chat(), etc.
+ response = lc.generate_text("Why is the sky blue?")
+ print(response)
+ ```
+
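+ Streaming works the same way here as in the agent example earlier in this README: pass a callback and handle `MSG_TYPE.MSG_TYPE_CHUNK`. A minimal sketch, assuming `generate_text` accepts the same `streaming_callback` parameter that `generate_with_mcp_rag` uses above:
+
+ ```python
+ from lollms_client import LollmsClient, MSG_TYPE
+
+ def on_chunk(chunk: str, msg_type: MSG_TYPE, params: dict = None, metadata: list = None) -> bool:
+     # Print tokens as they arrive; returning False would stop generation early
+     if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
+         print(chunk, end="", flush=True)
+     return True
+
+ lc = LollmsClient(binding_name="ollama", model_name="llama3")
+ lc.generate_text("Tell me a fun fact about llamas.", streaming_callback=on_chunk)
+ ```
+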
+ #### **PythonLlamaCpp (Local GGUF Models)**
+
+ The `pythonllamacpp` binding loads and runs GGUF model files directly using the powerful `llama-cpp-python` library. This is ideal for high-performance, local inference on CPU or GPU.
+
+ **Prerequisites:**
+ * A GGUF model file downloaded to your machine.
+ * `llama-cpp-python` installed. For GPU support, it must be compiled with the correct flags (e.g., `CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python`).
+
+ **Usage:**
+
+ ```python
+ from lollms_client import LollmsClient
+
+ # --- Configuration for Llama.cpp ---
+ # Path to your GGUF model file
+ MODEL_PATH = "/path/to/your/model.gguf"
+
+ # Binding-specific configuration
+ LLAMACPP_CONFIG = {
+     "n_gpu_layers": -1,      # -1 for all layers to GPU, 0 for CPU
+     "n_ctx": 4096,           # Context size
+     "seed": -1,              # -1 for random seed
+     "chat_format": "chatml"  # Or another format like 'llama-2'
+ }
+
+ try:
+     lc = LollmsClient(
+         binding_name="pythonllamacpp",
+         model_name=MODEL_PATH,  # For this binding, model_name is the file path
+         llm_binding_config=LLAMACPP_CONFIG
+     )
+
+     response = lc.generate_text("Write a recipe for a great day.")
+     print(response)
+
+ except Exception as e:
+     print(f"Error initializing Llama.cpp binding: {e}")
+     print("Please ensure llama-cpp-python is installed and the model path is correct.")
+
+ ```
+
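+ On a machine without a usable GPU, only a few values in the same dictionary need to change. A small sketch of a CPU-oriented variant (the specific numbers are illustrative):
+
+ ```python
+ # CPU-only variant of LLAMACPP_CONFIG
+ LLAMACPP_CONFIG_CPU = {
+     "n_gpu_layers": 0,       # Keep every layer on the CPU
+     "n_ctx": 2048,           # Smaller context to reduce RAM usage
+     "seed": 42,              # Fixed seed for reproducible sampling
+     "chat_format": "chatml"
+ }
+ ```
+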
+ ---
+
+ ### 2. Cloud Service Bindings
+
+ These bindings connect to hosted LLM APIs from major providers.
+
+ #### **OpenAI**
+
+ Connects to the official OpenAI API to use models like GPT-4o, GPT-4, and GPT-3.5.
+
+ **Prerequisites:**
+ * An OpenAI API key.
+
+ **Usage:**
+
+ ```python
+ from lollms_client import LollmsClient
+
+ OPENAI_CONFIG = {
+     "service_key": "your_openai_api_key_here"  # sk-...
+ }
+
+ lc = LollmsClient(
+     binding_name="openai",
+     model_name="gpt-4o",
+     llm_binding_config=OPENAI_CONFIG
+ )
+
+ response = lc.generate_text("What is the difference between AI and machine learning?")
+ print(response)
+ ```
+
+ #### **Google Gemini**
+
+ Connects to Google's Gemini family of models via the Google AI Studio API.
+
+ **Prerequisites:**
+ * A Google AI Studio API key.
+
+ **Usage:**
+
+ ```python
+ from lollms_client import LollmsClient
+
+ GEMINI_CONFIG = {
+     "service_key": "your_google_api_key_here"
+ }
+
+ lc = LollmsClient(
+     binding_name="gemini",
+     model_name="gemini-1.5-pro-latest",
+     llm_binding_config=GEMINI_CONFIG
+ )
+
+ response = lc.generate_text("Summarize the plot of 'Dune' in three sentences.")
+ print(response)
+ ```
+
+ #### **Anthropic Claude**
+
+ Connects to Anthropic's API to use the Claude family of models, including Claude 3.5 Sonnet, Opus, and Haiku.
+
+ **Prerequisites:**
+ * An Anthropic API key.
+
+ **Usage:**
+
+ ```python
+ from lollms_client import LollmsClient
+
+ CLAUDE_CONFIG = {
+     "service_key": "your_anthropic_api_key_here"
+ }
+
+ lc = LollmsClient(
+     binding_name="claude",
+     model_name="claude-3-5-sonnet-20240620",
+     llm_binding_config=CLAUDE_CONFIG
+ )
+
+ response = lc.generate_text("What are the core principles of constitutional AI?")
+ print(response)
+ ```
+
+ ---
+
+ ### 3. API Aggregator Bindings
+
+ These bindings connect to services that provide access to many different models through a single API.
+
+ #### **OpenRouter**
+
+ OpenRouter provides a unified, OpenAI-compatible interface to access models from dozens of providers (Google, Anthropic, Mistral, Groq, etc.) with one API key.
+
+ **Prerequisites:**
+ * An OpenRouter API key (starts with `sk-or-...`).
+
+ **Usage:**
+ Model names must be specified in the format `provider/model-name`.
+
+ ```python
+ from lollms_client import LollmsClient
+
+ OPENROUTER_CONFIG = {
+     "open_router_api_key": "your_openrouter_api_key_here"
+ }
+
+ # Example using a Claude model through OpenRouter
+ lc = LollmsClient(
+     binding_name="open_router",
+     model_name="anthropic/claude-3-haiku-20240307",
+     llm_binding_config=OPENROUTER_CONFIG
+ )
+
+ response = lc.generate_text("Explain what an API aggregator is, as if to a beginner.")
+ print(response)
+ ```
+
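+ Because every OpenRouter model follows the `provider/model-name` convention, switching providers is a one-string change. A quick sketch (the model ID is illustrative; check OpenRouter's catalog for current names):
+
+ ```python
+ # Same client setup, different upstream provider
+ lc = LollmsClient(
+     binding_name="open_router",
+     model_name="mistralai/mistral-7b-instruct",  # illustrative model ID
+     llm_binding_config=OPENROUTER_CONFIG
+ )
+ print(lc.generate_text("Name three benefits of a unified LLM API."))
+ ```
+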
+ #### **Groq**
+
+ Groq is a direct provider rather than a true aggregator, but it fits this section: it serves a range of popular open-source models behind one API, running them on custom LPU hardware for exceptionally fast inference.
+
+ **Prerequisites:**
+ * A Groq API key.
+
+ **Usage:**
+
+ ```python
+ from lollms_client import LollmsClient
+
+ GROQ_CONFIG = {
+     "groq_api_key": "your_groq_api_key_here"
+ }
+
+ lc = LollmsClient(
+     binding_name="groq",
+     model_name="llama3-8b-8192",
+     llm_binding_config=GROQ_CONFIG
+ )
+
+ response = lc.generate_text("Write a 3-line poem about incredible speed.")
+ print(response)
+ ```
+
+ #### **Hugging Face Inference API**
+
+ The `hugging_face_inference_api` binding connects to the serverless Hugging Face Inference API, allowing experimentation with thousands of open-source models without local hardware.
+
+ **Note:** This API can have "cold starts," so the first request might be slow.
+
+ **Prerequisites:**
+ * A Hugging Face User Access Token (starts with `hf_...`).
+
+ **Usage:**
+
+ ```python
+ from lollms_client import LollmsClient
+
+ HF_CONFIG = {
+     "hf_api_key": "your_hugging_face_token_here"
+ }
+
+ lc = LollmsClient(
+     binding_name="hugging_face_inference_api",
+     model_name="google/gemma-1.1-7b-it",
+     llm_binding_config=HF_CONFIG
+ )
+
+ response = lc.generate_text("Write a short story about a robot who discovers music.")
+ print(response)
+ ```
+
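+ Since every binding exposes the same `LollmsClient` interface, the examples above differ only in their constructor arguments. A minimal sketch that runs one prompt across several of the bindings configured in this section (the keys are the same placeholders used above):
+
+ ```python
+ from lollms_client import LollmsClient
+
+ BINDINGS = [
+     ("ollama", "llama3", {}),
+     ("openai", "gpt-4o", {"service_key": "your_openai_api_key_here"}),
+     ("groq", "llama3-8b-8192", {"groq_api_key": "your_groq_api_key_here"}),
+ ]
+
+ prompt = "In one sentence, what is a language-model binding?"
+ for binding_name, model_name, config in BINDINGS:
+     try:
+         lc = LollmsClient(
+             binding_name=binding_name,
+             model_name=model_name,
+             llm_binding_config=config
+         )
+         print(f"[{binding_name}] {lc.generate_text(prompt)}")
+     except Exception as e:
+         # A binding may fail if its backend or API key is unavailable
+         print(f"[{binding_name}] skipped: {e}")
+ ```
+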
  ## Contributing

  Contributions are welcome! Whether it's bug reports, feature suggestions, documentation improvements, or new bindings, please feel free to open an issue or submit a pull request on our [GitHub repository](https://github.com/ParisNeo/lollms_client).