SimplerLLM 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,156 @@
1
+ Metadata-Version: 2.1
2
+ Name: SimplerLLM
3
+ Version: 0.1.0
4
+ Summary: An easy-to-use Library for interacting with language models.
5
+ Home-page: https://github.com/hassancs91/SimplerLLM
6
+ Author: Hasan Aboul Hasan
7
+ Author-email: hasan@learnwithhasan.com
8
+ Keywords: text generation,openai,LLM,RAG
9
+ Classifier: Development Status :: 4 - Beta
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.6
14
+ Classifier: Programming Language :: Python :: 3.7
15
+ Classifier: Programming Language :: Python :: 3.8
16
+ Classifier: Programming Language :: Python :: 3.9
17
+ Classifier: Programming Language :: Python :: 3.10
18
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
19
+ Requires-Python: >=3.6
20
+ Description-Content-Type: text/markdown
21
+
22
+
23
+ # ⚪ SimplerLLM (Beta)
24
+
25
+ ⚡ Your Easy Pass to Advanced AI ⚡
26
+
27
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
28
+
29
+
30
+ ## 🤔 What is SimplerLLM?
31
+
32
+ SimplerLLM is an open-source Python library designed to simplify interactions with Large Language Models (LLMs) for researchers and beginners. It offers a unified interface for different LLM providers and a suite of tools to enhance language model capabilities and make it Super easy for anyone to develop AI-powered tools and apps.
33
+
34
+ ## Easy Installation
35
+ With pip:
36
+ ```bash
37
+ pip install simplerllm
38
+ ```
39
+
40
+
41
+ ## Features
42
+
43
+ - **Unified LLM Interface**: Define an LLM instance in one line for providers like OpenAI and Google Gemini. Future versions will support more APIs and LLM providers.
44
+ - **Generic Text Loader**: Load text from various sources like DOCX, PDF, TXT files, YouTube scripts, or blog posts.
45
+ - **RapidAPI Connector**: Connect with AI services on RapidAPI.
46
+ - **SERP Integration**: Perform searches using DuckDuckGo, with more search engines coming soon.
47
+ - **Prompt Template Builder**: Easily create and manage prompt templates.
48
+ And Much More Coming Soon!
49
+
50
+
51
+ ### Creating an LLM Instance
52
+
53
+ ```python
54
+ from SimplerLLM import LLM, LLMProvider
55
+
56
+ # For OpenAI
57
+ llm_instance = LLM.create(provider=LLMProvider.OPENAI)
58
+ # For Google Gemini
59
+ gemini_instance = LLM.create(provider=LLMProvider.GEMENI, model_name="gemini-pro")
60
+
61
+ response = llm_instance.generate_text(user_prompt="generate a 5 words sentence")
62
+
63
+ ```
64
+
65
+ ### Using Tools
66
+
67
+ #### SERP
68
+ ```python
69
+ from SimplerLLM.tools.serp import search_with_duck_duck_go
70
+
71
+ search_results = search_with_duck_duck_go("peanut", 3)
72
+
73
+ # use the search results the way you want!
74
+
75
+ ```
76
+
77
+ #### Generic Text Loader
78
+ ```python
79
+ from SimplerLLM.tools.generic_text_loader import load_text
80
+
81
+ text_file = load_text("file.txt")
82
+
83
+ print(text_file.content)
84
+
85
+ ```
86
+
87
+ #### Calling any RapidAPI API
88
+ ```python
89
+ from SimplerLLM.tools.rapid_api import RapidAPIClient
90
+
91
+ api_url = "https://domain-authority1.p.rapidapi.com/seo/get-domain-info"
92
+ api_params = {
93
+ 'domain': 'learnwithhasan.com',
94
+ }
95
+
96
+ api_client = RapidAPIClient() # API key read from environment variable
97
+ response = api_client.call_api(api_url, method='GET', params=api_params)
98
+
99
+
100
+ ```
101
+
102
+
103
+ #### Prompt Template Builder
104
+
105
+ ```python
106
+ from SimplerLLM.prompts.prompt_builder import create_multi_value_prompts,create_prompt_template
107
+
108
+ basic_prompt = "Generate 5 titles for a blog about {topic} and {style}"
109
+
110
+ prompt_template = create_prompt_template(basic_prompt)
111
+
112
+ prompt_template.assign_parms(topic = "marketing",style = "catchy")
113
+
114
+ print(prompt_template.content)
115
+
116
+
117
+ ## working with multiple value prompts
118
+ multi_value_prompt_template = """Hello {name}, your next meeting is on {date}.
119
+ and bring a {object} with you"""
120
+
121
+ params_list = [
122
+ {"name": "Alice", "date": "January 10th", "object" : "dog"},
123
+ {"name": "Bob", "date": "January 12th", "object" : "bag"},
124
+ {"name": "Charlie", "date": "January 15th", "object" : "pen"}
125
+ ]
126
+
127
+
128
+ multi_value_prompt = create_multi_value_prompts(multi_value_prompt_template)
129
+ generated_prompts = multi_value_prompt.generate_prompts(params_list)
130
+
131
+ print(generated_prompts[0])
132
+
133
+ ```
134
+
135
+
136
+
137
+ ### Next Updates
138
+ - Adding More Tools
139
+ - Interacting With Local LLMs
140
+ - Prompt Optimization
141
+ - Response Evaluation
142
+ - GPT Trainer
143
+ - Document Chunker
144
+ - Advanced Document Loader
145
+ - Integration With More Providers
146
+
147
+
148
+
149
+ ## License
150
+ ### MIT
151
+
152
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
153
+
154
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
155
+
156
+ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
File without changes
File without changes
@@ -0,0 +1,136 @@
1
+ import SimplerLLM.langauge.llm_providers.openai_llm as openai_llm
2
+ import SimplerLLM.langauge.llm_providers.gemeni_llm as gemeni_llm
3
+ from enum import Enum
4
+
5
+
6
class LLMProvider(Enum):
    """Enumeration of the supported LLM backends.

    ``GEMINI`` is a spelling-corrected alias of the original ``GEMENI``
    member: both share the value ``2``, so ``LLMProvider.GEMINI is
    LLMProvider.GEMENI`` and every existing caller keeps working.
    """
    OPENAI = 1
    GEMENI = 2
    GEMINI = 2  # alias for GEMENI (duplicate Enum values create aliases)
9
+
10
+
11
class LLM:
    """Provider-agnostic entry point for language models.

    Stores the generation settings shared by all backends and exposes
    :meth:`create`, a factory returning the provider-specific subclass.
    """

    def __init__(self, provider=LLMProvider.OPENAI, model_name="gpt-3.5-turbo",
                 temperature=0.7, top_p=1.0):
        # Defaults used by subclasses when a call does not override them.
        self.provider = provider
        self.model_name = model_name
        self.temperature = temperature
        self.top_p = top_p

    @staticmethod
    def create(provider=LLMProvider.OPENAI, model_name="gpt-3.5-turbo",
               temperature=0.7, top_p=1.0):
        """Return the concrete LLM implementation for *provider*.

        Falls back to a plain :class:`LLM` for providers without a
        dedicated subclass.
        """
        if provider == LLMProvider.OPENAI:
            return OpenAILLM(provider, model_name, temperature, top_p)
        elif provider == LLMProvider.GEMENI:
            return GemeniLLM(provider, model_name, temperature, top_p)
        else:
            # BUGFIX: the fallback previously omitted top_p, silently
            # resetting a caller-supplied value back to the default.
            return LLM(provider, model_name, temperature, top_p)

    def set_model(self, provider):
        """Switch the active provider.

        :raises ValueError: if *provider* is not an ``LLMProvider`` member.
        """
        if not isinstance(provider, LLMProvider):
            raise ValueError("Provider must be an instance of LLMProvider Enum")
        self.provider = provider

    def generate_text(self, input_text):
        """Base-class generation path used when no subclass handles the provider.

        :raises ValueError: for providers this base class cannot handle.
        """
        if self.provider == LLMProvider.OPENAI:
            return openai_llm.generate(input_text)
        elif self.provider == LLMProvider.GEMENI:
            # NOTE(review): placeholder string, not a real Gemini call —
            # GemeniLLM.generate_text is the functional path.
            return "generated with Gemeni"
        else:
            raise ValueError("Unsupported model")
42
+
43
+
44
+
45
+
46
+
47
+
48
class OpenAILLM(LLM):
    """OpenAI-backed implementation; a thin delegation layer over ``openai_llm``.

    Each per-call argument (``model_name``/``temperature``/``top_p``) may be
    omitted, in which case the value stored on the instance is used.
    """

    def __init__(self, model, model_name, temperature, top_p):
        super().__init__(model, model_name, temperature, top_p)

    def _resolve(self, model_name, temperature, top_p):
        # Fill any unset per-call override with the instance default.
        # (Previously duplicated verbatim in four methods.)
        return (
            model_name if model_name is not None else self.model_name,
            temperature if temperature is not None else self.temperature,
            top_p if top_p is not None else self.top_p,
        )

    def generate_text(self, user_prompt, system_prompt="", model_name=None,
                      temperature=None, top_p=None, max_tokens=500):
        """Return only the generated text for *user_prompt*."""
        model_name, temperature, top_p = self._resolve(model_name, temperature, top_p)
        return openai_llm.generate_text(user_prompt=user_prompt, system_prompt=system_prompt,
                                        model=model_name, temperature=temperature,
                                        top_p=top_p, max_tokens=max_tokens)

    async def generate_text_async(self, user_prompt, system_prompt="", model_name=None,
                                  temperature=None, top_p=None, max_tokens=500):
        """Async variant of :meth:`generate_text`."""
        model_name, temperature, top_p = self._resolve(model_name, temperature, top_p)
        return await openai_llm.generate_text_async(user_prompt=user_prompt, system_prompt=system_prompt,
                                                    model=model_name, temperature=temperature,
                                                    top_p=top_p, max_tokens=max_tokens)

    def generate_full_response(self, user_prompt, system_prompt="", model_name=None,
                               temperature=None, top_p=None, max_tokens=500):
        """Return the provider's full response object, not just the text."""
        model_name, temperature, top_p = self._resolve(model_name, temperature, top_p)
        return openai_llm.generate_full_response(user_prompt=user_prompt, system_prompt=system_prompt,
                                                 model=model_name, temperature=temperature,
                                                 top_p=top_p, max_tokens=max_tokens)

    async def generate_full_response_async(self, user_prompt, system_prompt="", model_name=None,
                                           temperature=None, top_p=None, max_tokens=500):
        """Async variant of :meth:`generate_full_response`."""
        model_name, temperature, top_p = self._resolve(model_name, temperature, top_p)
        return await openai_llm.generate_full_response_async(user_prompt=user_prompt, system_prompt=system_prompt,
                                                             model=model_name, temperature=temperature,
                                                             top_p=top_p, max_tokens=max_tokens)

    def generate_json_with_pydantic(self, user_prompt, pydantic_model, model_name):
        """Delegate structured-JSON generation validated against *pydantic_model*."""
        return openai_llm.generate_json_with_pydantic(user_prompt=user_prompt,
                                                      pydantic_model=pydantic_model,
                                                      model_name=model_name)

    async def generate_json_with_pydantic_async(self, user_prompt, pydantic_model, model_name):
        """Async variant of :meth:`generate_json_with_pydantic`."""
        return await openai_llm.generate_json_with_pydantic_async(user_prompt=user_prompt,
                                                                  pydantic_model=pydantic_model,
                                                                  model_name=model_name)
99
+
100
+
101
+
102
+
103
+
104
+
105
class GemeniLLM(LLM):
    """Google Gemini-backed implementation; delegates to ``gemeni_llm``.

    Per-call arguments that are left as ``None`` fall back to the values
    stored on the instance.
    """

    def __init__(self, model, model_name, temperature, top_p):
        super().__init__(model, model_name, temperature, top_p)

    def _resolve(self, model_name, temperature, top_p):
        # Fill any unset per-call override with the instance default
        # (previously duplicated in both generate methods).
        return (
            model_name if model_name is not None else self.model_name,
            temperature if temperature is not None else self.temperature,
            top_p if top_p is not None else self.top_p,
        )

    def generate_text(self, user_prompt, model_name=None, temperature=None,
                      top_p=None, max_tokens=500):
        """Return only the generated text for *user_prompt*."""
        model_name, temperature, top_p = self._resolve(model_name, temperature, top_p)
        return gemeni_llm.generate_text(user_prompt=user_prompt,
                                        model=model_name, temperature=temperature,
                                        top_p=top_p, max_tokens=max_tokens)

    def generate_full_response(self, user_prompt, model_name=None, temperature=None,
                               top_p=None, max_tokens=500):
        """Return the provider's full response object, not just the text."""
        model_name, temperature, top_p = self._resolve(model_name, temperature, top_p)
        return gemeni_llm.generate_full_response(user_prompt=user_prompt,
                                                 model=model_name, temperature=temperature,
                                                 top_p=top_p, max_tokens=max_tokens)
127
+
128
+
129
+
130
+
131
+
132
+
133
+
134
+
135
+
136
+
@@ -0,0 +1,56 @@
1
+ import time
2
+ from typing import Type
3
+ from pydantic import BaseModel
4
+ from SimplerLLM.langauge.llm import LLM
5
+
6
+ from SimplerLLM.tools.json_helpers import (
7
+ extract_json_from_text,
8
+ convert_json_to_pydantic_model,
9
+ validate_json_with_pydantic_model,
10
+ generate_json_example_from_pydantic
11
+ )
12
+
13
+
14
def generate_basic_pydantic_json_model(model_class: Type[BaseModel], prompt: str,
                                       llm_instance: LLM, max_retries: int = 3,
                                       initial_delay: float = 1.0) -> BaseModel:
    """Ask the LLM for JSON matching *model_class*, retrying on failure.

    :param model_class: Pydantic model class used for validation and conversion.
    :param prompt: The fully formatted prompt including the topic.
    :param llm_instance: Instance of a large language model.
    :param max_retries: Maximum number of retries after the first attempt.
    :param initial_delay: Base delay in seconds for the exponential backoff.
    :return: A validated ``model_class`` instance on success; otherwise an
        error-message string (kept for backward compatibility with callers
        that check for ``str`` results instead of catching exceptions).
    """
    # The example JSON does not change between attempts, so build the
    # augmented prompt once. BUGFIX: the instruction used to read
    # "should me a structured JSON format".
    json_model = generate_json_example_from_pydantic(model_class)
    optimized_prompt = (
        prompt
        + f'\n\nThe response should be a structured JSON format that matches the following JSON: {json_model}'
    )

    errors = None
    for attempt in range(max_retries + 1):
        try:
            ai_response = llm_instance.generate_text(optimized_prompt)
            if ai_response:
                json_object = extract_json_from_text(ai_response)
                validated, errors = validate_json_with_pydantic_model(model_class, json_object)
                if not errors:
                    return convert_json_to_pydantic_model(model_class, json_object[0])
            else:
                # Treat an empty response like a validation failure so it is
                # reported (the original raised NameError on this path when
                # retries were exhausted).
                errors = "empty response from the language model"
        except Exception as e:  # Replace with specific exception if possible
            return f"Exception occurred: {e}"

        # Back off before the next attempt, doubling the delay each time.
        if attempt < max_retries:
            time.sleep(initial_delay * (2 ** attempt))

    return f"Validation failed after {max_retries} retries: {errors}"
55
+
56
+
@@ -0,0 +1,125 @@
1
+ import google.generativeai as genai
2
+ import os
3
+ from dotenv import load_dotenv
4
+ import os
5
+ import asyncio
6
+ import time
7
+
8
# Load environment variables from a local .env file, if present.
load_dotenv()


def _env_or_default(name, cast, default):
    """Read env var *name* and cast it, falling back to *default* when unset."""
    raw = os.getenv(name)
    return default if raw is None else cast(raw)


# Constants
GEMENI_API_KEY = os.getenv('GEMENI_API_KEY')
if GEMENI_API_KEY is None:
    # BUGFIX: the message previously named OPENAI_API_KEY even though the
    # check is for the Gemini key.
    raise ValueError("Please set the GEMENI_API_KEY in .env file.")

# Retry/streaming knobs, overridable via environment variables.
MAX_RETRIES = _env_or_default('MAX_RETRIES', int, 3)
RETRY_DELAY = _env_or_default('RETRY_DELAY', int, 2)
# BUGFIX: previously cast RETRY_DELAY instead of the STREAMING_DELAY value.
STREAMING_DELAY = _env_or_default('STREAMING_DELAY', float, 0.1)


genai.configure(api_key=GEMENI_API_KEY)
38
+
39
+
40
def generate_text_basic(user_prompt, model):
    """Generate text from Gemini using the provider's default settings.

    Retries up to ``MAX_RETRIES`` times with exponential backoff; on total
    failure prints a message and returns ``None``.
    """
    if not user_prompt or not isinstance(user_prompt, str):
        raise ValueError("user_prompt must be a non-empty string.")
    if not model or not isinstance(model, str):
        raise ValueError("model must be a non-empty string.")

    gemini_model = genai.GenerativeModel(model)

    last_attempt = MAX_RETRIES - 1
    for attempt in range(MAX_RETRIES):
        try:
            return gemini_model.generate_content(user_prompt).text
        except Exception as e:  # Consider catching more specific exceptions
            if attempt == last_attempt:
                # Out of retries: report the failure and give up.
                print(f"Failed to generate response after {MAX_RETRIES} attempts due to: {e}")
                return None
            time.sleep(RETRY_DELAY * (2 ** attempt))
63
+
64
def generate_text(user_prompt, model, max_tokens=2000, top_p=1.0, temperature=0.7):
    """Generate text from Gemini with explicit generation parameters.

    Retries up to ``MAX_RETRIES`` times with exponential backoff; on total
    failure prints a message and returns ``None``.
    """
    if not user_prompt or not isinstance(user_prompt, str):
        raise ValueError("user_prompt must be a non-empty string.")
    if not model or not isinstance(model, str):
        raise ValueError("model must be a non-empty string.")

    gemini_model = genai.GenerativeModel(model)

    last_attempt = MAX_RETRIES - 1
    for attempt in range(MAX_RETRIES):
        try:
            result = gemini_model.generate_content(
                user_prompt,
                generation_config=genai.types.GenerationConfig(
                    candidate_count=1,
                    max_output_tokens=max_tokens,
                    temperature=temperature,
                    top_p=top_p,
                ),
            )
            return result.text
        except Exception as e:  # Consider catching more specific exceptions
            if attempt == last_attempt:
                # Out of retries: report the failure and give up.
                print(f"Failed to generate response after {MAX_RETRIES} attempts due to: {e}")
                return None
            time.sleep(RETRY_DELAY * (2 ** attempt))
92
+
93
def generate_full_response(user_prompt, model, max_tokens=2000, top_p=1.0, temperature=0.7):
    """Like ``generate_text`` but return the whole provider response object.

    Retries up to ``MAX_RETRIES`` times with exponential backoff; on total
    failure prints a message and returns ``None``.
    """
    if not user_prompt or not isinstance(user_prompt, str):
        raise ValueError("user_prompt must be a non-empty string.")
    if not model or not isinstance(model, str):
        raise ValueError("model must be a non-empty string.")

    gemini_model = genai.GenerativeModel(model)

    last_attempt = MAX_RETRIES - 1
    for attempt in range(MAX_RETRIES):
        try:
            # Return the full response (candidates, safety info, ...), not
            # just the extracted text.
            return gemini_model.generate_content(
                user_prompt,
                generation_config=genai.types.GenerationConfig(
                    candidate_count=1,
                    max_output_tokens=max_tokens,
                    temperature=temperature,
                    top_p=top_p,
                ),
            )
        except Exception as e:  # Consider catching more specific exceptions
            if attempt == last_attempt:
                # Out of retries: report the failure and give up.
                print(f"Failed to generate response after {MAX_RETRIES} attempts due to: {e}")
                return None
            time.sleep(RETRY_DELAY * (2 ** attempt))
120
+
121
+
122
+
123
+
124
+
125
+