SimplerLLM 0.2.0.tar.gz → 0.2.3.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- simplerllm-0.2.3/LICENSE +21 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/PKG-INFO +30 -1
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/image/img_helper_funcs.py +33 -33
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/image/stability_ai.py +84 -84
- simplerllm-0.2.3/SimplerLLM/language/embeddings.py +66 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/language/llm.py +2 -0
- simplerllm-0.2.3/SimplerLLM/language/llm_addons.py +131 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/language/llm_providers/anthropic_llm.py +4 -4
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/language/llm_providers/llm_response_models.py +16 -9
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/language/llm_providers/openai_llm.py +56 -20
- simplerllm-0.2.3/SimplerLLM/language/llm_providers/transformers_llm.py +54 -0
- simplerllm-0.2.3/SimplerLLM/tools/file_functions.py +38 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/tools/file_loader.py +34 -34
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/tools/json_helpers.py +34 -24
- simplerllm-0.2.3/SimplerLLM/tools/text_chunker.py +224 -0
- simplerllm-0.2.3/SimplerLLM/tools/web_crawler.py +41 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM.egg-info/PKG-INFO +30 -1
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM.egg-info/SOURCES.txt +4 -1
- simplerllm-0.2.3/SimplerLLM.egg-info/requires.txt +12 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM.egg-info/top_level.txt +0 -1
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/setup.py +7 -1
- SimplerLLM-0.2.0/SimplerLLM/language/embeddings.py +0 -251
- SimplerLLM-0.2.0/SimplerLLM/language/llm_addons.py +0 -67
- SimplerLLM-0.2.0/SimplerLLM/tools/text_chunker.py +0 -228
- SimplerLLM-0.2.0/SimplerLLM.egg-info/requires.txt +0 -11
- SimplerLLM-0.2.0/my_tests/__init__.py +0 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/__init__.py +0 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/image/__init__.py +0 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/language/__init__.py +0 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/language/llm_providers/__init__.py +0 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/language/llm_providers/gemini_llm.py +0 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/prompts/__init__.py +0 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/prompts/prompt_builder.py +0 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/tools/__init__.py +0 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/tools/generic_loader.py +0 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/tools/rapid_api.py +0 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/tools/serp.py +0 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM.egg-info/dependency_links.txt +0 -0
- {SimplerLLM-0.2.0 → simplerllm-0.2.3}/setup.cfg +0 -0
simplerllm-0.2.3/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Hasan Aboul Hasan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

{SimplerLLM-0.2.0 → simplerllm-0.2.3}/PKG-INFO
RENAMED
@@ -1,10 +1,11 @@
 Metadata-Version: 2.1
 Name: SimplerLLM
-Version: 0.2.0
+Version: 0.2.3
 Summary: An easy-to-use Library for interacting with language models.
 Home-page: https://github.com/hassancs91/SimplerLLM
 Author: Hasan Aboul Hasan
 Author-email: hasan@learnwithhasan.com
+License: MIT
 Keywords: text generation,openai,LLM,RAG
 Classifier: Development Status :: 4 - Beta
 Classifier: Intended Audience :: Developers
@@ -15,15 +16,35 @@ Classifier: Programming Language :: Python :: 3.7
 Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.6
 Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: aiohttp==3.9.4
+Requires-Dist: duckduckgo_search==5.3.0
+Requires-Dist: newspaper3k==0.2.8
+Requires-Dist: numpy==1.26.4
+Requires-Dist: openai==1.25.0
+Requires-Dist: pydantic==2.7.1
+Requires-Dist: PyPDF2==3.0.1
+Requires-Dist: python-dotenv==1.0.1
+Requires-Dist: python_docx==1.1.0
+Requires-Dist: pytube==15.0.0
+Requires-Dist: Requests==2.31.0
+Requires-Dist: youtube_transcript_api==0.6.2
 
 # ⚪ SimplerLLM (Beta)
 
 ⚡ Your Easy Pass to Advanced AI ⚡
 
+
 [](https://opensource.org/licenses/MIT)
+[](https://discord.gg/HUrtZXyp3j)
+
+
+
 
 ## 🤔 What is SimplerLLM?
 
@@ -171,6 +192,10 @@ This function splits the text into chunks based on sentences.
 
 This function splits text into chunks based on paragraphs.
 
+### chunk_by_semantics
+
+This function splits text into chunks based on semantics.
+
 Example
 
 ```python
@@ -197,3 +222,7 @@ chunks = chunker.chunk_by_max_chunk_size(text, 100, True)
 - Document Chunker
 - Advanced Document Loader
 - Integration With More Providers
+- Simple RAG With SimplerVectors
+- Integration with Vector Databases
+- Agent Builder
+- LLM Server

{SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/image/img_helper_funcs.py
RENAMED
@@ -1,33 +1,33 @@
All 33 lines were removed and re-added with identical content, so the old and new files match line for line; the file is shown once below.

import base64
import os


def save_image_from_base64(
    base64_str: str, file_path: str = None, image_type: str = "png"
):
    """
    Saves an image from a base64 encoded string to a file.
    If file_path is not provided, the image is saved in the current working directory.

    :param base64_str: The base64 encoded string of the image.
    :param file_path: The path (including file name) where the image will be saved.
                      If None, saves in the current working directory with a default name.
    :param image_type: The image type/format (e.g., 'png', 'jpg'). Default is 'png'.
    """
    # Decode the base64 string
    image_data = base64.b64decode(base64_str)

    # Set the default file path if not provided
    if file_path is None:
        file_path = os.path.join(os.getcwd(), f"default_image.{image_type}")

    # Ensure the directory exists
    os.makedirs(os.path.dirname(file_path), exist_ok=True)

    # Append the file extension if not present
    if not file_path.lower().endswith(f".{image_type.lower()}"):
        file_path += f".{image_type}"

    # Write the image data to a file
    with open(file_path, "wb") as file:
        file.write(image_data)

{SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/image/stability_ai.py
RENAMED
@@ -1,84 +1,84 @@
All 84 lines were removed and re-added with identical content, so the old and new files match line for line; the file is shown once below.

from pydantic import BaseModel
import base64
import requests
from typing import List
import os
import requests
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Constants
STABILITY_API_KEY = os.getenv("STABILITY_API_KEY")


class ImageData(BaseModel):
    base64_str: str
    size_kb: float
    width: int
    height: int


class ImageList(BaseModel):
    images: List[ImageData]


def generate_images(
    model_name: str,
    prompt: str,
    negative_prompt: str = "",
    width: int = 512,
    height: int = 512,
    samples: int = 1,
    steps: int = 30,
    cfg_scale: int = 7,
    seed: int = 0,
    style_preset: str = None,
) -> ImageList:

    engine_id = model_name
    api_host = "https://api.stability.ai"
    api_key = STABILITY_API_KEY

    if api_key is None:
        raise Exception("Missing Stability API key.")

    response = requests.post(
        f"{api_host}/v1/generation/{engine_id}/text-to-image",
        headers={
            "Content-Type": "application/json",
            "Accept": "application/json",
            "Authorization": f"Bearer {api_key}",
        },
        json={
            "text_prompts": [{"text": prompt}],
            "negative_prompt": negative_prompt,
            "cfg_scale": cfg_scale,
            "height": height,
            "width": width,
            "samples": samples,
            "steps": steps,
            "seed": seed,
            "style_preset": style_preset,
        },
    )

    if response.status_code != 200:
        raise Exception("Non-200 response: " + str(response.text))

    data = response.json()

    images_data = []
    for image in data["artifacts"]:
        base64_str = image["base64"]
        decoded_image = base64.b64decode(base64_str)
        size_kb = len(decoded_image) / 1024

        images_data.append(
            ImageData(
                base64_str=base64_str, size_kb=size_kb, width=width, height=height
            )
        )

    return ImageList(images=images_data)

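For readers skimming the diff, here is a minimal usage sketch that ties the two (unchanged) image helpers together. It is not part of the package itself; the engine id is an assumption, and STABILITY_API_KEY must be set in the environment, as the module above requires.

```python
# Hypothetical usage sketch for the image helpers above (not from the package itself).
from SimplerLLM.image.stability_ai import generate_images
from SimplerLLM.image.img_helper_funcs import save_image_from_base64

result = generate_images(
    model_name="stable-diffusion-xl-1024-v1-0",  # assumed Stability engine id
    prompt="a lighthouse at sunset, watercolor style",
    width=1024,
    height=1024,
    samples=1,
)

# Each ImageData carries the base64 payload plus its size and dimensions;
# a directory is included in the path because save_image_from_base64 calls os.makedirs on it.
save_image_from_base64(result.images[0].base64_str, "output/lighthouse.png")
```
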
simplerllm-0.2.3/SimplerLLM/language/embeddings.py
ADDED
@@ -0,0 +1,66 @@
+import SimplerLLM.language.llm_providers.openai_llm as openai_llm
+from enum import Enum
+
+class EmbeddingsProvider(Enum):
+    OPENAI = 1
+
+
+class LLM:
+    def __init__(
+        self, provider=EmbeddingsProvider.OPENAI, model_name="text-embedding-3-small"
+    ):
+        self.provider = provider
+        self.model_name = model_name
+
+    @staticmethod
+    def create(
+        provider=None,
+        model_name=None,
+    ):
+        if provider == EmbeddingsProvider.OPENAI:
+            return OpenAILLM(provider, model_name)
+        else:
+            return None
+
+    def set_model(self, provider):
+        if not isinstance(provider, EmbeddingsProvider):
+            raise ValueError("Provider must be an instance of EmbeddingsProvider Enum")
+        self.provider = provider
+
+
+class OpenAILLM(LLM):
+    def __init__(self, model, model_name):
+        super().__init__(model, model_name)
+
+
+    def generate_embeddings(
+        self,
+        user_input,
+        model_name=None,
+        full_response=False,
+    ):
+        # Use instance values as defaults if not provided
+        model_name = model_name if model_name is not None else self.model_name
+
+        return openai_llm.generate_embeddings(
+            user_input=user_input,
+            model_name=model_name,
+            full_response=full_response,
+        )
+
+    async def generate_embeddings_async(
+        self,
+        user_input,
+        model_name=None,
+        full_response=False,
+    ):
+        # Use instance values as defaults if not provided
+        model_name = model_name if model_name is not None else self.model_name
+
+        return await openai_llm.generate_embeddings_async(
+            user_input=user_input,
+            model_name=model_name,
+            full_response=full_response,
+        )
+
+

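A minimal usage sketch of the new embeddings module, based only on the code added above; it assumes the underlying openai_llm module (whose diff is not shown here) picks up the OpenAI API key from the environment.

```python
# Hypothetical usage of SimplerLLM/language/embeddings.py as added in 0.2.3.
from SimplerLLM.language.embeddings import LLM as EmbeddingsLLM, EmbeddingsProvider

# The create() factory returns an OpenAILLM wrapper for the OPENAI provider (None otherwise).
embeddings_llm = EmbeddingsLLM.create(
    provider=EmbeddingsProvider.OPENAI,
    model_name="text-embedding-3-small",
)

# Delegates to openai_llm.generate_embeddings; full_response=True presumably returns the
# richer LLMEmbeddingsResponse object introduced later in this diff.
embedding = embeddings_llm.generate_embeddings("SimplerLLM turns embeddings into one call.")
```
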
simplerllm-0.2.3/SimplerLLM/language/llm_addons.py
ADDED
@@ -0,0 +1,131 @@
+import time
+from typing import Type
+from pydantic import BaseModel
+from SimplerLLM.language.llm import LLM
+import asyncio
+
+from SimplerLLM.tools.json_helpers import (
+    extract_json_from_text,
+    convert_json_to_pydantic_model,
+    validate_json_with_pydantic_model,
+    generate_json_example_from_pydantic,
+)
+
+
+
+def generate_pydantic_json_model(
+    model_class: Type[BaseModel],
+    prompt: str,
+    llm_instance: LLM,
+    max_retries: int = 3,
+    initial_delay: float = 1.0,
+    custom_prompt_suffix: str = None,
+) -> BaseModel:
+    """
+    Generates a model instance based on a given prompt, retrying on validation errors.
+
+    :param model_class: The Pydantic model class to be used for validation and conversion.
+    :param prompt: The fully formatted prompt including the topic.
+    :param llm_instance: Instance of a large language model.
+    :param max_retries: Maximum number of retries on validation errors.
+    :param initial_delay: Initial delay in seconds before the first retry.
+    :param custom_prompt_suffix: Optional string to customize or override the generated prompt extension.
+
+    :return: BaseModel object if successful, otherwise error message.
+    """
+    # Concatenate prompt and JSON model outside the loop
+    json_model = generate_json_example_from_pydantic(model_class)
+    optimized_prompt = custom_prompt_suffix or (prompt + f"\n\nThe response should be in a structured JSON format that matches the following JSON: {json_model}")
+
+    # Calculate exponential backoff before the loop
+    backoff_delays = [initial_delay * (2**attempt) for attempt in range(max_retries + 1)]
+
+    for attempt, delay in enumerate(backoff_delays):
+        try:
+            ai_response = llm_instance.generate_response(prompt=optimized_prompt)
+
+            if ai_response:
+                json_object = extract_json_from_text(ai_response)
+
+                validated, errors = validate_json_with_pydantic_model(
+                    model_class, json_object
+                )
+
+                if not errors:
+                    model_object = convert_json_to_pydantic_model(
+                        model_class, json_object[0]
+                    )
+                    return model_object
+
+        except Exception as e:  # Replace SpecificException with the appropriate exception
+            return f"Exception occurred: {e}"
+
+        # Retry logic for empty AI response or validation errors
+        if not ai_response or (errors and attempt < max_retries):
+            time.sleep(delay)
+        elif errors:
+            return f"Validation failed after {max_retries} retries: {errors}"
+
+    return "Maximum retries exceeded without successful validation."
+
+async def generate_pydantic_json_model_async(
+    model_class: Type[BaseModel],
+    prompt: str,
+    llm_instance: LLM,
+    max_retries: int = 3,
+    initial_delay: float = 1.0,
+    custom_prompt_suffix: str = None,
+) -> BaseModel:
+    """
+    Generates a model instance based on a given prompt, retrying on validation errors.
+
+    :param model_class: The Pydantic model class to be used for validation and conversion.
+    :param prompt: The fully formatted prompt including the topic.
+    :param llm_instance: Instance of a large language model.
+    :param max_retries: Maximum number of retries on validation errors.
+    :param initial_delay: Initial delay in seconds before the first retry.
+    :param custom_prompt_suffix: Optional string to customize or override the generated prompt extension.
+
+    :return: BaseModel object if successful, otherwise error message.
+    """
+    # Concatenate prompt and JSON model outside the loop
+    json_model = generate_json_example_from_pydantic(model_class)
+    optimized_prompt = custom_prompt_suffix or (prompt + f"\n\nThe response should be in a structured JSON format that matches the following JSON: {json_model}")
+
+    # Calculate exponential backoff before the loop
+    backoff_delays = [initial_delay * (2**attempt) for attempt in range(max_retries + 1)]
+
+    for attempt, delay in enumerate(backoff_delays):
+        try:
+            ai_response = await llm_instance.generate_response_async(prompt=optimized_prompt)
+
+            if ai_response:
+                json_object = extract_json_from_text(ai_response)
+
+                validated, errors = validate_json_with_pydantic_model(
+                    model_class, json_object
+                )
+
+                if not errors:
+                    model_object = convert_json_to_pydantic_model(
+                        model_class, json_object[0]
+                    )
+                    return model_object
+
+        except Exception as e:  # Replace SpecificException with the appropriate exception
+            return f"Exception occurred: {e}"
+
+        # Retry logic for empty AI response or validation errors
+        if not ai_response or (errors and attempt < max_retries):
+            await asyncio.sleep(delay)
+        elif errors:
+            return f"Validation failed after {max_retries} retries: {errors}"
+
+    return "Maximum retries exceeded without successful validation."
+
+

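A short usage sketch of the new structured-output helper, grounded in the signature above. The LLM.create / LLMProvider call is an assumption about SimplerLLM/language/llm.py, which this diff only touches with two added lines.

```python
# Hypothetical example: coerce an LLM response into a Pydantic model with retries.
from typing import List
from pydantic import BaseModel

from SimplerLLM.language.llm import LLM, LLMProvider  # assumed API of llm.py
from SimplerLLM.language.llm_addons import generate_pydantic_json_model


class TitleIdeas(BaseModel):
    titles: List[str]


llm_instance = LLM.create(provider=LLMProvider.OPENAI, model_name="gpt-3.5-turbo")  # assumed

result = generate_pydantic_json_model(
    model_class=TitleIdeas,
    prompt="Suggest 5 blog titles about local LLMs.",
    llm_instance=llm_instance,
    max_retries=3,
)

# On success the helper returns a TitleIdeas instance; on failure it returns an error string.
print(result.titles if isinstance(result, TitleIdeas) else result)
```
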
{SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/language/llm_providers/anthropic_llm.py
RENAMED
@@ -11,7 +11,7 @@ from .llm_response_models import LLMFullResponse
 load_dotenv()
 
 # Constants
-
+ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "")
 MAX_RETRIES = int(os.getenv("MAX_RETRIES", 3))
 RETRY_DELAY = int(os.getenv("RETRY_DELAY", 2))
 
@@ -46,7 +46,7 @@ def generate_response(
     # Define the URL and headers
     url = "https://api.anthropic.com/v1/messages"
     headers = {
-        "x-api-key":
+        "x-api-key": ANTHROPIC_API_KEY,
         "anthropic-version": "2023-06-01",
         "content-type": "application/json",
     }
@@ -77,7 +77,7 @@ def generate_response(
             else:
                 return response.json()["content"][0]["text"]
 
-        except
+        except Exception as e:
             print(f"Attempt {attempt + 1} failed: {e}")
             time.sleep(retry_delay)
             retry_delay *= 2  # Double the delay each retry
@@ -116,7 +116,7 @@ async def generate_response_async(
     # Define the URL and headers
     url = "https://api.anthropic.com/v1/messages"
     headers = {
-        "x-api-key":
+        "x-api-key": ANTHROPIC_API_KEY,
         "anthropic-version": "2023-06-01",
         "content-type": "application/json",
     }
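The practical effect of the change above is that the Anthropic key and retry settings are now read from the environment at import time (python-dotenv loads a local .env first). A hedged sketch of supplying them in code; the variable names come from the diff, and the key value is a placeholder.

```python
# Sketch only: set the variables anthropic_llm.py reads when it is imported.
import os

os.environ["ANTHROPIC_API_KEY"] = "your-anthropic-key"  # placeholder, not a real key
os.environ["MAX_RETRIES"] = "3"
os.environ["RETRY_DELAY"] = "2"

# Import after the environment is set, since the module-level constants are resolved on import.
import SimplerLLM.language.llm_providers.anthropic_llm as anthropic_llm
```
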
{SimplerLLM-0.2.0 → simplerllm-0.2.3}/SimplerLLM/language/llm_providers/llm_response_models.py
RENAMED
@@ -1,9 +1,16 @@
-from pydantic import BaseModel
-from typing import Any
-
-
-class LLMFullResponse(BaseModel):
-    generated_text: str
-    model: str
-    process_time: float
-    llm_provider_response: Any
+from pydantic import BaseModel
+from typing import Any
+
+
+class LLMFullResponse(BaseModel):
+    generated_text: str
+    model: str
+    process_time: float
+    llm_provider_response: Any
+
+
+class LLMEmbeddingsResponse(BaseModel):
+    generated_embedding: Any
+    model: str
+    process_time: float
+    llm_provider_response: Any