SimplerLLM 0.2.5__tar.gz → 0.2.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. {simplerllm-0.2.5 → simplerllm-0.2.6}/PKG-INFO +1 -1
  2. simplerllm-0.2.6/SimplerLLM/agents/agent.py +121 -0
  3. simplerllm-0.2.6/SimplerLLM/language/llm.py +333 -0
  4. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/language/llm_addons.py +2 -1
  5. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/language/llm_providers/anthropic_llm.py +1 -19
  6. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/language/llm_providers/gemini_llm.py +11 -51
  7. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/language/llm_providers/openai_llm.py +2 -31
  8. simplerllm-0.2.6/SimplerLLM/prompts/messages_template.py +41 -0
  9. simplerllm-0.2.6/SimplerLLM/tools/__init__.py +0 -0
  10. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/tools/generic_loader.py +25 -0
  11. simplerllm-0.2.6/SimplerLLM/tools/predefined_tools.py +6 -0
  12. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM.egg-info/PKG-INFO +1 -1
  13. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM.egg-info/SOURCES.txt +4 -0
  14. {simplerllm-0.2.5 → simplerllm-0.2.6}/setup.py +1 -1
  15. simplerllm-0.2.5/SimplerLLM/language/llm.py +0 -208
  16. {simplerllm-0.2.5 → simplerllm-0.2.6}/LICENSE +0 -0
  17. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/__init__.py +0 -0
  18. {simplerllm-0.2.5/SimplerLLM/image → simplerllm-0.2.6/SimplerLLM/agents}/__init__.py +0 -0
  19. {simplerllm-0.2.5/SimplerLLM/language → simplerllm-0.2.6/SimplerLLM/image}/__init__.py +0 -0
  20. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/image/img_helper_funcs.py +0 -0
  21. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/image/stability_ai.py +0 -0
  22. {simplerllm-0.2.5/SimplerLLM/language/llm_providers → simplerllm-0.2.6/SimplerLLM/language}/__init__.py +0 -0
  23. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/language/embeddings.py +0 -0
  24. {simplerllm-0.2.5/SimplerLLM/prompts → simplerllm-0.2.6/SimplerLLM/language/llm_providers}/__init__.py +0 -0
  25. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/language/llm_providers/llm_response_models.py +0 -0
  26. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/language/llm_providers/transformers_llm.py +0 -0
  27. {simplerllm-0.2.5/SimplerLLM/tools → simplerllm-0.2.6/SimplerLLM/prompts}/__init__.py +0 -0
  28. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/prompts/prompt_builder.py +0 -0
  29. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/tools/file_functions.py +0 -0
  30. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/tools/file_loader.py +0 -0
  31. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/tools/json_helpers.py +0 -0
  32. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/tools/rapid_api.py +0 -0
  33. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/tools/serp.py +0 -0
  34. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/tools/text_chunker.py +0 -0
  35. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM/tools/web_crawler.py +0 -0
  36. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM.egg-info/dependency_links.txt +0 -0
  37. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM.egg-info/requires.txt +0 -0
  38. {simplerllm-0.2.5 → simplerllm-0.2.6}/SimplerLLM.egg-info/top_level.txt +0 -0
  39. {simplerllm-0.2.5 → simplerllm-0.2.6}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: SimplerLLM
- Version: 0.2.5
+ Version: 0.2.6
  Summary: An easy-to-use Library for interacting with language models.
  Home-page: https://github.com/hassancs91/SimplerLLM
  Author: Hasan Aboul Hasan
@@ -0,0 +1,121 @@
+ from SimplerLLM.language.llm import LLM, LLMProvider
+ from SimplerLLM.tools.json_helpers import extract_json_from_text
+ from SimplerLLM.tools.predefined_tools import PREDEFINED_TOOLS
+
+
+
+ class Agent:
+     def __init__(self, llm_instace: LLM, verbose: bool = False):
+         self.verbose = verbose
+         self.llm_instance = llm_instace
+         self.available_actions = {}
+         self.system_prompt_template = """
+ You run in a loop of Thought, Action, PAUSE, Action_Response.
+ At the end of the loop you output an Answer.
+
+ Use Thought to understand the question you have been asked.
+ Use Action to run one of the actions available to you - then return PAUSE.
+ Action_Response will be the result of running those actions.
+
+ Your available actions are:
+
+ {actions_list}
+
+ To use an action, please use the following format:
+
+ Action:
+
+ {{
+ "function_name": tool_name,
+ "function_parms": {{
+ "param": "value"
+ }}
+ }}
+
+ Action_Response: the result of the action.
+
+ Final Answer:\n
+
+
+ """.strip()
+
+
+
+
+
+     def add_tool(self, tool_function):
+         if tool_function in PREDEFINED_TOOLS.values():
+             tool_name = [name for name, func in PREDEFINED_TOOLS.items() if func == tool_function][0]
+             description = tool_function.__doc__.strip()
+         elif hasattr(tool_function, 'is_custom_tool') and tool_function.is_custom_tool:
+             tool_name = tool_function.__name__
+             description = tool_function.description
+         else:
+             raise ValueError("Tool function must be predefined or decorated as a custom tool.")
+
+         self.available_actions[tool_name] = {
+             "function": tool_function,
+             "description": description
+         }
+
+
+
+     def construct_system_prompt(self):
+         actions_description = "\n".join(
+             [f"{name}:\n {details['description']}"
+              for name, details in self.available_actions.items()]
+         )
+         return self.system_prompt_template.format(actions_list=actions_description)
+
+     def generate_response(self, user_query, max_turns=5):
+         final_response = ""
+         react_system_prompt = self.construct_system_prompt()
+         messages = [
+             {"role": "system", "content": react_system_prompt},
+             {"role": "user", "content": user_query}
+         ]
+         turn_count = 1
+
+         while turn_count <= max_turns:
+             if self.verbose:
+                 print(f"Loop: {turn_count}")
+                 print("----------------------")
+
+             turn_count += 1
+
+             agent_response = self.llm_instance.generate_response(messages=messages)
+             messages.append({"role": "assistant", "content": agent_response})
+             final_response = agent_response
+             if self.verbose:
+                 print(agent_response)
+
+             # Extract action JSON from text response.
+             action_json = extract_json_from_text(agent_response)
+             if action_json:
+                 if 'function_name' in action_json[0]:
+                     function_name = action_json[0]['function_name']
+                     function_parms = action_json[0]['function_parms']
+                     if function_name not in self.available_actions:
+                         raise Exception(f"Unknown action: {function_name}: {function_parms}")
+                     if self.verbose:
+                         print(f" -- running {function_name} with {function_parms}")
+                     action_function = self.available_actions[function_name]["function"]
+                     result = action_function(**function_parms)
+                     if self.verbose:
+                         print("Action_Response:", result)
+                     function_result_message = f"Action_Response: {result}"
+                     messages.append({"role": "user", "content": function_result_message})
+                     if self.verbose:
+                         print("----------------------")
+                 else:
+                     break
+             else:
+                 break
+
+         # extract final answer from json
+         # answer_json = extract_json_from_text(final_response)
+         # answer = final_response.strip().split(":", 1)[1].strip()
+
+         return final_response
+
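For orientation, here is a minimal usage sketch of the new Agent class, derived only from the code above; the model name and query are illustrative:

    from SimplerLLM.language.llm import LLM, LLMProvider
    from SimplerLLM.agents.agent import Agent
    from SimplerLLM.tools.predefined_tools import PREDEFINED_TOOLS

    # Build an LLM instance via the factory (model name is illustrative).
    llm = LLM.create(provider=LLMProvider.OPENAI, model_name="gpt-3.5-turbo")

    # add_tool accepts a function from PREDEFINED_TOOLS, or one flagged as a custom tool.
    agent = Agent(llm, verbose=True)
    agent.add_tool(PREDEFINED_TOOLS["load_content"])

    # Runs the Thought/Action/PAUSE/Action_Response loop, bounded by max_turns.
    answer = agent.generate_response("Summarize https://example.com/article")
    print(answer)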
@@ -0,0 +1,333 @@
+ import SimplerLLM.language.llm_providers.openai_llm as openai_llm
+ import SimplerLLM.language.llm_providers.gemini_llm as gemini_llm
+ import SimplerLLM.language.llm_providers.anthropic_llm as anthropic_llm
+ from SimplerLLM.prompts.messages_template import MessagesTemplate
+ from enum import Enum
+
+
+ class LLMProvider(Enum):
+     OPENAI = 1
+     GEMINI = 2
+     ANTHROPIC = 3
+
+
+ class LLM:
+     def __init__(
+         self,
+         provider=LLMProvider.OPENAI,
+         model_name="gpt-3.5-turbo",
+         temperature=0.7,
+         top_p=1.0,
+     ):
+         self.provider = provider
+         self.model_name = model_name
+         self.temperature = temperature
+         self.top_p = top_p
+
+     @staticmethod
+     def create(
+         provider=None,
+         model_name=None,
+         temperature=0.7,
+         top_p=1.0,
+     ):
+         if provider == LLMProvider.OPENAI:
+             return OpenAILLM(provider, model_name, temperature, top_p)
+         if provider == LLMProvider.GEMINI:
+             return GeminiLLM(provider, model_name, temperature, top_p)
+         if provider == LLMProvider.ANTHROPIC:
+             return AnthropicLLM(provider, model_name, temperature, top_p)
+         else:
+             return None
+
+     def set_model(self, provider):
+         if not isinstance(provider, LLMProvider):
+             raise ValueError("Provider must be an instance of LLMProvider Enum")
+         self.provider = provider
+
+     def prepare_params(self, model_name, temperature, top_p):
+         # Use instance values as defaults if parameters are not provided
+         return {
+             "model_name": model_name if model_name else self.model_name,
+             "temperature": temperature if temperature else self.temperature,
+             "top_p": top_p if top_p else self.top_p,
+         }
+
+
+ class OpenAILLM(LLM):
+     def __init__(self, model, model_name, temperature, top_p):
+         super().__init__(model, model_name, temperature, top_p)
+
+     def append_messages(self, system_prompt: str, messages: list):
+         model_messages = [{"role": "system", "content": system_prompt}]
+         if messages:
+             model_messages.extend(messages)
+         return model_messages
+
+     def generate_response(
+         self,
+         model_name: str = None,
+         prompt: str = None,
+         messages: list = None,
+         system_prompt: str = "You are a helpful AI Assistant",
+         temperature: float = 0.7,
+         max_tokens: int = 300,
+         top_p: float = 1.0,
+         full_response: bool = False,
+     ):
+         params = self.prepare_params(model_name, temperature, top_p)
+
+         # Validate inputs
+         if prompt and messages:
+             raise ValueError("Only one of 'prompt' or 'messages' should be provided.")
+         if not prompt and not messages:
+             raise ValueError("Either 'prompt' or 'messages' must be provided.")
+
+         # Prepare messages based on input type
+         if prompt:
+             model_messages = [
+                 {"role": "system", "content": system_prompt},
+                 {"role": "user", "content": prompt},
+             ]
+
+         if messages:
+             model_messages = self.append_messages(system_prompt, messages)
+
+         params.update(
+             {
+                 "messages": model_messages,
+                 "max_tokens": max_tokens,
+                 "full_response": full_response,
+             }
+         )
+         return openai_llm.generate_response(**params)
+
+     async def generate_response_async(
+         self,
+         model_name: str = None,
+         prompt: str = None,
+         messages: list = None,
+         system_prompt: str = "You are a helpful AI Assistant",
+         temperature: float = 0.7,
+         max_tokens: int = 300,
+         top_p: float = 1.0,
+         full_response: bool = False,
+     ):
+         params = self.prepare_params(model_name, temperature, top_p)
+
+         # Validate inputs
+         if prompt and messages:
+             raise ValueError("Only one of 'prompt' or 'messages' should be provided.")
+         if not prompt and not messages:
+             raise ValueError("Either 'prompt' or 'messages' must be provided.")
+
+         # Prepare messages based on input type
+         if prompt:
+             model_messages = [
+                 {"role": "system", "content": system_prompt},
+                 {"role": "user", "content": prompt},
+             ]
+
+         if messages:
+             model_messages = self.append_messages(system_prompt, messages)
+
+         params.update(
+             {
+                 "messages": model_messages,
+                 "max_tokens": max_tokens,
+                 "full_response": full_response,
+             }
+         )
+         return await openai_llm.generate_response_async(**params)
+
+
+ class GeminiLLM(LLM):
+
+     def __init__(self, model, model_name, temperature, top_p):
+         super().__init__(model, model_name, temperature, top_p)
+
+     def convert_messages_template(self, messages):
+         # Convert the unified message template to Gemini's format
+         # return [{"role": msg["role"], "parts": [msg["content"]]} for msg in messages]
+         return [{"role": msg["role"], "parts": [{"text": msg["content"]}]} for msg in messages]
+
+     def append_messages(self, system_prompt, messages):
+         model_messages = [
+             {"role": "user", "parts": [{"text": system_prompt}]},
+             {"role": "model", "parts": [{"text": "ok, confirmed."}]},
+         ]
+         messages = self.convert_messages_template(messages)
+         model_messages.extend(messages)
+         return model_messages
+
+     def generate_response(
+         self,
+         model_name=None,
+         prompt=None,
+         messages=None,
+         system_prompt="You are a helpful AI Assistant",
+         temperature=0.7,
+         max_tokens=300,
+         top_p=1.0,
+         full_response=False,
+     ):
+         params = self.prepare_params(model_name, temperature, top_p)
+         # Validate inputs
+         if prompt and messages:
+             raise ValueError("Only one of 'prompt' or 'messages' should be provided.")
+         if not prompt and not messages:
+             raise ValueError("Either 'prompt' or 'messages' must be provided.")
+
+         if prompt:
+             model_messages = [
+                 {"role": "user", "parts": [{"text": system_prompt}]},
+                 {"role": "model", "parts": [{"text": "ok, confirmed."}]},
+                 {"role": "user", "parts": [{"text": prompt}]}
+             ]
+
+         if messages:
+             model_messages = self.append_messages(system_prompt, messages)
+
+         params.update(
+             {
+                 "messages": model_messages,
+                 "system_prompt": system_prompt,
+                 "max_tokens": max_tokens,
+                 "full_response": full_response,
+             }
+         )
+         return gemini_llm.generate_response(**params)
+
+     async def generate_response_async(
+         self,
+         model_name=None,
+         prompt=None,
+         messages=None,
+         system_prompt="You are a helpful AI Assistant",
+         temperature=0.7,
+         max_tokens=300,
+         top_p=1.0,
+         full_response=False,
+     ):
+         params = self.prepare_params(model_name, temperature, top_p)
+         # Validate inputs
+         if prompt and messages:
+             raise ValueError("Only one of 'prompt' or 'messages' should be provided.")
+         if not prompt and not messages:
+             raise ValueError("Either 'prompt' or 'messages' must be provided.")
+
+         if prompt:
+             model_messages = [
+                 {"role": "user", "parts": [{"text": system_prompt}]},
+                 {"role": "model", "parts": [{"text": "ok, confirmed."}]},
+                 {"role": "user", "parts": [{"text": prompt}]}
+             ]
+
+         if messages:
+             model_messages = self.append_messages(system_prompt, messages)
+         params.update(
+             {
+                 "messages": model_messages,
+                 "system_prompt": system_prompt,
+                 "max_tokens": max_tokens,
+                 "full_response": full_response,
+             }
+         )
+         return await gemini_llm.generate_response_async(**params)
+
+
+ class AnthropicLLM(LLM):
+     def __init__(self, model, model_name, temperature, top_p):
+         super().__init__(model, model_name, temperature, top_p)
+
+     def append_messages(self, messages: list):
+         model_messages = []
+         if messages:
+             model_messages.extend(messages)
+         return model_messages
+
+     def generate_response(
+         self,
+         model_name: str = None,
+         prompt: str = None,
+         messages: list = None,
+         system_prompt: str = "You are a helpful AI Assistant",
+         temperature: float = 0.7,
+         max_tokens: int = 300,
+         top_p: float = 1.0,
+         full_response: bool = False,
+     ):
+         params = self.prepare_params(model_name, temperature, top_p)
+
+         # Validate inputs
+         if prompt and messages:
+             raise ValueError("Only one of 'prompt' or 'messages' should be provided.")
+         if not prompt and not messages:
+             raise ValueError("Either 'prompt' or 'messages' must be provided.")
+
+         # Prepare messages based on input type
+         if prompt:
+             model_messages = [
+                 {"role": "user", "content": prompt},
+             ]
+
+         if messages:
+             model_messages = self.append_messages(messages)
+
+         params.update(
+             {
+                 "system_prompt": system_prompt,
+                 "messages": model_messages,
+                 "max_tokens": max_tokens,
+                 "full_response": full_response,
+             }
+         )
+         return anthropic_llm.generate_response(**params)
+
+     async def generate_response_async(
+         self,
+         model_name=None,
+         prompt=None,
+         messages: list = None,
+         system_prompt="You are a helpful AI Assistant",
+         temperature=0.7,
+         max_tokens=300,
+         top_p=1.0,
+         full_response=False,
+     ):
+         params = self.prepare_params(model_name, temperature, top_p)
+
+         # Validate inputs
+         if prompt and messages:
+             raise ValueError("Only one of 'prompt' or 'messages' should be provided.")
+         if not prompt and not messages:
+             raise ValueError("Either 'prompt' or 'messages' must be provided.")
+
+         # Prepare messages based on input type
+         if prompt:
+             model_messages = [
+                 {"role": "user", "content": prompt},
+             ]
+
+         if messages:
+             model_messages = self.append_messages(messages)
+
+         params.update(
+             {
+                 "system_prompt": system_prompt,
+                 "messages": model_messages,
+                 "max_tokens": max_tokens,
+                 "full_response": full_response,
+             }
+         )
+         return await anthropic_llm.generate_response_async(**params)
+
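The rewritten LLM class moves prompt/messages handling into the provider subclasses; each generate_response accepts either a prompt string or a messages list, never both. A short sketch under those rules (model name is illustrative):

    from SimplerLLM.language.llm import LLM, LLMProvider

    llm = LLM.create(provider=LLMProvider.OPENAI, model_name="gpt-3.5-turbo")

    # Prompt form: the subclass wraps it with the system prompt for you.
    text = llm.generate_response(prompt="Write one sentence about diffs.")

    # Messages form: pass an alternating user/assistant history instead.
    history = [{"role": "user", "content": "Hello"}]
    text = llm.generate_response(messages=history, max_tokens=100)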
@@ -18,6 +18,7 @@ def generate_pydantic_json_model(
      prompt: str,
      llm_instance: LLM,
      max_retries: int = 3,
+     max_tokens: int = 4096,
      initial_delay: float = 1.0,
      custom_prompt_suffix: str = None,
  ) -> BaseModel:
@@ -42,7 +43,7 @@ def generate_pydantic_json_model(
 
      for attempt, delay in enumerate(backoff_delays):
          try:
-             ai_response = llm_instance.generate_response(prompt=optimized_prompt)
+             ai_response = llm_instance.generate_response(prompt=optimized_prompt, max_tokens=max_tokens)
 
              if ai_response:
                  json_object = extract_json_from_text(ai_response)
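A hedged example of the new max_tokens parameter on generate_pydantic_json_model; the Pydantic model, the prompt, and the name of the first parameter are assumptions for illustration, since this hunk does not show the full signature:

    from typing import List
    from pydantic import BaseModel
    from SimplerLLM.language.llm import LLM, LLMProvider
    from SimplerLLM.language.llm_addons import generate_pydantic_json_model

    class Recipe(BaseModel):  # hypothetical schema
        title: str
        steps: List[str]

    llm = LLM.create(provider=LLMProvider.OPENAI, model_name="gpt-3.5-turbo")
    recipe = generate_pydantic_json_model(
        model_class=Recipe,  # first parameter's name assumed; not shown in this hunk
        prompt="Generate a pancake recipe as JSON.",
        llm_instance=llm,
        max_tokens=4096,  # new in 0.2.6; forwarded to llm_instance.generate_response
    )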
@@ -18,7 +18,6 @@ RETRY_DELAY = int(os.getenv("RETRY_DELAY", 2))
 
  def generate_response(
      model_name: str,
-     prompt: Optional[str] = None,
      system_prompt: str = "You are a helpful AI Assistant",
      messages=None,
      temperature: float = 0.7,
@@ -34,14 +33,7 @@ def generate_response(
      retry_attempts = 3
      retry_delay = 1  # initial delay between retries in seconds
 
-     if messages is None:
-         if prompt is None:
-             raise ValueError("Either 'prompt' or 'messages' must be provided.")
-         messages = [{"role": "user", "content": prompt}]
-     else:
-         if prompt is not None:
-             raise ValueError("Only one of 'prompt' or 'messages' should be provided.")
-         messages = messages
+
 
      # Define the URL and headers
      url = "https://api.anthropic.com/v1/messages"
@@ -88,7 +80,6 @@ def generate_response(
 
  async def generate_response_async(
      model_name: str,
-     prompt: Optional[str] = None,
      system_prompt: str = "You are a helpful AI Assistant",
      messages=None,
      temperature: float = 0.7,
@@ -104,15 +95,6 @@ async def generate_response_async(
      retry_attempts = 3
      retry_delay = 1  # initial delay between retries in seconds
 
-     if messages is None:
-         if prompt is None:
-             raise ValueError("Either 'prompt' or 'messages' must be provided.")
-         messages = [{"role": "user", "content": prompt}]
-     else:
-         if prompt is not None:
-             raise ValueError("Only one of 'prompt' or 'messages' should be provided.")
-         messages = messages
-
 
      url = "https://api.anthropic.com/v1/messages"
      headers = {
@@ -19,7 +19,6 @@ RETRY_DELAY = int(os.getenv("RETRY_DELAY", 2))
 
  def generate_response(
      model_name: str,
-     prompt: Optional[str] = None,
      system_prompt: str = "You are a helpful AI Assistant",
      messages: Optional[List[Dict]] = None,
      temperature: float = 0.7,
@@ -52,32 +51,17 @@ def generate_response(
      url = f"https://generativelanguage.googleapis.com/v1beta/models/{model_name}:generateContent"
      headers = {"Content-Type": "application/json"}
 
-     if messages is None:
-         if prompt is None:
-             raise ValueError("Either 'prompt' or 'messages' must be provided.")
-         if system_prompt:
-             contents = [
-                 {"role": "user", "parts": [{"text": system_prompt}]},
-                 {"role": "model", "parts": [{"text": "ok"}]},
-                 {"role": "user", "parts": [{"text": prompt}]},
-             ]
-         else:
-             contents = [{"role": "user", "parts": [{"text": prompt}]}]
-
-     else:
-         if prompt is not None:
-             raise ValueError("Only one of 'prompt' or 'messages' should be provided.")
-         if system_prompt:
-             contents = [
-                 {"role": "user", "parts": [{"text": system_prompt}]},
-                 {"role": "model", "parts": [{"text": "ok"}]},
-             ]
-             contents.append(messages)
-         else:
-             contents = messages
 
      payload = {
-         "contents": contents,
+         "contents": messages,
+         # "system_instruction":
+         # {
+         #     "parts": [
+         #         {
+         #             "text": system_prompt
+         #         }
+         #     ]
+         # },
          "generationConfig": {
              "temperature": temperature,
              "maxOutputTokens": max_tokens,
@@ -116,7 +100,6 @@ def generate_response(
 
  async def generate_response_async(
      model_name: str,
-     prompt: Optional[str] = None,
      system_prompt: str = "You are a helpful AI Assistant",
      messages: Optional[List[Dict]] = None,
      temperature: float = 0.7,
@@ -150,32 +133,9 @@ async def generate_response_async(
      url = f"https://generativelanguage.googleapis.com/v1beta/models/{model_name}:generateContent"
      headers = {"Content-Type": "application/json"}
 
-     if messages is None:
-         if prompt is None:
-             raise ValueError("Either 'prompt' or 'messages' must be provided.")
-         if system_prompt:
-             contents = [
-                 {"role": "user", "parts": [{"text": system_prompt}]},
-                 {"role": "model", "parts": [{"text": "ok"}]},
-                 {"role": "user", "parts": [{"text": prompt}]},
-             ]
-         else:
-             contents = [{"role": "user", "parts": [{"text": prompt}]}]
-
-     else:
-         if prompt is not None:
-             raise ValueError("Only one of 'prompt' or 'messages' should be provided.")
-         if system_prompt:
-             contents = [
-                 {"role": "user", "parts": [{"text": system_prompt}]},
-                 {"role": "model", "parts": [{"text": "ok"}]},
-             ]
-             contents.append(messages)
-         else:
-             contents = messages
-
+
      payload = {
-         "contents": contents,
+         "contents": messages,
          "generationConfig": {
              "temperature": temperature,
              "maxOutputTokens": max_tokens,
@@ -28,9 +28,7 @@ async_openai_client, openai_client = initialize_openai_clients()
 
  def generate_response(
      model_name,
-     prompt=None,
      messages=None,
-     system_prompt="You are a helpful AI Assistant",
      temperature=0.7,
      max_tokens=300,
      top_p=1.0,
@@ -38,19 +36,7 @@ def generate_response(
  ):
      start_time = time.time() if full_response else None
 
-     # Validate inputs
-     if prompt and messages:
-         raise ValueError("Only one of 'prompt' or 'messages' should be provided.")
-     if not prompt and not messages:
-         raise ValueError("Either 'prompt' or 'messages' must be provided.")
-
-     # Prepare messages based on input type
-     if prompt:
-         messages = [
-             {"role": "system", "content": system_prompt},
-             {"role": "user", "content": prompt},
-         ]
-
+
      for attempt in range(MAX_RETRIES):
          try:
              completion = openai_client.chat.completions.create(
@@ -86,12 +72,9 @@ def generate_response(
              print(error_msg)
              return None
 
-
  async def generate_response_async(
      model_name,
-     prompt=None,
      messages=None,
-     system_prompt="You are a helpful AI Assistant",
      temperature=0.7,
      max_tokens=300,
      top_p=1.0,
@@ -99,19 +82,7 @@ async def generate_response_async(
  ):
      start_time = time.time() if full_response else None
 
-     # Validate inputs
-     if prompt and messages:
-         raise ValueError("Only one of 'prompt' or 'messages' should be provided.")
-     if not prompt and not messages:
-         raise ValueError("Either 'prompt' or 'messages' must be provided.")
-
-     # Prepare messages based on input type
-     if prompt:
-         messages = [
-             {"role": "system", "content": system_prompt},
-             {"role": "user", "content": prompt},
-         ]
-
+
      for attempt in range(MAX_RETRIES):
          try:
              completion = await async_openai_client.chat.completions.create(
@@ -0,0 +1,41 @@
+ class MessagesTemplate:
+     def __init__(self):
+         self.messages = []
+
+     def add_user_message(self, content):
+         self.messages.append({"role": "user", "content": content})
+
+     def add_assistant_message(self, content):
+         self.messages.append({"role": "assistant", "content": content})
+
+     def validate_alternation(self):
+         if not self.messages:
+             return False, "MessageTemplate is empty."
+
+         if self.messages[0]["role"] != "user":
+             return False, "MessageTemplate must start with a user message."
+
+         if self.messages[-1]["role"] != "user":
+             return False, "MessageTemplate must end with a user message."
+
+         for i in range(len(self.messages) - 1):
+             if self.messages[i]["role"] == self.messages[i + 1]["role"]:
+                 return False, f"Consecutive messages found at index {i} and {i + 1}."
+
+         return True, "MessageTemplate is valid."
+
+
+     def get_messages(self):
+         # Validate the message alternation
+         is_valid, validation_message = self.validate_alternation()
+         if is_valid:
+             return self.messages
+         else:
+             raise ValueError(validation_message)
+
+
+     def __repr__(self):
+         return f"MessageTemplate({self.messages})"
+
+
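MessagesTemplate enforces a strict user/assistant alternation that starts and ends with a user turn; get_messages validates before returning. A quick sketch:

    from SimplerLLM.prompts.messages_template import MessagesTemplate

    template = MessagesTemplate()
    template.add_user_message("What is SimplerLLM?")
    template.add_assistant_message("A wrapper library for several LLM providers.")
    template.add_user_message("Which providers are supported?")

    # Raises ValueError if the history is empty, mis-ordered, or has
    # consecutive messages from the same role.
    messages = template.get_messages()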
File without changes
@@ -20,6 +20,31 @@ class TextDocument(BaseModel):
 
 
  def load_content(input_path_or_url):
+     """
+     Load content from a given input path or URL and return a TextDocument object.
+
+     This function handles the following types of input:
+     - URLs: Supports both YouTube videos and blog articles.
+       - For YouTube videos, it retrieves the title and transcript.
+       - For blog articles, it retrieves the text and title.
+     - Local files: Supports .txt, .csv, .docx, and .pdf file extensions.
+       - For each file type, it reads the content and calculates the file size, word count, and character count.
+
+     Args:
+         input_path_or_url (str): The path to a local file or a URL to online content.
+
+     Returns:
+         TextDocument: An object containing the following attributes:
+             - word_count (int): The number of words in the content.
+             - character_count (int): The number of characters in the content.
+             - content (str): The loaded content.
+             - file_size (int): The size of the content in bytes.
+             - url_or_path (str): The original input path or URL.
+             - title (str, optional): The title of the content (for YouTube videos and blog articles).
+
+     Raises:
+         ValueError: If the input cannot be processed or an error occurs during processing.
+     """
      # Check if the input is a URL
      if re.match(r"http[s]?://", input_path_or_url):
          # Process based on URL content
@@ -0,0 +1,6 @@
+ from SimplerLLM.tools.generic_loader import load_content
+
+ # List of predefined tools
+ PREDEFINED_TOOLS = {
+     "load_content": load_content
+ }
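Agent.add_tool also accepts functions carrying an is_custom_tool flag and a description attribute; the decorator that sets those is not part of this diff, so the following is only a plausible sketch of one:

    def custom_tool(description):
        # Hypothetical decorator: attaches the attributes Agent.add_tool checks for.
        def wrapper(func):
            func.is_custom_tool = True
            func.description = description
            return func
        return wrapper

    @custom_tool("Counts the words in a text string.")
    def word_counter(text):
        return len(text.split())

    # agent.add_tool(word_counter)  # passes the custom-tool branch in add_tool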
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: SimplerLLM
- Version: 0.2.5
+ Version: 0.2.6
  Summary: An easy-to-use Library for interacting with language models.
  Home-page: https://github.com/hassancs91/SimplerLLM
  Author: Hasan Aboul Hasan
@@ -6,6 +6,8 @@ SimplerLLM.egg-info/SOURCES.txt
  SimplerLLM.egg-info/dependency_links.txt
  SimplerLLM.egg-info/requires.txt
  SimplerLLM.egg-info/top_level.txt
+ SimplerLLM/agents/__init__.py
+ SimplerLLM/agents/agent.py
  SimplerLLM/image/__init__.py
  SimplerLLM/image/img_helper_funcs.py
  SimplerLLM/image/stability_ai.py
@@ -20,12 +22,14 @@ SimplerLLM/language/llm_providers/llm_response_models.py
  SimplerLLM/language/llm_providers/openai_llm.py
  SimplerLLM/language/llm_providers/transformers_llm.py
  SimplerLLM/prompts/__init__.py
+ SimplerLLM/prompts/messages_template.py
  SimplerLLM/prompts/prompt_builder.py
  SimplerLLM/tools/__init__.py
  SimplerLLM/tools/file_functions.py
  SimplerLLM/tools/file_loader.py
  SimplerLLM/tools/generic_loader.py
  SimplerLLM/tools/json_helpers.py
+ SimplerLLM/tools/predefined_tools.py
  SimplerLLM/tools/rapid_api.py
  SimplerLLM/tools/serp.py
  SimplerLLM/tools/text_chunker.py
@@ -11,7 +11,7 @@ with open("README.md", encoding="utf-8") as f:
 
  setup(
      name="SimplerLLM",
-     version="0.2.5",
+     version="0.2.6",
      author="Hasan Aboul Hasan",
      author_email="hasan@learnwithhasan.com",
      description="An easy-to-use Library for interacting with language models.",
@@ -1,208 +0,0 @@
- import SimplerLLM.language.llm_providers.openai_llm as openai_llm
- import SimplerLLM.language.llm_providers.gemini_llm as gemini_llm
- import SimplerLLM.language.llm_providers.anthropic_llm as anthropic_llm
- from enum import Enum
-
-
- class LLMProvider(Enum):
-     OPENAI = 1
-     GEMINI = 2
-     ANTHROPIC = 3
-
-
- class LLM:
-     def __init__(
-         self,
-         provider=LLMProvider.OPENAI,
-         model_name="gpt-3.5-turbo",
-         temperature=0.7,
-         top_p=1.0,
-     ):
-         self.provider = provider
-         self.model_name = model_name
-         self.temperature = temperature
-         self.top_p = top_p
-
-     @staticmethod
-     def create(
-         provider=None,
-         model_name=None,
-         temperature=0.7,
-         top_p=1.0,
-     ):
-         if provider == LLMProvider.OPENAI:
-             return OpenAILLM(provider, model_name, temperature, top_p)
-         if provider == LLMProvider.GEMINI:
-             return GeminiLLM(provider, model_name, temperature, top_p)
-         if provider == LLMProvider.ANTHROPIC:
-             return AnthropicLLM(provider, model_name, temperature, top_p)
-         else:
-             return None
-
-     def set_model(self, provider):
-         if not isinstance(provider, LLMProvider):
-             raise ValueError("Provider must be an instance of LLMProvider Enum")
-         self.provider = provider
-
-     def prepare_params(self, model_name, temperature, top_p):
-         # Use instance values as defaults if parameters are not provided
-         return {
-             "model_name": model_name if model_name else self.model_name,
-             "temperature": temperature if temperature else self.temperature,
-             "top_p": top_p if top_p else self.top_p,
-         }
-
-
- class OpenAILLM(LLM):
-     def __init__(self, model, model_name, temperature, top_p):
-         super().__init__(model, model_name, temperature, top_p)
-
-     def generate_response(
-         self,
-         model_name=None,
-         prompt=None,
-         messages=None,
-         system_prompt="You are a helpful AI Assistant",
-         temperature=0.7,
-         max_tokens=300,
-         top_p=1.0,
-         full_response=False,
-     ):
-         params = self.prepare_params(model_name, temperature, top_p)
-         params.update(
-             {
-                 "prompt": prompt,
-                 "messages": messages,
-                 "system_prompt": system_prompt,
-                 "max_tokens": max_tokens,
-                 "full_response": full_response,
-             }
-         )
-         return openai_llm.generate_response(**params)
-
-     async def generate_response_async(
-         self,
-         model_name=None,
-         prompt=None,
-         messages=None,
-         system_prompt="You are a helpful AI Assistant",
-         temperature=0.7,
-         max_tokens=300,
-         top_p=1.0,
-         full_response=False,
-     ):
-         params = self.prepare_params(model_name, temperature, top_p)
-         params.update(
-             {
-                 "prompt": prompt,
-                 "messages": messages,
-                 "system_prompt": system_prompt,
-                 "max_tokens": max_tokens,
-                 "full_response": full_response,
-             }
-         )
-         return await openai_llm.generate_response_async(**params)
-
-
- class GeminiLLM(LLM):
-     def __init__(self, model, model_name, temperature, top_p):
-         super().__init__(model, model_name, temperature, top_p)
-
-     def generate_response(
-         self,
-         model_name=None,
-         prompt=None,
-         messages=None,
-         system_prompt="You are a helpful AI Assistant",
-         temperature=0.7,
-         max_tokens=300,
-         top_p=1.0,
-         full_response=False,
-     ):
-         params = self.prepare_params(model_name, temperature, top_p)
-         params.update(
-             {
-                 "prompt": prompt,
-                 "messages": messages,
-                 "system_prompt": system_prompt,
-                 "max_tokens": max_tokens,
-                 "full_response": full_response,
-             }
-         )
-         return gemini_llm.generate_response(**params)
-
-     async def generate_response_async(
-         self,
-         model_name=None,
-         prompt=None,
-         messages=None,
-         system_prompt="You are a helpful AI Assistant",
-         temperature=0.7,
-         max_tokens=300,
-         top_p=1.0,
-         full_response=False,
-     ):
-         params = self.prepare_params(model_name, temperature, top_p)
-         params.update(
-             {
-                 "prompt": prompt,
-                 "messages": messages,
-                 "system_prompt": system_prompt,
-                 "max_tokens": max_tokens,
-                 "full_response": full_response,
-             }
-         )
-         return await gemini_llm.generate_response_async(**params)
-
-
- class AnthropicLLM(LLM):
-     def __init__(self, model, model_name, temperature, top_p):
-         super().__init__(model, model_name, temperature, top_p)
-
-     def generate_response(
-         self,
-         model_name=None,
-         prompt=None,
-         messages=None,
-         system_prompt="You are a helpful AI Assistant",
-         temperature=0.7,
-         max_tokens=300,
-         top_p=1.0,
-         full_response=False,
-     ):
-         params = self.prepare_params(model_name, temperature, top_p)
-         params.update(
-             {
-                 "prompt": prompt,
-                 "messages": messages,
-                 "system_prompt": system_prompt,
-                 "max_tokens": max_tokens,
-                 "full_response": full_response,
-             }
-         )
-         return anthropic_llm.generate_response(**params)
-
-     async def generate_response_async(
-         self,
-         model_name=None,
-         prompt=None,
-         messages=None,
-         system_prompt="You are a helpful AI Assistant",
-         temperature=0.7,
-         max_tokens=300,
-         top_p=1.0,
-         full_response=False,
-     ):
-         params = self.prepare_params(model_name, temperature, top_p)
-         params.update(
-             {
-                 "prompt": prompt,
-                 "messages": messages,
-                 "system_prompt": system_prompt,
-                 "max_tokens": max_tokens,
-                 "full_response": full_response,
-             }
-         )
-         return await anthropic_llm.generate_response_async(**params)
-
File without changes
File without changes