praisonaiagents 0.0.59__py3-none-any.whl → 0.0.60__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
@@ -1040,4 +1040,174 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 if type(event) in event_types:
                     litellm._async_success_callback.remove(event)
 
-        litellm.callbacks = events
+        litellm.callbacks = events
+
+    # Response without tool calls
+    def response(
+        self,
+        prompt: Union[str, List[Dict]],
+        system_prompt: Optional[str] = None,
+        temperature: float = 0.2,
+        stream: bool = True,
+        verbose: bool = True,
+        markdown: bool = True,
+        console: Optional[Console] = None,
+        **kwargs
+    ) -> str:
+        """Simple function to get model response without tool calls or complex features"""
+        try:
+            import litellm
+            import logging
+            logger = logging.getLogger(__name__)
+
+            litellm.set_verbose = False
+            start_time = time.time()
+
+            logger.debug("Using synchronous response function")
+
+            # Build messages list
+            messages = []
+            if system_prompt:
+                messages.append({"role": "system", "content": system_prompt})
+
+            # Add prompt to messages
+            if isinstance(prompt, list):
+                messages.append({"role": "user", "content": prompt})
+            else:
+                messages.append({"role": "user", "content": prompt})
+
+            # Get response from LiteLLM
+            if stream:
+                response_text = ""
+                if verbose:
+                    with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
+                        for chunk in litellm.completion(
+                            model=self.model,
+                            messages=messages,
+                            temperature=temperature,
+                            stream=True,
+                            **kwargs
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                content = chunk.choices[0].delta.content
+                                response_text += content
+                                live.update(display_generating(response_text, start_time))
+                else:
+                    for chunk in litellm.completion(
+                        model=self.model,
+                        messages=messages,
+                        temperature=temperature,
+                        stream=True,
+                        **kwargs
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            response_text += chunk.choices[0].delta.content
+            else:
+                response = litellm.completion(
+                    model=self.model,
+                    messages=messages,
+                    temperature=temperature,
+                    stream=False,
+                    **kwargs
+                )
+                response_text = response.choices[0].message.content.strip()
+
+            if verbose:
+                display_interaction(
+                    prompt if isinstance(prompt, str) else prompt[0].get("text", ""),
+                    response_text,
+                    markdown=markdown,
+                    generation_time=time.time() - start_time,
+                    console=console or self.console
+                )
+
+            return response_text.strip()
+
+        except Exception as error:
+            display_error(f"Error in response: {str(error)}")
+            raise
+
+    # Async version of response function. Response without tool calls
+    async def response_async(
+        self,
+        prompt: Union[str, List[Dict]],
+        system_prompt: Optional[str] = None,
+        temperature: float = 0.2,
+        stream: bool = True,
+        verbose: bool = True,
+        markdown: bool = True,
+        console: Optional[Console] = None,
+        **kwargs
+    ) -> str:
+        """Async version of response function"""
+        try:
+            import litellm
+            import logging
+            logger = logging.getLogger(__name__)
+
+            litellm.set_verbose = False
+            start_time = time.time()
+
+            logger.debug("Using asynchronous response function")
+
+            # Build messages list
+            messages = []
+            if system_prompt:
+                messages.append({"role": "system", "content": system_prompt})
+
+            # Add prompt to messages
+            if isinstance(prompt, list):
+                messages.append({"role": "user", "content": prompt})
+            else:
+                messages.append({"role": "user", "content": prompt})
+
+            # Get response from LiteLLM
+            if stream:
+                response_text = ""
+                if verbose:
+                    with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
+                        async for chunk in await litellm.acompletion(
+                            model=self.model,
+                            messages=messages,
+                            temperature=temperature,
+                            stream=True,
+                            **kwargs
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                content = chunk.choices[0].delta.content
+                                response_text += content
+                                live.update(display_generating(response_text, start_time))
+                else:
+                    async for chunk in await litellm.acompletion(
+                        model=self.model,
+                        messages=messages,
+                        temperature=temperature,
+                        stream=True,
+                        **kwargs
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            response_text += chunk.choices[0].delta.content
+            else:
+                response = await litellm.acompletion(
+                    model=self.model,
+                    messages=messages,
+                    temperature=temperature,
+                    stream=False,
+                    **kwargs
+                )
+                response_text = response.choices[0].message.content.strip()
+
+            if verbose:
+                display_interaction(
+                    prompt if isinstance(prompt, str) else prompt[0].get("text", ""),
+                    response_text,
+                    markdown=markdown,
+                    generation_time=time.time() - start_time,
+                    console=console or self.console
+                )
+
+            return response_text.strip()
+
+        except Exception as error:
+            display_error(f"Error in response_async: {str(error)}")
+            raise
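
Note: the two methods above add a lightweight completion path that bypasses tool calling and self-reflection entirely. A minimal usage sketch, assuming `LLM` is importable from `praisonaiagents.llm` and accepts a `model` argument (the constructor call and model name are illustrative, not confirmed by this diff):

import asyncio
from praisonaiagents.llm import LLM  # assumed export path

llm = LLM(model="gpt-4o-mini")

# Synchronous path: streams chunks and renders them live by default
text = llm.response(
    "Explain backpropagation in two sentences.",
    system_prompt="Be concise.",
    temperature=0.2,
)

# Asynchronous twin with the same signature
async def main() -> str:
    return await llm.response_async("Same question, async path.", stream=False)

print(text)
print(asyncio.run(main()))

When `stream=True` and `verbose=False`, the chunks are still consumed eagerly; only the live Rich display is skipped.
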
@@ -3,6 +3,16 @@ import json
 from datetime import datetime
 from openai import OpenAI
 from pydantic import BaseModel
+import os
+import logging
+
+# Setup logging based on environment variable
+log_level = os.getenv('LOGLEVEL', 'INFO').upper()
+logging.basicConfig(
+    level=log_level,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
 
 # Lazy loader for LLM
 def get_llm():
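
Note: because this `basicConfig` call runs at import time, log verbosity for the module can be switched from the environment with no code changes. A sketch, using the module path from the RECORD section below:

# Select DEBUG logging before the module is imported;
# generatecot.py reads LOGLEVEL exactly once, at import time.
import os
os.environ["LOGLEVEL"] = "DEBUG"

from praisonaiagents.tools.train.data import generatecot

Equivalently, run `LOGLEVEL=DEBUG python your_script.py` from the shell. Since `logging.basicConfig` is a no-op when the root logger already has handlers, an application that configures logging before this import keeps control of the format and level.
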
@@ -21,16 +31,24 @@ class GenerateCOT:
         qa_pairs: Optional[Dict[str, str]] = None,
         model: str = "gpt-4o-mini",
         api_key: Optional[str] = None,
-        max_attempts: int = 3
+        max_attempts: int = 3,
+        verbose: bool = True,
+        temperature: float = 0.5
     ):
         self.qa_pairs = qa_pairs or {}
         self.max_attempts = max_attempts
         self.solutions = {}
         self.llm = get_llm()(model=model)  # Get LLM class and instantiate
         self.model = model
-
+        self.temperature = temperature
+        self.verbose = verbose
+        logger.debug(f"Initialized GenerateCOT with model={model}, max_attempts={max_attempts}")
+
     def _ask_ai(self, prompt: str) -> str:
-        return self.llm.get_response(prompt, temperature=0.7)
+        logger.debug(f"Sending prompt to LLM: {prompt[:100]}...")
+        response = self.llm.response(prompt, temperature=self.temperature)
+        logger.debug(f"Received response: {response[:100]}...")
+        return response
 
     def _build_solution_prompt(self, question: str, context: str) -> str:
         return f"""
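
Note: two behavioral changes ride along with the new parameters. `_ask_ai` now routes through the simpler `llm.response` path introduced above rather than `get_response`, and the sampling temperature follows the instance-level setting (default 0.5) instead of a hard-coded 0.7. A construction sketch (the QA pair is illustrative; the import path follows the RECORD entry below):

from praisonaiagents.tools.train.data.generatecot import GenerateCOT

cot = GenerateCOT(
    qa_pairs={"What is 7 * 8?": "56"},
    model="gpt-4o-mini",
    temperature=0.5,  # now forwarded to every _ask_ai call
    verbose=True,     # stored on the instance; new in 0.0.60
)
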
@@ -245,23 +263,33 @@ class GenerateCOT:
 
     def cot_run_dict(self, question: str) -> dict:
         """Uses the dictionary-based solution approach, storing the final solution in self.solutions."""
+        logger.debug(f"Starting cot_run_dict for question: {question}")
+
         solution = self.cot_generate_dict(question)
+        logger.debug(f"Initial solution generated: {str(solution)[:100]}...")
+
         if self.cot_check(question, solution["final_answer"]):
+            logger.debug("Initial solution passed verification")
             self.solutions[question] = solution
             return solution
 
+        logger.debug("Initial solution failed verification, attempting improvement")
         improved = self.cot_improve_dict(question, solution["thought_process"])
         if self.cot_check(question, improved["final_answer"]):
+            logger.debug("Improved solution passed verification")
             self.solutions[question] = improved
             return improved
 
+        logger.debug("Checking for errors in improved solution")
         error_pos = self.cot_find_error(question, improved["thought_process"])
         if error_pos != -1:
+            logger.debug(f"Found error at position {error_pos}, generating final solution")
             partial_solution = '. '.join(improved["thought_process"].split('. ')[:error_pos]) + '.'
             final = self.cot_generate_dict(question, partial_solution)
             self.solutions[question] = final
             return final
 
+        logger.debug("Using improved solution as final result")
         self.solutions[question] = improved
         return improved
 
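Note: the new debug statements trace the method's three-stage strategy: generate and verify, improve and re-verify, and finally truncate the reasoning at the first detected faulty sentence and regenerate from the surviving prefix. Continuing the construction sketch above (the `final_answer` and `thought_process` keys come from this hunk):

# With LOGLEVEL=DEBUG set, each stage of the repair loop is now logged.
result = cot.cot_run_dict("What is 12 * 12?")
print(result["final_answer"])
print(result["thought_process"][:120])
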
@@ -332,29 +360,30 @@ class GenerateCOT:
         answer: str,
         filepath: str = 'dataset.csv'
     ) -> Optional[str]:
-        """
-        Save a single question-answer pair with chain of thought to CSV file.
-        Creates file with headers if it doesn't exist.
-        """
+        """Save a single question-answer pair with chain of thought to CSV file."""
+        logger.debug(f"Saving QA pair to {filepath}")
         try:
-            # Add the current QA pair to self.qa_pairs
             self.qa_pairs[question] = answer
+            logger.debug("Added QA pair to internal dictionary")
 
-            # Generate solution
             solution = self.cot_run_dict(question)
+            logger.debug("Generated solution for question")
 
             import csv
             import os
             file_exists = os.path.exists(filepath)
+            logger.debug(f"File exists: {file_exists}")
 
             with open(filepath, 'a', newline='', encoding='utf-8') as f:
                 writer = csv.writer(f)
                 if not file_exists:
+                    logger.debug("Creating new file with headers")
                     writer.writerow(['instruction', 'input', 'output'])
                 writer.writerow([question, '', solution.get("thought_process", "")])
+                logger.debug("Wrote solution to file")
             return filepath
         except Exception as e:
-            print(f"Error appending to CSV: {e}")
+            logger.error(f"Error saving to CSV: {str(e)}")
             return None
 
     # Rename existing function to indicate it handles qa_pairs dictionary
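
Note: the writer appends one `instruction,input,output` row per call and writes the header only when the file is new, so repeated calls build up a fine-tuning dataset incrementally. A quick way to inspect the result (column names come from the header row written above):

import pandas as pd

df = pd.read_csv("dataset.csv")
print(df.columns.tolist())   # ['instruction', 'input', 'output']
print(df.iloc[0]["output"])  # the stored chain-of-thought
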
@@ -393,33 +422,37 @@ class GenerateCOT:
         private: bool = False
     ) -> str:
         """Upload generated solutions to HuggingFace datasets."""
+        logger.debug(f"Attempting to upload {filepath} to HuggingFace as {dataset_name}")
         try:
             from datasets import Dataset
             from huggingface_hub import HfApi, login
             import pandas as pd
 
-            # Determine file type and load data
+            logger.debug(f"Loading data from {filepath}")
             if filepath.endswith('.csv'):
                 data = pd.read_csv(filepath)
+                logger.debug(f"Loaded CSV with {len(data)} rows")
             elif filepath.endswith('.json'):
                 data = pd.read_json(filepath)
+                logger.debug(f"Loaded JSON with {len(data)} records")
             else:
                 raise ValueError("Only CSV and JSON files are supported")
 
-            # Convert to HuggingFace dataset
+            logger.debug("Converting to HuggingFace dataset")
             dataset = Dataset.from_pandas(data)
 
-            # Upload to HuggingFace
             repo_id = f"{huggingface_username}/{dataset_name}"
+            logger.debug(f"Pushing to hub: {repo_id}")
             dataset.push_to_hub(
                 repo_id,
                 private=private
             )
 
+            logger.debug("Upload completed successfully")
             return f"Dataset uploaded successfully to {repo_id}"
 
         except Exception as e:
-            print(f"Error uploading to HuggingFace: {e}")
+            logger.error(f"Error uploading to HuggingFace: {str(e)}")
             return None
 
     # Usage example:
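
Note: the upload path boils down to two library calls that can also be run standalone. A sketch assuming a HuggingFace token is already configured (for example via `huggingface-cli login`); the repo name and file are illustrative:

import pandas as pd
from datasets import Dataset

data = pd.read_csv("dataset.csv")
Dataset.from_pandas(data).push_to_hub("your-username/cot-dataset", private=True)

One caveat visible in the hunk: on failure the method logs the error and returns None even though it is annotated `-> str`, so callers should check for None.
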
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: praisonaiagents
-Version: 0.0.59
+Version: 0.0.60
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
@@ -9,7 +9,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
 praisonaiagents/knowledge/knowledge.py,sha256=fQNREDiwdoisfIxJBLVkteXgq_8Gbypfc3UaZbxf5QY,13210
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=G2wKMwitWBJRS6nOq9W77zXtsxvJwsVwXFOKYcllY0E,51386
+praisonaiagents/llm/llm.py,sha256=t47T80kL2QEUSAGfNYHQG130d-CRUDxXi0AwStW0zkk,58156
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=HPw84OhnKQW3EyrDkpoQu0DcpxThbrzR2hWUgwQh9Pw,59955
@@ -35,8 +35,8 @@ praisonaiagents/tools/wikipedia_tools.py,sha256=pGko-f33wqXgxJTv8db7TbizY5XnzBQR
 praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxNMMs1A,17122
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
-praisonaiagents/tools/train/data/generatecot.py,sha256=k1gZHtgY1poVp5kajhgs4S9a4-epdA8NyZfYTa34lQU,17651
-praisonaiagents-0.0.59.dist-info/METADATA,sha256=w6DYqKW5P9b2Rqu02j5Lt4-6K7f-InDrtUfV7fYu_FM,830
-praisonaiagents-0.0.59.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-praisonaiagents-0.0.59.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.59.dist-info/RECORD,,
+praisonaiagents/tools/train/data/generatecot.py,sha256=EwrF6p-AWQQJktJeZu2O52ipbHGPd5y1IEmTLw-YSCs,19479
+praisonaiagents-0.0.60.dist-info/METADATA,sha256=I7mcUMdieRsRLzjdMO2Qkzt6dEY8Tm2X1XJIkydeIAg,830
+praisonaiagents-0.0.60.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+praisonaiagents-0.0.60.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.60.dist-info/RECORD,,