praisonaiagents 0.0.58__py3-none-any.whl → 0.0.60__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

--- praisonaiagents/task/task.py (0.0.58)
+++ praisonaiagents/task/task.py (0.0.60)
@@ -38,7 +38,8 @@ class Task:
         loop_state: Optional[Dict[str, Union[str, int]]] = None,
         memory=None,
         quality_check=True,
-        input_file: Optional[str] = None
+        input_file: Optional[str] = None,
+        rerun: bool = False # Renamed from can_rerun and logic inverted, default True for backward compatibility
     ):
         # Add check if memory config is provided
         if memory is not None or (config and config.get('memory_config')):
@@ -76,6 +77,7 @@ class Task:
         self.loop_state = loop_state if loop_state else {}
         self.memory = memory
         self.quality_check = quality_check
+        self.rerun = rerun # Assigning the rerun parameter

         # Set logger level based on config verbose level
         verbose = self.config.get("verbose", 0)
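
Together, these two hunks add a `rerun` flag to `Task`, stored as `self.rerun`. Note a discrepancy worth flagging: the inline comment says the flag was renamed from `can_rerun` with inverted logic and a default of `True` for backward compatibility, yet the signature defaults to `False`. A minimal construction sketch, assuming the package's top-level `Task` import and treating `description`/`expected_output` as the usual fields (they appear elsewhere in this diff, not in these hunks):

    from praisonaiagents import Task  # import path assumed

    summary_task = Task(
        description="Summarize the quarterly metrics",  # illustrative
        expected_output="A three-paragraph summary",    # illustrative
        quality_check=True,  # existing parameter from the same signature
        rerun=False,         # new in 0.0.60; matches the signature default
    )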

The remaining task.py hunks, through `@@ -307,7 +309,7 @@`, are whitespace-only changes (trailing whitespace on otherwise blank lines, plus one docstring line), so their `-`/`+` pairs look identical below.

@@ -83,7 +85,7 @@ class Task:
             logger.setLevel(logging.INFO)
         else:
             logger.setLevel(logging.WARNING)
-
+
         # Also set third-party loggers to WARNING
         logging.getLogger('chromadb').setLevel(logging.WARNING)
         logging.getLogger('openai').setLevel(logging.WARNING)
@@ -148,7 +150,7 @@ class Task:
             logger.info(f"Task {self.id}: Initializing memory from config: {self.config['memory_config']}")
             self.memory = Memory(config=self.config['memory_config'])
             logger.info(f"Task {self.id}: Memory initialized successfully")
-
+
             # Verify database was created
             if os.path.exists(self.config['memory_config']['storage']['path']):
                 logger.info(f"Task {self.id}: Memory database exists after initialization")
@@ -182,11 +184,11 @@ class Task:
         """Execute callback and store quality metrics if enabled"""
         logger.info(f"Task {self.id}: execute_callback called")
         logger.info(f"Quality check enabled: {self.quality_check}")
-
+
         # Initialize memory if not already initialized
         if not self.memory:
             self.memory = self.initialize_memory()
-
+
         logger.info(f"Memory object exists: {self.memory is not None}")
         if self.memory:
             logger.info(f"Memory config: {self.memory.cfg}")
@@ -202,24 +204,24 @@ class Task:
         except Exception as e:
             logger.error(f"Task {self.id}: Failed to store task output in memory: {e}")
             logger.exception(e)
-
+
         logger.info(f"Task output: {task_output.raw[:100]}...")
-
+
         if self.quality_check and self.memory:
             try:
                 logger.info(f"Task {self.id}: Starting memory operations")
                 logger.info(f"Task {self.id}: Calculating quality metrics for output: {task_output.raw[:100]}...")
-
+
                 # Get quality metrics from LLM
                 metrics = self.memory.calculate_quality_metrics(
                     task_output.raw,
                     self.expected_output
                 )
                 logger.info(f"Task {self.id}: Quality metrics calculated: {metrics}")
-
+
                 quality_score = metrics.get("accuracy", 0.0)
                 logger.info(f"Task {self.id}: Quality score: {quality_score}")
-
+
                 # Store in both short and long-term memory with higher threshold
                 logger.info(f"Task {self.id}: Finalizing task output in memory...")
                 self.memory.finalize_task_output(
@@ -231,7 +233,7 @@ class Task:
                     task_id=self.id
                 )
                 logger.info(f"Task {self.id}: Finalized task output in memory")
-
+
                 # Store quality metrics separately
                 logger.info(f"Task {self.id}: Storing quality metrics...")
                 self.memory.store_quality(
@@ -240,7 +242,7 @@ class Task:
                     task_id=self.id,
                     metrics=metrics
                 )
-
+
                 # Store in both short and long-term memory with higher threshold
                 self.memory.finalize_task_output(
                     content=task_output.raw,
@@ -248,7 +250,7 @@ class Task:
                     quality_score=quality_score,
                     threshold=0.7 # Only high quality outputs in long-term memory
                 )
-
+
                 # Build context for next tasks
                 if self.next_tasks:
                     logger.info(f"Task {self.id}: Building context for next tasks...")
@@ -257,7 +259,7 @@ class Task:
                         max_items=5
                     )
                     logger.info(f"Task {self.id}: Built context for next tasks: {len(context)} items")
-
+
                 logger.info(f"Task {self.id}: Memory operations complete")
             except Exception as e:
                 logger.error(f"Task {self.id}: Failed to process memory operations: {e}")
@@ -295,7 +297,7 @@ Expected Output: {self.expected_output}.
                     context_results.append(
                         f"Previous task {context_item.name if context_item.name else context_item.description} has no result yet."
                     )
-
+
         # Join unique context results
         unique_contexts = list(dict.fromkeys(context_results)) # Remove duplicates
         task_prompt += f"""
@@ -307,7 +309,7 @@ Context:
     def execute_callback_sync(self, task_output: TaskOutput) -> None:
         """
         Synchronous wrapper to ensure that execute_callback is awaited,
-        preventing 'Task was destroyed but pending!' warnings if called
+        preventing 'Task was destroyed but pending!' warnings if called
         from non-async code.
         """
         import asyncio
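
Only the docstring's trailing whitespace changes here, but the warning it mentions is worth spelling out: "Task was destroyed but pending!" appears when a coroutine is scheduled and then garbage-collected before it completes. The body of `execute_callback_sync` is not in this diff, so the following is an illustration of the general pattern, not the package's implementation:

    import asyncio

    def run_callback_sync(coro):
        """Drive an async callback to completion from synchronous code."""
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            # No loop in this thread: run the coroutine to completion ourselves.
            asyncio.run(coro)
            return
        # A loop is already running: schedule on it rather than blocking.
        loop.create_task(coro)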

--- praisonaiagents/tools/train/data/generatecot.py (0.0.58)
+++ praisonaiagents/tools/train/data/generatecot.py (0.0.60)
@@ -3,6 +3,16 @@ import json
 from datetime import datetime
 from openai import OpenAI
 from pydantic import BaseModel
+import os
+import logging
+
+# Setup logging based on environment variable
+log_level = os.getenv('LOGLEVEL', 'INFO').upper()
+logging.basicConfig(
+    level=log_level,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)

 # Lazy loader for LLM
 def get_llm():
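
Because `logging.basicConfig` now executes at import time, the `LOGLEVEL` environment variable is read once, before anything else runs, and defaults to INFO. A small sketch of driving it from Python (the module path is taken from the RECORD section below; whether it imports exactly this way is an assumption):

    import os

    # Must run before the first import, since basicConfig executes at import time.
    # Note: basicConfig is a no-op if the root logger is already configured.
    os.environ["LOGLEVEL"] = "DEBUG"

    from praisonaiagents.tools.train.data import generatecot  # path per RECORD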
@@ -21,16 +31,24 @@ class GenerateCOT:
         qa_pairs: Optional[Dict[str, str]] = None,
         model: str = "gpt-4o-mini",
         api_key: Optional[str] = None,
-        max_attempts: int = 3
+        max_attempts: int = 3,
+        verbose: bool = True,
+        temperature: float = 0.5
     ):
         self.qa_pairs = qa_pairs or {}
         self.max_attempts = max_attempts
         self.solutions = {}
         self.llm = get_llm()(model=model) # Get LLM class and instantiate
         self.model = model
-
+        self.temperature = temperature
+        self.verbose = verbose
+        logger.debug(f"Initialized GenerateCOT with model={model}, max_attempts={max_attempts}")
+
     def _ask_ai(self, prompt: str) -> str:
-        return self.llm.get_response(prompt, temperature=0.7)
+        logger.debug(f"Sending prompt to LLM: {prompt[:100]}...")
+        response = self.llm.response(prompt, temperature=self.temperature)
+        logger.debug(f"Received response: {response[:100]}...")
+        return response

     def _build_solution_prompt(self, question: str, context: str) -> str:
         return f"""
@@ -245,23 +263,33 @@ class GenerateCOT:

     def cot_run_dict(self, question: str) -> dict:
         """Uses the dictionary-based solution approach, storing the final solution in self.solutions."""
+        logger.debug(f"Starting cot_run_dict for question: {question}")
+
         solution = self.cot_generate_dict(question)
+        logger.debug(f"Initial solution generated: {str(solution)[:100]}...")
+
         if self.cot_check(question, solution["final_answer"]):
+            logger.debug("Initial solution passed verification")
             self.solutions[question] = solution
             return solution

+        logger.debug("Initial solution failed verification, attempting improvement")
         improved = self.cot_improve_dict(question, solution["thought_process"])
         if self.cot_check(question, improved["final_answer"]):
+            logger.debug("Improved solution passed verification")
             self.solutions[question] = improved
             return improved

+        logger.debug("Checking for errors in improved solution")
         error_pos = self.cot_find_error(question, improved["thought_process"])
         if error_pos != -1:
+            logger.debug(f"Found error at position {error_pos}, generating final solution")
             partial_solution = '. '.join(improved["thought_process"].split('. ')[:error_pos]) + '.'
             final = self.cot_generate_dict(question, partial_solution)
             self.solutions[question] = final
             return final

+        logger.debug("Using improved solution as final result")
         self.solutions[question] = improved
         return improved

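`cot_run_dict` is a generate-verify-repair loop: produce a solution, check the final answer, try one improvement pass, and, failing that, locate the first bad reasoning step and regenerate from the truncated chain. A usage sketch (requires a configured LLM backend and API key; the question is illustrative):

    from praisonaiagents.tools.train.data.generatecot import GenerateCOT  # path assumed

    cot = GenerateCOT()  # all defaults, per the signature above
    question = "A train travels 120 km in 1.5 hours. What is its average speed?"
    solution = cot.cot_run_dict(question)
    print(solution["final_answer"])     # keys visible in the hunk above
    print(solution["thought_process"])
    assert cot.solutions[question] is solution  # accepted solution is cached
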
@@ -332,29 +360,30 @@ class GenerateCOT:
         answer: str,
         filepath: str = 'dataset.csv'
     ) -> Optional[str]:
-        """
-        Save a single question-answer pair with chain of thought to CSV file.
-        Creates file with headers if it doesn't exist.
-        """
+        """Save a single question-answer pair with chain of thought to CSV file."""
+        logger.debug(f"Saving QA pair to {filepath}")
         try:
-            # Add the current QA pair to self.qa_pairs
             self.qa_pairs[question] = answer
+            logger.debug("Added QA pair to internal dictionary")

-            # Generate solution
             solution = self.cot_run_dict(question)
+            logger.debug("Generated solution for question")

             import csv
             import os
             file_exists = os.path.exists(filepath)
+            logger.debug(f"File exists: {file_exists}")

             with open(filepath, 'a', newline='', encoding='utf-8') as f:
                 writer = csv.writer(f)
                 if not file_exists:
+                    logger.debug("Creating new file with headers")
                     writer.writerow(['instruction', 'input', 'output'])
                 writer.writerow([question, '', solution.get("thought_process", "")])
+            logger.debug("Wrote solution to file")
             return filepath
         except Exception as e:
-            print(f"Error appending to CSV: {e}")
+            logger.error(f"Error saving to CSV: {str(e)}")
             return None

     # Rename existing function to indicate it handles qa_pairs dictionary
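
The method name sits outside this hunk, but the write path is fully visible: an append-mode CSV with an `instruction,input,output` header, one row per question, the `input` column left empty, and the chain of thought in `output`. Reading such a file back needs only the stdlib:

    import csv

    # Reads the dataset produced by the append logic shown above.
    with open("dataset.csv", newline="", encoding="utf-8") as f:
        for row in csv.DictReader(f):
            # 'input' is written as an empty string; 'output' holds the thought process.
            print(row["instruction"], "->", row["output"][:80])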
@@ -393,33 +422,37 @@ class GenerateCOT:
         private: bool = False
     ) -> str:
         """Upload generated solutions to HuggingFace datasets."""
+        logger.debug(f"Attempting to upload {filepath} to HuggingFace as {dataset_name}")
         try:
             from datasets import Dataset
             from huggingface_hub import HfApi, login
             import pandas as pd

-            # Determine file type and load data
+            logger.debug(f"Loading data from {filepath}")
             if filepath.endswith('.csv'):
                 data = pd.read_csv(filepath)
+                logger.debug(f"Loaded CSV with {len(data)} rows")
             elif filepath.endswith('.json'):
                 data = pd.read_json(filepath)
+                logger.debug(f"Loaded JSON with {len(data)} records")
             else:
                 raise ValueError("Only CSV and JSON files are supported")

-            # Convert to HuggingFace dataset
+            logger.debug("Converting to HuggingFace dataset")
             dataset = Dataset.from_pandas(data)

-            # Upload to HuggingFace
             repo_id = f"{huggingface_username}/{dataset_name}"
+            logger.debug(f"Pushing to hub: {repo_id}")
             dataset.push_to_hub(
                 repo_id,
                 private=private
             )

+            logger.debug("Upload completed successfully")
             return f"Dataset uploaded successfully to {repo_id}"

         except Exception as e:
-            print(f"Error uploading to HuggingFace: {e}")
+            logger.error(f"Error uploading to HuggingFace: {str(e)}")
             return None

     # Usage example:
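
The upload itself is standard `datasets` usage: load a DataFrame, wrap it with `Dataset.from_pandas`, and call `push_to_hub`. The same flow stands alone outside the package (assumes prior authentication, e.g. via `huggingface-cli login`; the repo id is illustrative):

    import pandas as pd
    from datasets import Dataset

    data = pd.read_csv("dataset.csv")  # same loader the method uses for .csv
    dataset = Dataset.from_pandas(data)
    dataset.push_to_hub("your-username/cot-dataset", private=False)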

--- praisonaiagents-0.0.58.dist-info/METADATA
+++ praisonaiagents-0.0.60.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: praisonaiagents
-Version: 0.0.58
+Version: 0.0.60
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic

--- praisonaiagents-0.0.58.dist-info/RECORD
+++ praisonaiagents-0.0.60.dist-info/RECORD
@@ -9,12 +9,12 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
 praisonaiagents/knowledge/knowledge.py,sha256=fQNREDiwdoisfIxJBLVkteXgq_8Gbypfc3UaZbxf5QY,13210
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=G2wKMwitWBJRS6nOq9W77zXtsxvJwsVwXFOKYcllY0E,51386
+praisonaiagents/llm/llm.py,sha256=t47T80kL2QEUSAGfNYHQG130d-CRUDxXi0AwStW0zkk,58156
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
-praisonaiagents/process/process.py,sha256=BrS8_4Gt2ewXt559hThJTSrXVYG8daabO9tGDaWmrm0,44906
+praisonaiagents/process/process.py,sha256=HPw84OhnKQW3EyrDkpoQu0DcpxThbrzR2hWUgwQh9Pw,59955
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
-praisonaiagents/task/task.py,sha256=ikFjzNm4WPYONSLtWA3uDGNIUx_TvXTeU5SukWoC66E,14271
+praisonaiagents/task/task.py,sha256=8KztSUKMO74y619TyI8d5DMJ1xPbyQXIB0Ux583oOVw,14259
 praisonaiagents/tools/__init__.py,sha256=CWOYV9SudYY82r45LnNgaVRV3cmsAFdasNRkPrLsgmI,9198
 praisonaiagents/tools/arxiv_tools.py,sha256=1stb31zTjLTon4jCnpZG5de9rKc9QWgC0leLegvPXWo,10528
 praisonaiagents/tools/calculator_tools.py,sha256=S1xPT74Geurvjm52QMMIG29zDXVEWJmM6nmyY7yF298,9571
@@ -35,8 +35,8 @@ praisonaiagents/tools/wikipedia_tools.py,sha256=pGko-f33wqXgxJTv8db7TbizY5XnzBQR
 praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxNMMs1A,17122
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
-praisonaiagents/tools/train/data/generatecot.py,sha256=k1gZHtgY1poVp5kajhgs4S9a4-epdA8NyZfYTa34lQU,17651
-praisonaiagents-0.0.58.dist-info/METADATA,sha256=N_DIe_TPq4gXuySMSwVSPpsSarqo7h-_fTAIQaRFGT0,830
-praisonaiagents-0.0.58.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-praisonaiagents-0.0.58.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.58.dist-info/RECORD,,
+praisonaiagents/tools/train/data/generatecot.py,sha256=EwrF6p-AWQQJktJeZu2O52ipbHGPd5y1IEmTLw-YSCs,19479
+praisonaiagents-0.0.60.dist-info/METADATA,sha256=I7mcUMdieRsRLzjdMO2Qkzt6dEY8Tm2X1XJIkydeIAg,830
+praisonaiagents-0.0.60.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+praisonaiagents-0.0.60.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.60.dist-info/RECORD,,