praisonaiagents 0.0.57__py3-none-any.whl → 0.0.59__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -38,7 +38,8 @@ class Task:
38
38
  loop_state: Optional[Dict[str, Union[str, int]]] = None,
39
39
  memory=None,
40
40
  quality_check=True,
41
- input_file: Optional[str] = None
41
+ input_file: Optional[str] = None,
42
+ rerun: bool = False # Replaces can_rerun with inverted logic: the default rerun=False is equivalent to the old can_rerun=True, preserving backward compatibility
42
43
  ):
43
44
  # Add check if memory config is provided
44
45
  if memory is not None or (config and config.get('memory_config')):
@@ -76,6 +77,7 @@ class Task:
76
77
  self.loop_state = loop_state if loop_state else {}
77
78
  self.memory = memory
78
79
  self.quality_check = quality_check
80
+ self.rerun = rerun # Whether this task is allowed to be re-executed
79
81
 
80
82
  # Set logger level based on config verbose level
81
83
  verbose = self.config.get("verbose", 0)
@@ -83,7 +85,7 @@ class Task:
83
85
  logger.setLevel(logging.INFO)
84
86
  else:
85
87
  logger.setLevel(logging.WARNING)
86
-
88
+
87
89
  # Also set third-party loggers to WARNING
88
90
  logging.getLogger('chromadb').setLevel(logging.WARNING)
89
91
  logging.getLogger('openai').setLevel(logging.WARNING)
@@ -148,7 +150,7 @@ class Task:
148
150
  logger.info(f"Task {self.id}: Initializing memory from config: {self.config['memory_config']}")
149
151
  self.memory = Memory(config=self.config['memory_config'])
150
152
  logger.info(f"Task {self.id}: Memory initialized successfully")
151
-
153
+
152
154
  # Verify database was created
153
155
  if os.path.exists(self.config['memory_config']['storage']['path']):
154
156
  logger.info(f"Task {self.id}: Memory database exists after initialization")
@@ -182,11 +184,11 @@ class Task:
182
184
  """Execute callback and store quality metrics if enabled"""
183
185
  logger.info(f"Task {self.id}: execute_callback called")
184
186
  logger.info(f"Quality check enabled: {self.quality_check}")
185
-
187
+
186
188
  # Initialize memory if not already initialized
187
189
  if not self.memory:
188
190
  self.memory = self.initialize_memory()
189
-
191
+
190
192
  logger.info(f"Memory object exists: {self.memory is not None}")
191
193
  if self.memory:
192
194
  logger.info(f"Memory config: {self.memory.cfg}")
@@ -202,24 +204,24 @@ class Task:
202
204
  except Exception as e:
203
205
  logger.error(f"Task {self.id}: Failed to store task output in memory: {e}")
204
206
  logger.exception(e)
205
-
207
+
206
208
  logger.info(f"Task output: {task_output.raw[:100]}...")
207
-
209
+
208
210
  if self.quality_check and self.memory:
209
211
  try:
210
212
  logger.info(f"Task {self.id}: Starting memory operations")
211
213
  logger.info(f"Task {self.id}: Calculating quality metrics for output: {task_output.raw[:100]}...")
212
-
214
+
213
215
  # Get quality metrics from LLM
214
216
  metrics = self.memory.calculate_quality_metrics(
215
217
  task_output.raw,
216
218
  self.expected_output
217
219
  )
218
220
  logger.info(f"Task {self.id}: Quality metrics calculated: {metrics}")
219
-
221
+
220
222
  quality_score = metrics.get("accuracy", 0.0)
221
223
  logger.info(f"Task {self.id}: Quality score: {quality_score}")
222
-
224
+
223
225
  # Store in both short and long-term memory with higher threshold
224
226
  logger.info(f"Task {self.id}: Finalizing task output in memory...")
225
227
  self.memory.finalize_task_output(
@@ -231,7 +233,7 @@ class Task:
231
233
  task_id=self.id
232
234
  )
233
235
  logger.info(f"Task {self.id}: Finalized task output in memory")
234
-
236
+
235
237
  # Store quality metrics separately
236
238
  logger.info(f"Task {self.id}: Storing quality metrics...")
237
239
  self.memory.store_quality(
@@ -240,7 +242,7 @@ class Task:
240
242
  task_id=self.id,
241
243
  metrics=metrics
242
244
  )
243
-
245
+
244
246
  # Store in both short and long-term memory with higher threshold
245
247
  self.memory.finalize_task_output(
246
248
  content=task_output.raw,
@@ -248,7 +250,7 @@ class Task:
248
250
  quality_score=quality_score,
249
251
  threshold=0.7 # Only high quality outputs in long-term memory
250
252
  )
251
-
253
+
252
254
  # Build context for next tasks
253
255
  if self.next_tasks:
254
256
  logger.info(f"Task {self.id}: Building context for next tasks...")
@@ -257,7 +259,7 @@ class Task:
257
259
  max_items=5
258
260
  )
259
261
  logger.info(f"Task {self.id}: Built context for next tasks: {len(context)} items")
260
-
262
+
261
263
  logger.info(f"Task {self.id}: Memory operations complete")
262
264
  except Exception as e:
263
265
  logger.error(f"Task {self.id}: Failed to process memory operations: {e}")
@@ -295,7 +297,7 @@ Expected Output: {self.expected_output}.
295
297
  context_results.append(
296
298
  f"Previous task {context_item.name if context_item.name else context_item.description} has no result yet."
297
299
  )
298
-
300
+
299
301
  # Join unique context results
300
302
  unique_contexts = list(dict.fromkeys(context_results)) # Remove duplicates
301
303
  task_prompt += f"""
@@ -307,7 +309,7 @@ Context:
307
309
  def execute_callback_sync(self, task_output: TaskOutput) -> None:
308
310
  """
309
311
  Synchronous wrapper to ensure that execute_callback is awaited,
310
- preventing 'Task was destroyed but pending!' warnings if called
312
+ preventing 'Task was destroyed but pending!' warnings if called
311
313
  from non-async code.
312
314
  """
313
315
  import asyncio
@@ -89,7 +89,7 @@ class CSVTools:
89
89
  def write_csv(
90
90
  self,
91
91
  filepath: str,
92
- data: List[Dict[str, Any]],
92
+ data: Union[List[Dict[str, Any]], str],
93
93
  encoding: str = 'utf-8',
94
94
  delimiter: str = ',',
95
95
  index: bool = False,
@@ -102,35 +102,66 @@ class CSVTools:
102
102
 
103
103
  Args:
104
104
  filepath: Path to CSV file
105
- data: List of row dicts to write
106
- encoding: File encoding
107
- delimiter: Column delimiter
108
- index: Whether to write row indices
109
- header: Whether to write column headers
110
- float_format: Format string for float values
111
- date_format: Format string for date values
112
- mode: Write mode ('w' for write, 'a' for append)
113
-
105
+ data: Either a list of dictionaries or a string containing CSV data
106
+ If string, each line should be comma-separated values
107
+ encoding: File encoding (default: 'utf-8')
108
+ delimiter: Column delimiter (default: ',')
109
+ index: Whether to write row indices (default: False)
110
+ header: Whether to write column headers (default: True)
111
+ float_format: Format string for float values (default: None)
112
+ date_format: Format string for date values (default: None)
113
+ mode: Write mode - 'w' for write, 'a' for append (default: 'w')
114
+
114
115
  Returns:
115
- bool: Success status
116
+ bool: True if successful, False otherwise
116
117
  """
117
118
  try:
118
119
  pd = self._get_pandas()
119
120
  if pd is None:
120
121
  return False
121
122
 
122
- df = pd.DataFrame(data)
123
- df.to_csv(
124
- filepath,
125
- encoding=encoding,
126
- sep=delimiter,
127
- index=index,
128
- header=header,
129
- float_format=float_format,
130
- date_format=date_format,
131
- mode=mode
132
- )
133
- return True
123
+ # Handle string input
124
+ if isinstance(data, str):
125
+ # Convert string to list of dicts
126
+ rows = []
127
+ if delimiter in data:
128
+ # Get existing columns if file exists and in append mode
129
+ existing_cols = []
130
+ if mode == 'a' and Path(filepath).exists():
131
+ try:
132
+ existing_df = pd.read_csv(filepath, nrows=1)
133
+ existing_cols = existing_df.columns.tolist()
134
+ except:
135
+ pass
136
+
137
+ values = [v.strip() for v in data.split(delimiter)]
138
+
139
+ if existing_cols:
140
+ # Use existing column names
141
+ row_dict = dict(zip(existing_cols, values))
142
+ else:
143
+ # Create generic column names
144
+ row_dict = {f'col{i}': val for i, val in enumerate(values)}
145
+
146
+ rows.append(row_dict)
147
+ data = rows
148
+
149
+ df = pd.DataFrame(data)
150
+
151
+ # Handle append mode properly
152
+ write_header = header if mode == 'w' else (header and not Path(filepath).exists())
153
+
154
+ df.to_csv(
155
+ filepath,
156
+ encoding=encoding,
157
+ sep=delimiter,
158
+ index=index,
159
+ header=write_header,
160
+ float_format=float_format,
161
+ date_format=date_format,
162
+ mode=mode
163
+ )
164
+ return True
134
165
 
135
166
  except Exception as e:
136
167
  error_msg = f"Error writing CSV file {filepath}: {str(e)}"
@@ -21,7 +21,7 @@ class GenerateCOT:
21
21
  qa_pairs: Optional[Dict[str, str]] = None,
22
22
  model: str = "gpt-4o-mini",
23
23
  api_key: Optional[str] = None,
24
- max_attempts: int = 100
24
+ max_attempts: int = 3
25
25
  ):
26
26
  self.qa_pairs = qa_pairs or {}
27
27
  self.max_attempts = max_attempts
@@ -79,8 +79,10 @@ class GenerateCOT:
79
79
  def cot_improve(self, question: str, current: str) -> str:
80
80
  best_solution = current
81
81
  best_score = self._rate_solution(question, current)
82
+ attempts = 0
82
83
 
83
- for _ in range(self.max_attempts):
84
+ while attempts < self.max_attempts:
85
+ attempts += 1
84
86
  new_solution = self.cot_generate(question, current)
85
87
  new_score = self._rate_solution(question, new_solution)
86
88
 
@@ -88,7 +90,7 @@ class GenerateCOT:
88
90
  best_solution = new_solution
89
91
  best_score = new_score
90
92
 
91
- if best_score > 0.9:
93
+ if best_score > 0.8:
92
94
  break
93
95
 
94
96
  return best_solution
@@ -228,14 +230,16 @@ class GenerateCOT:
228
230
  "final_answer": current_solution
229
231
  }
230
232
  best_score = self._rate_solution(question, current_solution)
233
+ attempts = 0
231
234
 
232
- for _ in range(self.max_attempts):
235
+ while attempts < self.max_attempts:
236
+ attempts += 1
233
237
  new_solution = self.cot_generate_dict(question, current_solution)
234
238
  new_score = self._rate_solution(question, new_solution["thought_process"])
235
239
  if new_score > best_score:
236
240
  best_solution = new_solution
237
241
  best_score = new_score
238
- if best_score > 0.9:
242
+ if best_score > 0.8:
239
243
  break
240
244
  return best_solution
241
245
 
@@ -333,7 +337,10 @@ class GenerateCOT:
333
337
  Creates file with headers if it doesn't exist.
334
338
  """
335
339
  try:
336
- # Remove timestamp-based filename generation since we have default
340
+ # Add the current QA pair to self.qa_pairs
341
+ self.qa_pairs[question] = answer
342
+
343
+ # Generate solution
337
344
  solution = self.cot_run_dict(question)
338
345
 
339
346
  import csv
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: praisonaiagents
3
- Version: 0.0.57
3
+ Version: 0.0.59
4
4
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
5
5
  Author: Mervin Praison
6
6
  Requires-Dist: pydantic
@@ -12,13 +12,13 @@ praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRW
12
12
  praisonaiagents/llm/llm.py,sha256=G2wKMwitWBJRS6nOq9W77zXtsxvJwsVwXFOKYcllY0E,51386
13
13
  praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
14
14
  praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
15
- praisonaiagents/process/process.py,sha256=gP3QQxxFO4oUw_HYLf8MoyWyaj_104LIL_AbwLiBxaU,31261
15
+ praisonaiagents/process/process.py,sha256=HPw84OhnKQW3EyrDkpoQu0DcpxThbrzR2hWUgwQh9Pw,59955
16
16
  praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
17
- praisonaiagents/task/task.py,sha256=ikFjzNm4WPYONSLtWA3uDGNIUx_TvXTeU5SukWoC66E,14271
17
+ praisonaiagents/task/task.py,sha256=8KztSUKMO74y619TyI8d5DMJ1xPbyQXIB0Ux583oOVw,14259
18
18
  praisonaiagents/tools/__init__.py,sha256=CWOYV9SudYY82r45LnNgaVRV3cmsAFdasNRkPrLsgmI,9198
19
19
  praisonaiagents/tools/arxiv_tools.py,sha256=1stb31zTjLTon4jCnpZG5de9rKc9QWgC0leLegvPXWo,10528
20
20
  praisonaiagents/tools/calculator_tools.py,sha256=S1xPT74Geurvjm52QMMIG29zDXVEWJmM6nmyY7yF298,9571
21
- praisonaiagents/tools/csv_tools.py,sha256=gX2nYz4ktmpKvXB36jx5-GqddntEQD4G2fVQWTIKrwU,8435
21
+ praisonaiagents/tools/csv_tools.py,sha256=4Yr0QYwBXt-1BDXGLalB2eSsFR2mB5rH3KdHmRBQY6E,10036
22
22
  praisonaiagents/tools/duckdb_tools.py,sha256=KB3b-1HcX7ocoxskDpk_7RRpTGHnH8hizIY0ZdLRbIE,8816
23
23
  praisonaiagents/tools/duckduckgo_tools.py,sha256=ynlB5ZyWfHYjUq0JZXH12TganqTihgD-2IyRgs32y84,1657
24
24
  praisonaiagents/tools/excel_tools.py,sha256=e2HqcwnyBueOyss0xEKxff3zB4w4sNWCOMXvZfbDYlE,11309
@@ -35,8 +35,8 @@ praisonaiagents/tools/wikipedia_tools.py,sha256=pGko-f33wqXgxJTv8db7TbizY5XnzBQR
35
35
  praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxNMMs1A,17122
36
36
  praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
37
37
  praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
38
- praisonaiagents/tools/train/data/generatecot.py,sha256=HA8HwbhGIavfALxMbKTdGwABP5S6qzuiPtmUiV-FTZI,17491
39
- praisonaiagents-0.0.57.dist-info/METADATA,sha256=ad3iyUlLBQqjpuTcbka6Z6MAX57RaJGRbkifyYEhz-w,830
40
- praisonaiagents-0.0.57.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
41
- praisonaiagents-0.0.57.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
42
- praisonaiagents-0.0.57.dist-info/RECORD,,
38
+ praisonaiagents/tools/train/data/generatecot.py,sha256=k1gZHtgY1poVp5kajhgs4S9a4-epdA8NyZfYTa34lQU,17651
39
+ praisonaiagents-0.0.59.dist-info/METADATA,sha256=w6DYqKW5P9b2Rqu02j5Lt4-6K7f-InDrtUfV7fYu_FM,830
40
+ praisonaiagents-0.0.59.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
41
+ praisonaiagents-0.0.59.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
42
+ praisonaiagents-0.0.59.dist-info/RECORD,,