praisonaiagents 0.0.28__tar.gz → 0.0.30__tar.gz
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/PKG-INFO +4 -2
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/__init__.py +2 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/agent/agent.py +2 -2
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/agents/agents.py +169 -35
- praisonaiagents-0.0.30/praisonaiagents/memory/memory.py +931 -0
- praisonaiagents-0.0.30/praisonaiagents/task/task.py +222 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents.egg-info/PKG-INFO +4 -2
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents.egg-info/SOURCES.txt +1 -8
- praisonaiagents-0.0.30/praisonaiagents.egg-info/requires.txt +6 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/pyproject.toml +6 -1
- praisonaiagents-0.0.28/praisonaiagents/build/lib/praisonaiagents/__init__.py +0 -1
- praisonaiagents-0.0.28/praisonaiagents/build/lib/praisonaiagents/agent/__init__.py +0 -4
- praisonaiagents-0.0.28/praisonaiagents/build/lib/praisonaiagents/agent/agent.py +0 -350
- praisonaiagents-0.0.28/praisonaiagents/build/lib/praisonaiagents/agents/__init__.py +0 -4
- praisonaiagents-0.0.28/praisonaiagents/build/lib/praisonaiagents/agents/agents.py +0 -318
- praisonaiagents-0.0.28/praisonaiagents/build/lib/praisonaiagents/main.py +0 -112
- praisonaiagents-0.0.28/praisonaiagents/build/lib/praisonaiagents/task/task.py +0 -48
- praisonaiagents-0.0.28/praisonaiagents/task/__init__.py +0 -4
- praisonaiagents-0.0.28/praisonaiagents/task/task.py +0 -74
- praisonaiagents-0.0.28/praisonaiagents.egg-info/requires.txt +0 -3
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/agents/autoagents.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/process/__init__.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/process/process.py +0 -0
- {praisonaiagents-0.0.28/praisonaiagents/build/lib → praisonaiagents-0.0.30}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/__init__.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/arxiv_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/calculator_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/csv_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/duckdb_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/excel_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/file_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/json_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/newspaper_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/pandas_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/python_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/shell_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/spider_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/test.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/wikipedia_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/xml_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/yaml_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents/tools/yfinance_tools.py +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.28 → praisonaiagents-0.0.30}/setup.cfg +0 -0
PKG-INFO

@@ -1,8 +1,10 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
 Name: praisonaiagents
-Version: 0.0.28
+Version: 0.0.30
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
 Requires-Dist: rich
 Requires-Dist: openai
+Provides-Extra: memory
+Requires-Dist: chromadb>=0.6.0; extra == "memory"
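The metadata now declares an optional `memory` extra that pulls in `chromadb>=0.6.0`. A minimal sketch of checking for it at runtime, assuming nothing beyond the dependency named above:

```python
# Guard for the optional "memory" extra; chromadb is the only package the
# metadata above ties to it.
import importlib.util

if importlib.util.find_spec("chromadb") is None:
    raise RuntimeError(
        'Memory support needs the extra: pip install "praisonaiagents[memory]"'
    )
```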
praisonaiagents/__init__.py

@@ -7,6 +7,7 @@ from .agents.agents import PraisonAIAgents
 from .task.task import Task
 from .tools.tools import Tools
 from .agents.autoagents import AutoAgents
+from .memory.memory import Memory
 from .main import (
     TaskOutput,
     ReflectionOutput,
@@ -35,6 +36,7 @@ __all__ = [
     'TaskOutput',
     'ReflectionOutput',
     'AutoAgents',
+    'Memory',
     'display_interaction',
     'display_self_reflection',
     'display_instruction',
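With this re-export, `Memory` is importable from the package root. A brief standalone sketch; the config keys simply mirror the defaults that `PraisonAIAgents` builds later in this diff:

```python
from praisonaiagents import Memory

# Standalone Memory instance; Memory(config=..., verbose=...) is the call
# signature PraisonAIAgents uses further down in this diff.
memory = Memory(config={"provider": "rag", "use_embedding": True}, verbose=0)
```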
praisonaiagents/agent/agent.py

@@ -162,7 +162,7 @@ class Agent:
         max_iter: int = 20,
         max_rpm: Optional[int] = None,
         max_execution_time: Optional[int] = None,
-        memory:
+        memory: Optional[Any] = None,
         verbose: bool = True,
         allow_delegation: bool = False,
         step_callback: Optional[Any] = None,
@@ -178,7 +178,7 @@ class Agent:
         knowledge_sources: Optional[List[Any]] = None,
         use_system_prompt: Optional[bool] = True,
         markdown: bool = True,
-        self_reflect:
+        self_reflect: bool = False,
         max_reflect: int = 3,
         min_reflect: int = 1,
         reflect_llm: Optional[str] = None
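Two `Agent` defaults change here: `memory` becomes `Optional[Any] = None`, and `self_reflect` now defaults to `False`, so self-reflection is opt-in from 0.0.30 onward. A short sketch of enabling it explicitly; the name, role, and goal values are illustrative only:

```python
from praisonaiagents import Agent

# self_reflect must now be requested explicitly; max_reflect and min_reflect
# keep the defaults shown above (3 and 1) and appear here only for clarity.
reviewer = Agent(
    name="Reviewer",
    role="Quality reviewer",
    goal="Check drafts for factual errors",
    self_reflect=True,
    max_reflect=3,
    min_reflect=1,
)
```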
praisonaiagents/agents/agents.py

@@ -13,6 +13,9 @@ from ..task.task import Task
 from ..process.process import Process, LoopItems
 import asyncio
 
+# Set up logger
+logger = logging.getLogger(__name__)
+
 def encode_file_to_base64(file_path: str) -> str:
     """Base64-encode a file."""
     import base64
@@ -41,7 +44,7 @@ def process_video(video_path: str, seconds_per_frame=2):
     return base64_frames
 
 class PraisonAIAgents:
-    def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None):
+    def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None):
         if not agents:
             raise ValueError("At least one agent must be provided")
 
@@ -57,8 +60,21 @@
 
         # Check for manager_llm in environment variable if not provided
         self.manager_llm = manager_llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
+
+        # Set logger level based on verbose
+        if verbose >= 5:
+            logger.setLevel(logging.INFO)
+        else:
+            logger.setLevel(logging.WARNING)
+
+        # Also set third-party loggers to WARNING
+        logging.getLogger('chromadb').setLevel(logging.WARNING)
+        logging.getLogger('openai').setLevel(logging.WARNING)
+        logging.getLogger('httpx').setLevel(logging.WARNING)
+        logging.getLogger('httpcore').setLevel(logging.WARNING)
+
         if self.verbose:
-
+            logger.info(f"Using model {self.manager_llm} for manager")
 
         # If no tasks provided, generate them from agents
         if tasks is None:
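Taken together, the two hunks above add `memory`, `memory_config`, and `embedder` parameters to the constructor and tie INFO-level logging to `verbose >= 5`. A hedged end-to-end sketch of enabling the shared memory; it assumes the `memory` extra from the PKG-INFO hunk is installed, and the agent/task values are illustrative:

```python
from praisonaiagents import Agent, PraisonAIAgents, Task

researcher = Agent(name="Researcher", role="Analyst", goal="Collect facts")
fact_task = Task(
    description="List three facts about solar power",
    expected_output="Three bullet points",
    agent=researcher,
)

agents = PraisonAIAgents(
    agents=[researcher],
    tasks=[fact_task],
    verbose=5,    # verbose >= 5 switches the module logger to INFO (hunk above)
    memory=True,  # new in 0.0.30: builds a shared Memory instance
)
agents.run_all_tasks()
```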
@@ -66,12 +82,12 @@
             for agent in self.agents:
                 task = agent.generate_task()
                 tasks.append(task)
-
+            logger.info(f"Auto-generated {len(tasks)} tasks from agents")
         else:
             # Validate tasks for backward compatibility
             if not tasks:
                 raise ValueError("If tasks are provided, at least one task must be present")
-
+            logger.info(f"Using {len(tasks)} provided tasks")
 
         # Add tasks and set their status
         for task in tasks:
@@ -87,9 +103,65 @@
                 if tasks[i + 1].context is None:
                     tasks[i + 1].context = []
                 tasks[i + 1].context.append(tasks[i])
-
+            logger.info("Set up sequential flow with automatic context passing")
 
         self._state = {} # Add state storage at PraisonAIAgents level
+
+        # Initialize memory system
+        self.shared_memory = None
+        if memory:
+            try:
+                from ..memory.memory import Memory
+
+                # Get memory config from parameter or first task
+                mem_cfg = memory_config
+                if not mem_cfg:
+                    mem_cfg = next((t.config.get('memory_config') for t in tasks if hasattr(t, 'config') and t.config), None)
+
+                # Set default memory config if none provided
+                if not mem_cfg:
+                    mem_cfg = {
+                        "provider": "rag",
+                        "use_embedding": True,
+                        "storage": {
+                            "type": "sqlite",
+                            "path": "./.praison/memory.db"
+                        },
+                        "rag_db_path": "./.praison/chroma_db"
+                    }
+
+                # Add embedder config if provided
+                if embedder:
+                    if isinstance(embedder, dict):
+                        mem_cfg = mem_cfg or {}
+                        mem_cfg["embedder"] = embedder
+                    else:
+                        # Handle direct embedder function
+                        mem_cfg = mem_cfg or {}
+                        mem_cfg["embedder_function"] = embedder
+
+                if mem_cfg:
+                    # Pass verbose level to Memory
+                    self.shared_memory = Memory(config=mem_cfg, verbose=verbose)
+                    if verbose >= 5:
+                        logger.info("Initialized shared memory for PraisonAIAgents")
+
+                    # Distribute memory to tasks
+                    for task in tasks:
+                        if not task.memory:
+                            task.memory = self.shared_memory
+                            if verbose >= 5:
+                                logger.info(f"Assigned shared memory to task {task.id}")
+
+            except Exception as e:
+                logger.error(f"Failed to initialize shared memory: {e}")
+
+        # Update tasks with shared memory
+        if self.shared_memory:
+            for task in tasks:
+                if not task.memory:
+                    task.memory = self.shared_memory
+                    logger.info(f"Assigned shared memory to task {task.id}")
 
     def add_task(self, task):
         task_id = self.task_id_counter
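When `memory=True` and no `memory_config` is supplied (directly or via a task's `config['memory_config']`), the defaults above apply: a RAG provider with embeddings, SQLite state at `./.praison/memory.db`, and a Chroma store at `./.praison/chroma_db`. A sketch of overriding only the storage locations, reusing `researcher` and `fact_task` from the earlier constructor sketch and assuming the schema is exactly the one built above:

```python
# Only keys that appear in the default config above are used here; anything
# else about the Memory schema would be an assumption.
custom_memory_config = {
    "provider": "rag",
    "use_embedding": True,
    "storage": {"type": "sqlite", "path": "./data/agent_memory.db"},
    "rag_db_path": "./data/chroma_db",
}

agents = PraisonAIAgents(
    agents=[researcher],
    tasks=[fact_task],
    memory=True,
    memory_config=custom_memory_config,
)
```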
@@ -142,7 +214,7 @@
         task_prompt = f"""
 You need to do the following task: {task.description}.
 Expected Output: {task.expected_output}.
-
+"""
         if task.context:
             context_results = ""
             for context_task in task.context:
@@ -157,8 +229,8 @@ Here are the results of previous tasks that might be useful:\n
         task_prompt += "Please provide only the final result of your work. Do not add any conversation or extra explanation."
 
         if self.verbose >= 2:
-
-
+            logger.info(f"Executing task {task_id}: {task.description} using {executor_agent.name}")
+            logger.debug(f"Starting execution of task {task_id} with prompt:\n{task_prompt}")
 
         if task.images:
             def _get_multimodal_message(text_prompt, images):
@@ -223,8 +295,8 @@ Here are the results of previous tasks that might be useful:\n
                     task_output.json_dict = parsed
                     task_output.output_format = "JSON"
                 except:
-
-
+                    logger.warning(f"Warning: Could not parse output of task {task_id} as JSON")
+                    logger.debug(f"Output that failed JSON parsing: {agent_output}")
 
             if task.output_pydantic:
                 cleaned = self.clean_json_output(agent_output)
@@ -234,8 +306,8 @@ Here are the results of previous tasks that might be useful:\n
                     task_output.pydantic = pyd_obj
                     task_output.output_format = "Pydantic"
                 except:
-
-
+                    logger.warning(f"Warning: Could not parse output of task {task_id} as Pydantic Model")
+                    logger.debug(f"Output that failed Pydantic parsing: {agent_output}")
 
         task.result = task_output
         return task_output
@@ -250,37 +322,53 @@ Here are the results of previous tasks that might be useful:\n
             return
         task = self.tasks[task_id]
         if task.status == "completed":
-
+            logger.info(f"Task with ID {task_id} is already completed")
             return
 
         retries = 0
         while task.status != "completed" and retries < self.max_retries:
-
+            logger.debug(f"Attempt {retries+1} for task {task_id}")
             if task.status in ["not started", "in progress"]:
                 task_output = await self.aexecute_task(task_id)
                 if task_output and self.completion_checker(task, task_output.raw):
                     task.status = "completed"
-
+                    # Run execute_callback for memory operations
+                    try:
                         await task.execute_callback(task_output)
+                    except Exception as e:
+                        logger.error(f"Error executing memory callback for task {task_id}: {e}")
+                        logger.exception(e)
+
+                    # Run task callback if exists
+                    if task.callback:
+                        try:
+                            if asyncio.iscoroutinefunction(task.callback):
+                                await task.callback(task_output)
+                            else:
+                                task.callback(task_output)
+                        except Exception as e:
+                            logger.error(f"Error executing task callback for task {task_id}: {e}")
+                            logger.exception(e)
+
                     self.save_output_to_file(task, task_output)
                     if self.verbose >= 1:
-
+                        logger.info(f"Task {task_id} completed successfully.")
                 else:
                     task.status = "in progress"
                     if self.verbose >= 1:
-
+                        logger.info(f"Task {task_id} not completed, retrying")
                     await asyncio.sleep(1)
                     retries += 1
             else:
                 if task.status == "failed":
-
+                    logger.info("Task is failed, resetting to in-progress for another try...")
                     task.status = "in progress"
                 else:
-
+                    logger.info("Invalid Task status")
                     break
 
         if retries == self.max_retries and task.status != "completed":
-
+            logger.info(f"Task {task_id} failed after {self.max_retries} retries.")
 
     async def arun_all_tasks(self):
         """Async version of run_all_tasks method"""
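Task completion in the async runner now fires two callbacks: the internal `task.execute_callback` (which handles the memory write-back) and any user-supplied `task.callback`, which may be a plain function or a coroutine since the runner checks `asyncio.iscoroutinefunction`. A sketch of a coroutine callback, reusing `researcher` from the earlier constructor sketch; passing the callback through the `Task` constructor is an assumption based on the `task.callback` attribute used above:

```python
# The callback receives the TaskOutput; .raw is the same attribute the
# completion checker above consumes.
async def log_result(output):
    print(f"Task finished: {output.raw[:80]}")

summary_task = Task(
    description="Summarise the findings",
    expected_output="A short summary",
    agent=researcher,
    callback=log_result,  # assumed constructor parameter backing task.callback
)
```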
@@ -328,7 +416,7 @@ Here are the results of previous tasks that might be useful:\n
                 with open(task.output_file, "w") as f:
                     f.write(str(task_output))
                 if self.verbose >= 1:
-
+                    logger.info(f"Task output saved to {task.output_file}")
             except Exception as e:
                 display_error(f"Error saving task output to file: {e}")
 
@@ -339,6 +427,15 @@ Here are the results of previous tasks that might be useful:\n
             return
         task = self.tasks[task_id]
 
+        logger.info(f"Starting execution of task {task_id}")
+        logger.info(f"Task config: {task.config}")
+
+        # Initialize memory before task execution
+        if not task.memory:
+            task.memory = task.initialize_memory()
+
+        logger.info(f"Task memory status: {'Initialized' if task.memory else 'Not initialized'}")
+
         # Only import multimodal dependencies if task has images
         if task.images and task.status == "not started":
             try:
@@ -371,11 +468,20 @@ Expected Output: {task.expected_output}.
 Here are the results of previous tasks that might be useful:\n
 {context_results}
 """
+        # Add memory context if available
+        if task.memory:
+            try:
+                memory_context = task.memory.build_context_for_task(task.description)
+                if memory_context:
+                    task_prompt += f"\n\nRelevant memory context:\n{memory_context}"
+            except Exception as e:
+                logger.error(f"Error getting memory context: {e}")
+
         task_prompt += "Please provide only the final result of your work. Do not add any conversation or extra explanation."
 
         if self.verbose >= 2:
-
-
+            logger.info(f"Executing task {task_id}: {task.description} using {executor_agent.name}")
+            logger.debug(f"Starting execution of task {task_id} with prompt:\n{task_prompt}")
 
         if task.images:
             def _get_multimodal_message(text_prompt, images):
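Before the prompt is finalised, the runner queries `task.memory.build_context_for_task(task.description)` and, when the result is non-empty, appends it under a "Relevant memory context" heading. The same call can be made directly against a `Memory` instance, for example the one from the sketch after the `__init__.py` hunk:

```python
# Direct query of the retrieval path the runner uses above; an empty result
# simply means nothing relevant has been stored yet.
context = memory.build_context_for_task("solar power facts")
if context:
    print(context)
```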
@@ -425,6 +531,17 @@ Here are the results of previous tasks that might be useful:\n
             )
 
         if agent_output:
+            # Store the response in memory
+            if task.memory:
+                try:
+                    task.store_in_memory(
+                        content=agent_output,
+                        agent_name=executor_agent.name,
+                        task_id=task_id
+                    )
+                except Exception as e:
+                    logger.error(f"Failed to store agent output in memory: {e}")
+
             task_output = TaskOutput(
                 description=task.description,
                 summary=task.description[:10],
@@ -440,8 +557,8 @@ Here are the results of previous tasks that might be useful:\n
                     task_output.json_dict = parsed
                     task_output.output_format = "JSON"
                 except:
-
-
+                    logger.warning(f"Warning: Could not parse output of task {task_id} as JSON")
+                    logger.debug(f"Output that failed JSON parsing: {agent_output}")
 
             if task.output_pydantic:
                 cleaned = self.clean_json_output(agent_output)
@@ -451,8 +568,8 @@ Here are the results of previous tasks that might be useful:\n
                     task_output.pydantic = pyd_obj
                     task_output.output_format = "Pydantic"
                 except:
-
-
+                    logger.warning(f"Warning: Could not parse output of task {task_id} as Pydantic Model")
+                    logger.debug(f"Output that failed Pydantic parsing: {agent_output}")
 
         task.result = task_output
         return task_output
@@ -467,37 +584,54 @@ Here are the results of previous tasks that might be useful:\n
             return
         task = self.tasks[task_id]
         if task.status == "completed":
-
+            logger.info(f"Task with ID {task_id} is already completed")
             return
 
         retries = 0
         while task.status != "completed" and retries < self.max_retries:
-
+            logger.debug(f"Attempt {retries+1} for task {task_id}")
             if task.status in ["not started", "in progress"]:
                 task_output = self.execute_task(task_id)
                 if task_output and self.completion_checker(task, task_output.raw):
                     task.status = "completed"
+                    # Run execute_callback for memory operations
+                    try:
+                        loop = asyncio.get_event_loop()
+                        loop.run_until_complete(task.execute_callback(task_output))
+                    except Exception as e:
+                        logger.error(f"Error executing memory callback for task {task_id}: {e}")
+                        logger.exception(e)
+
+                    # Run task callback if exists
                     if task.callback:
-
+                        try:
+                            if asyncio.iscoroutinefunction(task.callback):
+                                loop.run_until_complete(task.callback(task_output))
+                            else:
+                                task.callback(task_output)
+                        except Exception as e:
+                            logger.error(f"Error executing task callback for task {task_id}: {e}")
+                            logger.exception(e)
+
                     self.save_output_to_file(task, task_output)
                     if self.verbose >= 1:
-
+                        logger.info(f"Task {task_id} completed successfully.")
                 else:
                     task.status = "in progress"
                     if self.verbose >= 1:
-
+                        logger.info(f"Task {task_id} not completed, retrying")
                     time.sleep(1)
                     retries += 1
             else:
                 if task.status == "failed":
-
+                    logger.info("Task is failed, resetting to in-progress for another try...")
                     task.status = "in progress"
                 else:
-
+                    logger.info("Invalid Task status")
                     break
 
         if retries == self.max_retries and task.status != "completed":
-
+            logger.info(f"Task {task_id} failed after {self.max_retries} retries.")
 
     def run_all_tasks(self):
         """Synchronous version of run_all_tasks method"""