praisonaiagents 0.0.19__py3-none-any.whl → 0.0.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
praisonaiagents/__init__.py CHANGED
@@ -5,6 +5,7 @@ Praison AI Agents - A package for hierarchical AI agent task execution
 from .agent.agent import Agent
 from .agents.agents import PraisonAIAgents
 from .task.task import Task
+from .tools.tools import Tools
 from .main import (
     TaskOutput,
     ReflectionOutput,
@@ -17,12 +18,18 @@ from .main import (
     clean_triple_backticks,
     error_logs,
     register_display_callback,
-    display_callbacks,
+    sync_display_callbacks,
+    async_display_callbacks,
 )

+# Add Agents as an alias for PraisonAIAgents
+Agents = PraisonAIAgents
+
 __all__ = [
     'Agent',
     'PraisonAIAgents',
+    'Agents',
+    'Tools',
     'Task',
     'TaskOutput',
     'ReflectionOutput',
@@ -35,5 +42,6 @@ __all__ = [
     'clean_triple_backticks',
     'error_logs',
     'register_display_callback',
-    'display_callbacks',
+    'sync_display_callbacks',
+    'async_display_callbacks',
 ]
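
A minimal usage sketch of the new top-level exports follows. The agent and its instructions string are illustrative, and it assumes the existing PraisonAIAgents start() entry point (not shown in this diff) is still how a run is kicked off:

from praisonaiagents import Agent, Agents, Tools

# 'Agents' is a new alias for PraisonAIAgents
researcher = Agent(
    instructions="Research the latest AI agent frameworks",  # illustrative
    tools=[Tools.internet_search]
)
agents = Agents(agents=[researcher])  # tasks auto-generated; see agents.py below
# agents.start()  # assumed entry point, not part of this diff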
praisonaiagents/agent/agent.py CHANGED
@@ -3,7 +3,7 @@ import time
 import json
 import logging
 import asyncio
-from typing import List, Optional, Any, Dict, Union, Literal
+from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING
 from rich.console import Console
 from rich.live import Live
 from openai import AsyncOpenAI
@@ -19,6 +19,9 @@ from ..main import (
     error_logs
 )

+if TYPE_CHECKING:
+    from ..task.task import Task
+
 class Agent:
     def _generate_tool_definition(self, function_name):
         """
@@ -132,10 +135,11 @@ class Agent:

     def __init__(
         self,
-        name: str,
-        role: str,
-        goal: str,
-        backstory: str,
+        name: Optional[str] = None,
+        role: Optional[str] = None,
+        goal: Optional[str] = None,
+        backstory: Optional[str] = None,
+        instructions: Optional[str] = None,
         llm: Optional[Union[str, Any]] = "gpt-4o",
         tools: Optional[List[Any]] = None,
         function_calling_llm: Optional[Any] = None,
@@ -158,15 +162,33 @@ class Agent:
         knowledge_sources: Optional[List[Any]] = None,
         use_system_prompt: Optional[bool] = True,
         markdown: bool = True,
-        self_reflect: bool = True,
+        self_reflect: Optional[bool] = None,
         max_reflect: int = 3,
         min_reflect: int = 1,
         reflect_llm: Optional[str] = None
     ):
-        self.name = name
-        self.role = role
-        self.goal = goal
-        self.backstory = backstory
+        # Handle backward compatibility for required fields
+        if all(x is None for x in [name, role, goal, backstory, instructions]):
+            raise ValueError("At least one of name, role, goal, backstory, or instructions must be provided")
+
+        # If instructions are provided, use them to set role, goal, and backstory
+        if instructions:
+            self.name = name or "Agent"
+            self.role = role or "Assistant"
+            self.goal = goal or instructions
+            self.backstory = backstory or instructions
+            # Set self_reflect to False by default for instruction-based agents
+            self.self_reflect = False if self_reflect is None else self_reflect
+        else:
+            # Use provided values or defaults
+            self.name = name or "Agent"
+            self.role = role or "Assistant"
+            self.goal = goal or "Help the user with their tasks"
+            self.backstory = backstory or "I am an AI assistant"
+            # Default to True for traditional agents if not specified
+            self.self_reflect = True if self_reflect is None else self_reflect
+
+        self.instructions = instructions
         self.llm = llm
         self.tools = tools if tools else [] # Store original tools
         self.function_calling_llm = function_calling_llm
@@ -190,7 +212,6 @@ class Agent:
         self.use_system_prompt = use_system_prompt
         self.chat_history = []
         self.markdown = markdown
-        self.self_reflect = self_reflect
         self.max_reflect = max_reflect
         self.min_reflect = min_reflect
         self.reflect_llm = reflect_llm
@@ -202,6 +223,21 @@ Your Role: {self.role}\n
 Your Goal: {self.goal}
 """

+    def generate_task(self) -> 'Task':
+        """Generate a Task object from the agent's instructions"""
+        from ..task.task import Task
+
+        description = self.instructions if self.instructions else f"Execute task as {self.role} with goal: {self.goal}"
+        expected_output = "Complete the assigned task successfully"
+
+        return Task(
+            name=self.name,
+            description=description,
+            expected_output=expected_output,
+            agent=self,
+            tools=self.tools
+        )
+
     def execute_tool(self, function_name, arguments):
         """
         Execute a tool dynamically based on the function name and arguments.
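
A short sketch of the new instructions-based construction and generate_task(), with illustrative strings; it assumes Task keeps the fields it is constructed with accessible as attributes:

from praisonaiagents import Agent

# Traditional construction still works; self_reflect defaults to True for it
writer = Agent(role="Writer", goal="Write a summary", backstory="Expert summarizer")

# Instructions-only construction: goal/backstory fall back to the instructions,
# and self_reflect defaults to False unless set explicitly
helper = Agent(instructions="Summarize the provided text in three bullet points")

task = helper.generate_task()  # Task named after the agent, description = instructions
print(task.description)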
praisonaiagents/agents/agents.py CHANGED
@@ -41,7 +41,10 @@ def process_video(video_path: str, seconds_per_frame=2):
     return base64_frames

 class PraisonAIAgents:
-    def __init__(self, agents, tasks, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None):
+    def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None):
+        if not agents:
+            raise ValueError("At least one agent must be provided")
+
         self.agents = agents
         self.tasks = {}
         if max_retries < 3:
@@ -54,9 +57,36 @@ class PraisonAIAgents:
         if not manager_llm:
             logging.debug("No manager_llm provided. Using OPENAI_MODEL_NAME environment variable or defaulting to 'gpt-4o'")
         self.manager_llm = manager_llm if manager_llm else os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
+
+        # If no tasks provided, generate them from agents
+        if tasks is None:
+            tasks = []
+            for agent in self.agents:
+                task = agent.generate_task()
+                tasks.append(task)
+            logging.info(f"Auto-generated {len(tasks)} tasks from agents")
+        else:
+            # Validate tasks for backward compatibility
+            if not tasks:
+                raise ValueError("If tasks are provided, at least one task must be present")
+            logging.info(f"Using {len(tasks)} provided tasks")
+
+        # Add tasks and set their status
         for task in tasks:
             self.add_task(task)
             task.status = "not started"
+
+        # If tasks were auto-generated from agents or process is sequential, set up sequential flow
+        if len(tasks) > 1 and (process == "sequential" or all(task.next_tasks == [] for task in tasks)):
+            for i in range(len(tasks) - 1):
+                # Set up next task relationship
+                tasks[i].next_tasks = [tasks[i + 1].name]
+                # Set up context for the next task to include the current task
+                if tasks[i + 1].context is None:
+                    tasks[i + 1].context = []
+                tasks[i + 1].context.append(tasks[i])
+            logging.info("Set up sequential flow with automatic context passing")
+
         self._state = {} # Add state storage at PraisonAIAgents level

     def add_task(self, task):
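
A sketch of the new no-tasks construction path, using illustrative instructions; each agent contributes one auto-generated task, and the tasks are chained sequentially with context passing as implemented above:

from praisonaiagents import Agent, PraisonAIAgents

planner = Agent(instructions="Outline a blog post about AI agents")
writer = Agent(instructions="Write the blog post from the outline")

# tasks omitted: one task per agent via agent.generate_task(), then
# planner_task.next_tasks = [writer_task.name] and planner_task is appended
# to writer_task.context, so the writer sees the planner's result
crew = PraisonAIAgents(agents=[planner, writer])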
@@ -119,9 +149,9 @@ Expected Output: {task.expected_output}.
 else:
     context_results += f"Previous task {context_task.name if context_task.name else context_task.description} had no result.\n"
 task_prompt += f"""
-Here are the results of previous tasks that might be useful:\n
-{context_results}
-"""
+Here are the results of previous tasks that might be useful:\n
+{context_results}
+"""
 task_prompt += "Please provide only the final result of your work. Do not add any conversation or extra explanation."

 if self.verbose >= 2:
@@ -320,7 +350,7 @@ Expected Output: {task.expected_output}.
 task_prompt = f"""
 You need to do the following task: {task.description}.
 Expected Output: {task.expected_output}.
-"""
+"""
 if task.context:
     context_results = ""
     for context_task in task.context:
@@ -329,9 +359,9 @@ Expected Output: {task.expected_output}.
 else:
     context_results += f"Previous task {context_task.name if context_task.name else context_task.description} had no result.\n"
 task_prompt += f"""
-Here are the results of previous tasks that might be useful:\n
-{context_results}
-"""
+Here are the results of previous tasks that might be useful:\n
+{context_results}
+"""
 task_prompt += "Please provide only the final result of your work. Do not add any conversation or extra explanation."

 if self.verbose >= 2:
praisonaiagents/main.py CHANGED
@@ -26,12 +26,49 @@ logging.basicConfig(
 # Global list to store error logs
 error_logs = []

-# Global callback registry
-display_callbacks = {}
+# Separate registries for sync and async callbacks
+sync_display_callbacks = {}
+async_display_callbacks = {}
+
+# At the top of the file, add display_callbacks to __all__
+__all__ = [
+    'error_logs',
+    'register_display_callback',
+    'sync_display_callbacks',
+    'async_display_callbacks',
+    # ... other exports
+]
+
+def register_display_callback(display_type: str, callback_fn, is_async: bool = False):
+    """Register a synchronous or asynchronous callback function for a specific display type.
+
+    Args:
+        display_type (str): Type of display event ('interaction', 'self_reflection', etc.)
+        callback_fn: The callback function to register
+        is_async (bool): Whether the callback is asynchronous
+    """
+    if is_async:
+        async_display_callbacks[display_type] = callback_fn
+    else:
+        sync_display_callbacks[display_type] = callback_fn

-def register_display_callback(display_type: str, callback_fn):
-    """Register a callback function for a specific display type."""
-    display_callbacks[display_type] = callback_fn
+async def execute_callback(display_type: str, **kwargs):
+    """Execute both sync and async callbacks for a given display type.
+
+    Args:
+        display_type (str): Type of display event
+        **kwargs: Arguments to pass to the callback functions
+    """
+    # Execute synchronous callback if registered
+    if display_type in sync_display_callbacks:
+        callback = sync_display_callbacks[display_type]
+        loop = asyncio.get_event_loop()
+        await loop.run_in_executor(None, lambda: callback(**kwargs))
+
+    # Execute asynchronous callback if registered
+    if display_type in async_display_callbacks:
+        callback = async_display_callbacks[display_type]
+        await callback(**kwargs)

 def _clean_display_content(content: str, max_length: int = 20000) -> str:
     """Helper function to clean and truncate content for display."""
@@ -54,11 +91,10 @@ def _clean_display_content(content: str, max_length: int = 20000) -> str:
     return content.strip()

 def display_interaction(message, response, markdown=True, generation_time=None, console=None):
-    """Display the interaction between user and assistant."""
+    """Synchronous version of display_interaction."""
     if console is None:
         console = Console()

-    # Handle multimodal content (list)
     if isinstance(message, list):
         text_content = next((item["text"] for item in message if item["type"] == "text"), "")
         message = text_content
@@ -66,16 +102,16 @@ def display_interaction(message, response, markdown=True, generation_time=None,
     message = _clean_display_content(str(message))
     response = _clean_display_content(str(response))

-    # Execute callback if registered
-    if 'interaction' in display_callbacks:
-        display_callbacks['interaction'](
+    # Execute synchronous callback if registered
+    if 'interaction' in sync_display_callbacks:
+        sync_display_callbacks['interaction'](
             message=message,
             response=response,
             markdown=markdown,
             generation_time=generation_time
         )

-    # Existing display logic...
+    # Rest of the display logic...
     if generation_time:
         console.print(Text(f"Response generated in {generation_time:.1f}s", style="dim"))

@@ -94,8 +130,8 @@ def display_self_reflection(message: str, console=None):
     message = _clean_display_content(str(message))

     # Execute callback if registered
-    if 'self_reflection' in display_callbacks:
-        display_callbacks['self_reflection'](message=message)
+    if 'self_reflection' in sync_display_callbacks:
+        sync_display_callbacks['self_reflection'](message=message)

     console.print(Panel.fit(Text(message, style="bold yellow"), title="Self Reflection", border_style="magenta"))

@@ -107,8 +143,8 @@ def display_instruction(message: str, console=None):
     message = _clean_display_content(str(message))

     # Execute callback if registered
-    if 'instruction' in display_callbacks:
-        display_callbacks['instruction'](message=message)
+    if 'instruction' in sync_display_callbacks:
+        sync_display_callbacks['instruction'](message=message)

     console.print(Panel.fit(Text(message, style="bold blue"), title="Instruction", border_style="cyan"))

@@ -120,8 +156,8 @@ def display_tool_call(message: str, console=None):
     message = _clean_display_content(str(message))

     # Execute callback if registered
-    if 'tool_call' in display_callbacks:
-        display_callbacks['tool_call'](message=message)
+    if 'tool_call' in sync_display_callbacks:
+        sync_display_callbacks['tool_call'](message=message)

     console.print(Panel.fit(Text(message, style="bold cyan"), title="Tool Call", border_style="green"))

@@ -133,8 +169,8 @@ def display_error(message: str, console=None):
     message = _clean_display_content(str(message))

     # Execute callback if registered
-    if 'error' in display_callbacks:
-        display_callbacks['error'](message=message)
+    if 'error' in sync_display_callbacks:
+        sync_display_callbacks['error'](message=message)

     console.print(Panel.fit(Text(message, style="bold red"), title="Error", border_style="red"))
     error_logs.append(message)
@@ -151,8 +187,8 @@ def display_generating(content: str = "", start_time: Optional[float] = None):
     content = _clean_display_content(str(content))

     # Execute callback if registered
-    if 'generating' in display_callbacks:
-        display_callbacks['generating'](
+    if 'generating' in sync_display_callbacks:
+        sync_display_callbacks['generating'](
             content=content,
             elapsed_time=elapsed_str.strip() if elapsed_str else None
         )
@@ -172,26 +208,16 @@ async def adisplay_interaction(message, response, markdown=True, generation_time
     message = _clean_display_content(str(message))
     response = _clean_display_content(str(response))

-    if 'interaction' in display_callbacks:
-        callback = display_callbacks['interaction']
-        if asyncio.iscoroutinefunction(callback):
-            await callback(
-                message=message,
-                response=response,
-                markdown=markdown,
-                generation_time=generation_time
-            )
-        else:
-            loop = asyncio.get_event_loop()
-            await loop.run_in_executor(
-                None,
-                callback,
-                message,
-                response,
-                markdown,
-                generation_time
-            )
+    # Execute callbacks
+    await execute_callback(
+        'interaction',
+        message=message,
+        response=response,
+        markdown=markdown,
+        generation_time=generation_time
+    )

+    # Rest of the display logic...
     if generation_time:
         console.print(Text(f"Response generated in {generation_time:.1f}s", style="dim"))

@@ -210,13 +236,8 @@ async def adisplay_self_reflection(message: str, console=None):
         console = Console()
     message = _clean_display_content(str(message))

-    if 'self_reflection' in display_callbacks:
-        callback = display_callbacks['self_reflection']
-        if asyncio.iscoroutinefunction(callback):
-            await callback(message=message)
-        else:
-            loop = asyncio.get_event_loop()
-            await loop.run_in_executor(None, callback, message)
+    if 'self_reflection' in async_display_callbacks:
+        await async_display_callbacks['self_reflection'](message=message)

     console.print(Panel.fit(Text(message, style="bold yellow"), title="Self Reflection", border_style="magenta"))

@@ -228,13 +249,8 @@ async def adisplay_instruction(message: str, console=None):
         console = Console()
     message = _clean_display_content(str(message))

-    if 'instruction' in display_callbacks:
-        callback = display_callbacks['instruction']
-        if asyncio.iscoroutinefunction(callback):
-            await callback(message=message)
-        else:
-            loop = asyncio.get_event_loop()
-            await loop.run_in_executor(None, callback, message)
+    if 'instruction' in async_display_callbacks:
+        await async_display_callbacks['instruction'](message=message)

     console.print(Panel.fit(Text(message, style="bold blue"), title="Instruction", border_style="cyan"))

@@ -246,13 +262,8 @@ async def adisplay_tool_call(message: str, console=None):
         console = Console()
     message = _clean_display_content(str(message))

-    if 'tool_call' in display_callbacks:
-        callback = display_callbacks['tool_call']
-        if asyncio.iscoroutinefunction(callback):
-            await callback(message=message)
-        else:
-            loop = asyncio.get_event_loop()
-            await loop.run_in_executor(None, callback, message)
+    if 'tool_call' in async_display_callbacks:
+        await async_display_callbacks['tool_call'](message=message)

     console.print(Panel.fit(Text(message, style="bold cyan"), title="Tool Call", border_style="green"))

@@ -264,13 +275,8 @@ async def adisplay_error(message: str, console=None):
         console = Console()
     message = _clean_display_content(str(message))

-    if 'error' in display_callbacks:
-        callback = display_callbacks['error']
-        if asyncio.iscoroutinefunction(callback):
-            await callback(message=message)
-        else:
-            loop = asyncio.get_event_loop()
-            await loop.run_in_executor(None, callback, message)
+    if 'error' in async_display_callbacks:
+        await async_display_callbacks['error'](message=message)

     console.print(Panel.fit(Text(message, style="bold red"), title="Error", border_style="red"))
     error_logs.append(message)
@@ -287,21 +293,11 @@ async def adisplay_generating(content: str = "", start_time: Optional[float] = N

     content = _clean_display_content(str(content))

-    if 'generating' in display_callbacks:
-        callback = display_callbacks['generating']
-        if asyncio.iscoroutinefunction(callback):
-            await callback(
-                content=content,
-                elapsed_time=elapsed_str.strip() if elapsed_str else None
-            )
-        else:
-            loop = asyncio.get_event_loop()
-            await loop.run_in_executor(
-                None,
-                callback,
-                content,
-                elapsed_str.strip() if elapsed_str else None
-            )
+    if 'generating' in async_display_callbacks:
+        await async_display_callbacks['generating'](
+            content=content,
+            elapsed_time=elapsed_str.strip() if elapsed_str else None
+        )

     return Panel(Markdown(content), title=f"Generating...{elapsed_str}", border_style="green")

praisonaiagents/tools/__init__.py ADDED
@@ -0,0 +1,4 @@
+"""Tools package for PraisonAI Agents"""
+from .tools import Tools
+
+__all__ = ['Tools']
praisonaiagents/tools/tools.py ADDED
@@ -0,0 +1,40 @@
+"""Tools module for PraisonAI Agents"""
+from typing import List, Dict
+import logging
+import importlib
+
+class Tools:
+    @staticmethod
+    def internet_search(query: str) -> List[Dict]:
+        """
+        Perform a search using DuckDuckGo.
+
+        Args:
+            query (str): The search query.
+
+        Returns:
+            list: A list of search result titles, URLs, and snippets.
+        """
+        # Check if duckduckgo_search is installed
+        if importlib.util.find_spec("duckduckgo_search") is None:
+            error_msg = "DuckDuckGo search is not available. Please install duckduckgo_search package using: pip install duckduckgo_search"
+            logging.error(error_msg)
+            return [{"error": error_msg}]
+
+        try:
+            # Import only when needed
+            from duckduckgo_search import DDGS
+            results = []
+            ddgs = DDGS()
+            for result in ddgs.text(keywords=query, max_results=5):
+                results.append({
+                    "title": result.get("title", ""),
+                    "url": result.get("href", ""),
+                    "snippet": result.get("body", "")
+                })
+            return results
+
+        except Exception as e:
+            error_msg = f"Error during DuckDuckGo search: {e}"
+            logging.error(error_msg)
+            return [{"error": error_msg}]
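
A usage sketch for the new tool; it requires the optional duckduckgo_search dependency (pip install duckduckgo_search), and the query string is illustrative:

from praisonaiagents import Tools

results = Tools.internet_search("praisonaiagents python package")
for item in results:
    # Each entry is a dict with "title", "url" and "snippet",
    # or a single {"error": ...} entry if the search could not run
    print(item.get("title"), "-", item.get("url"))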
praisonaiagents-0.0.19.dist-info/METADATA → praisonaiagents-0.0.21.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: praisonaiagents
-Version: 0.0.19
+Version: 0.0.21
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
praisonaiagents-0.0.19.dist-info/RECORD → praisonaiagents-0.0.21.dist-info/RECORD
@@ -1,9 +1,9 @@
-praisonaiagents/__init__.py,sha256=bXQwi56S1iZOXs_TyXs3doxVxAeRAyqKVyZEIoBOipM,853
-praisonaiagents/main.py,sha256=XtrOfVDK4KEaclOTBxUXpc_q9F44Udue6u1W8wMvSrg,12529
+praisonaiagents/__init__.py,sha256=xJLN8i6V9SRmJFMxSRWDQt_hBePoupVd3WanNIgbBbc,1052
+praisonaiagents/main.py,sha256=7Phfe0gdxHzbhPb3WRzBTfq9CaLq0K31M5DM_4oCiCQ,12451
 praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
-praisonaiagents/agent/agent.py,sha256=_UmUWGbZjd3tApPX2T6RPB5Pll3Gos97XBhhg_zmfn8,30662
+praisonaiagents/agent/agent.py,sha256=NjoFCM1d2IWEgAGNcc_g0OGMLWUebkba_BgQtjM1tT4,32419
 praisonaiagents/agents/__init__.py,sha256=7RDeQNSqZg5uBjD4M_0p_F6YgfWuDuxPFydPU50kDYc,120
-praisonaiagents/agents/agents.py,sha256=P8JJ1849-djMDkMuP0kNhPwtg97L8gO60jYXzXFcPc0,21762
+praisonaiagents/agents/agents.py,sha256=EnrX-nvIsRyi2Xv1fBpZZ1kCaY9CvrcNqxgL_GNqaQc,23089
 praisonaiagents/build/lib/praisonaiagents/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 praisonaiagents/build/lib/praisonaiagents/main.py,sha256=zDhN5KKtKbfruolDNxlyJkcFlkSt4KQkQTDRfQVAhxc,3960
 praisonaiagents/build/lib/praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
@@ -16,7 +16,9 @@ praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0
 praisonaiagents/process/process.py,sha256=4qXdrCDQPH5MtvHvdJVURXKNgSl6ae3OYTiqAF_A2ZU,24295
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
 praisonaiagents/task/task.py,sha256=UiiWgLDOdX_w0opP8h8-u-leVZlq1CkpGUmf7L2qyJs,3110
-praisonaiagents-0.0.19.dist-info/METADATA,sha256=engdE6dLJ0SxfZv28CX2MKTZuYc04I_CdIWBjOdijXA,233
-praisonaiagents-0.0.19.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-praisonaiagents-0.0.19.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.19.dist-info/RECORD,,
+praisonaiagents/tools/__init__.py,sha256=rFen7LHI55mxBVW2raQmqKnK_JFuvTVVAuUmrQpDg_c,87
+praisonaiagents/tools/tools.py,sha256=fs0fjW7Y_k9qsaR0CmlPjayiiVrDxDmgfr5SwR1Tdck,1363
+praisonaiagents-0.0.21.dist-info/METADATA,sha256=o5mWRqp1BpA36xWCw2V7mArC7iujMSD2z6lTWl8_9Qo,233
+praisonaiagents-0.0.21.dist-info/WHEEL,sha256=A3WOREP4zgxI0fKrHUG8DC8013e3dK3n7a6HDbcEIwE,91
+praisonaiagents-0.0.21.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.21.dist-info/RECORD,,
praisonaiagents-0.0.19.dist-info/WHEEL → praisonaiagents-0.0.21.dist-info/WHEEL
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (75.6.0)
+Generator: setuptools (75.7.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
