praisonaiagents 0.0.103__py3-none-any.whl → 0.0.105__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only.
@@ -368,11 +368,97 @@ class Agent:
  max_reflect: int = 3,
  min_reflect: int = 1,
  reflect_llm: Optional[str] = None,
+ reflect_prompt: Optional[str] = None,
  user_id: Optional[str] = None,
  reasoning_steps: bool = False,
  guardrail: Optional[Union[Callable[['TaskOutput'], Tuple[bool, Any]], str]] = None,
  max_guardrail_retries: int = 3
  ):
+ """Initialize an Agent instance.
+
+ Args:
+ name (Optional[str], optional): Name of the agent used for identification and logging.
+ If None, defaults to "Agent". Defaults to None.
+ role (Optional[str], optional): Role or job title that defines the agent's expertise
+ and behavior patterns. Examples: "Data Analyst", "Content Writer". Defaults to None.
+ goal (Optional[str], optional): Primary objective or goal the agent aims to achieve.
+ Defines the agent's purpose and success criteria. Defaults to None.
+ backstory (Optional[str], optional): Background story or context that shapes the agent's
+ personality and decision-making approach. Defaults to None.
+ instructions (Optional[str], optional): Direct instructions that override role, goal,
+ and backstory when provided. Used for simple, task-specific agents. Defaults to None.
+ llm (Optional[Union[str, Any]], optional): Language model configuration. Can be a model
+ name string (e.g., "gpt-4o", "anthropic/claude-3-sonnet") or a configured LLM object.
+ Defaults to environment variable OPENAI_MODEL_NAME or "gpt-4o".
+ tools (Optional[List[Any]], optional): List of tools, functions, or capabilities
+ available to the agent for task execution. Can include callables, tool objects,
+ or MCP instances. Defaults to None.
+ function_calling_llm (Optional[Any], optional): Dedicated language model for function
+ calling operations. If None, uses the main llm parameter. Defaults to None.
+ max_iter (int, optional): Maximum number of iterations the agent can perform during
+ task execution to prevent infinite loops. Defaults to 20.
+ max_rpm (Optional[int], optional): Maximum requests per minute to rate limit API calls
+ and prevent quota exhaustion. If None, no rate limiting is applied. Defaults to None.
+ max_execution_time (Optional[int], optional): Maximum execution time in seconds for
+ agent operations before timeout. If None, no time limit is enforced. Defaults to None.
+ memory (Optional[Any], optional): Memory system for storing and retrieving information
+ across conversations. Requires memory dependencies to be installed. Defaults to None.
+ verbose (bool, optional): Enable detailed logging and status updates during agent
+ execution for debugging and monitoring. Defaults to True.
+ allow_delegation (bool, optional): Allow the agent to delegate tasks to other agents
+ or sub-processes when appropriate. Defaults to False.
+ step_callback (Optional[Any], optional): Callback function called after each step
+ of agent execution for custom monitoring or intervention. Defaults to None.
+ cache (bool, optional): Enable caching of responses and computations to improve
+ performance and reduce API costs. Defaults to True.
+ system_template (Optional[str], optional): Custom template for system prompts that
+ overrides the default system prompt generation. Defaults to None.
+ prompt_template (Optional[str], optional): Template for formatting user prompts
+ before sending to the language model. Defaults to None.
+ response_template (Optional[str], optional): Template for formatting agent responses
+ before returning to the user. Defaults to None.
+ allow_code_execution (Optional[bool], optional): Enable the agent to execute code
+ snippets during task completion. Use with caution for security. Defaults to False.
+ max_retry_limit (int, optional): Maximum number of retry attempts for failed operations
+ before giving up. Helps handle transient errors. Defaults to 2.
+ respect_context_window (bool, optional): Automatically manage context window size
+ to prevent token limit errors with large conversations. Defaults to True.
+ code_execution_mode (Literal["safe", "unsafe"], optional): Safety mode for code execution.
+ "safe" restricts dangerous operations, "unsafe" allows full code execution. Defaults to "safe".
+ embedder_config (Optional[Dict[str, Any]], optional): Configuration dictionary for
+ text embedding models used in knowledge retrieval and similarity search. Defaults to None.
+ knowledge (Optional[List[str]], optional): List of knowledge sources (file paths, URLs,
+ or text content) to be processed and made available to the agent. Defaults to None.
+ knowledge_config (Optional[Dict[str, Any]], optional): Configuration for knowledge
+ processing and retrieval system including chunking and indexing parameters. Defaults to None.
+ use_system_prompt (Optional[bool], optional): Whether to include system prompts in
+ conversations to establish agent behavior and context. Defaults to True.
+ markdown (bool, optional): Enable markdown formatting in agent responses for better
+ readability and structure. Defaults to True.
+ self_reflect (bool, optional): Enable self-reflection capabilities where the agent
+ evaluates and improves its own responses. Defaults to False.
+ max_reflect (int, optional): Maximum number of self-reflection iterations to prevent
+ excessive reflection loops. Defaults to 3.
+ min_reflect (int, optional): Minimum number of self-reflection iterations required
+ before accepting a response as satisfactory. Defaults to 1.
+ reflect_llm (Optional[str], optional): Dedicated language model for self-reflection
+ operations. If None, uses the main llm parameter. Defaults to None.
+ reflect_prompt (Optional[str], optional): Custom prompt template for self-reflection
+ that guides the agent's self-evaluation process. Defaults to None.
+ user_id (Optional[str], optional): Unique identifier for the user or session to
+ enable personalized responses and memory isolation. Defaults to "praison".
+ reasoning_steps (bool, optional): Enable step-by-step reasoning output to show the
+ agent's thought process during problem solving. Defaults to False.
+ guardrail (Optional[Union[Callable[['TaskOutput'], Tuple[bool, Any]], str]], optional):
+ Safety mechanism to validate agent outputs. Can be a validation function or
+ description string for LLM-based validation. Defaults to None.
+ max_guardrail_retries (int, optional): Maximum number of retry attempts when guardrail
+ validation fails before giving up. Defaults to 3.
+
+ Raises:
+ ValueError: If all of name, role, goal, backstory, and instructions are None.
+ ImportError: If memory or LLM features are requested but dependencies are not installed.
+ """
  # Add check at start if memory is requested
  if memory is not None:
  try:
@@ -470,6 +556,7 @@ class Agent:
  self.markdown = markdown
  self.max_reflect = max_reflect
  self.min_reflect = min_reflect
+ self.reflect_prompt = reflect_prompt
  # Use the same model selection logic for reflect_llm
  self.reflect_llm = reflect_llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
  self.console = Console() # Create a single console instance for the agent
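
The two hunks above add a reflect_prompt parameter that, when set, replaces the default "Identify any flaws, improvements, or actions." instruction in the self-reflection loop. A minimal sketch of how a caller might use it, based only on the parameter names and defaults documented in the new docstring (the top-level import path is assumed):

    from praisonaiagents import Agent  # import path assumed from the package name

    # Hypothetical usage: supply custom reflection guidance instead of the
    # default "Identify any flaws, improvements, or actions." instruction.
    agent = Agent(
        instructions="Summarise the latest AI research news",
        self_reflect=True,                 # enable the reflection loop
        min_reflect=1,                     # docstring defaults, shown for clarity
        max_reflect=3,
        reflect_llm="gpt-4o-mini",         # illustrative dedicated reflection model
        reflect_prompt="Check the summary for factual errors and missing citations."
    )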
@@ -1230,7 +1317,7 @@ Your Goal: {self.goal}
 
  reflection_prompt = f"""
  Reflect on your previous response: '{response_text}'.
- Identify any flaws, improvements, or actions.
+ {self.reflect_prompt if self.reflect_prompt else "Identify any flaws, improvements, or actions."}
  Provide a "satisfactory" status ('yes' or 'no').
  Output MUST be JSON with 'reflection' and 'satisfactory'.
  """
@@ -1328,7 +1415,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  return cleaned
 
  async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
- """Async version of chat method. TODO: Requires Syncing with chat method."""
+ """Async version of chat method with self-reflection support."""
  # Log all parameter values when in debug mode
  if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
  param_info = {
@@ -1501,10 +1588,79 @@ Your Goal: {self.goal}
  messages=messages,
  temperature=temperature
  )
+
+ response_text = response.choices[0].message.content
+
+ # Handle self-reflection if enabled
+ if self.self_reflect:
+ reflection_count = 0
+
+ while True:
+ reflection_prompt = f"""
+ Reflect on your previous response: '{response_text}'.
+ {self.reflect_prompt if self.reflect_prompt else "Identify any flaws, improvements, or actions."}
+ Provide a "satisfactory" status ('yes' or 'no').
+ Output MUST be JSON with 'reflection' and 'satisfactory'.
+ """
+
+ # Add reflection prompt to messages
+ reflection_messages = messages + [
+ {"role": "assistant", "content": response_text},
+ {"role": "user", "content": reflection_prompt}
+ ]
+
+ try:
+ reflection_response = await async_client.beta.chat.completions.parse(
+ model=self.reflect_llm if self.reflect_llm else self.llm,
+ messages=reflection_messages,
+ temperature=temperature,
+ response_format=ReflectionOutput
+ )
+
+ reflection_output = reflection_response.choices[0].message.parsed
+
+ if self.verbose:
+ display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'", console=self.console)
+
+ # Only consider satisfactory after minimum reflections
+ if reflection_output.satisfactory == "yes" and reflection_count >= self.min_reflect - 1:
+ if self.verbose:
+ display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
+ break
+
+ # Check if we've hit max reflections
+ if reflection_count >= self.max_reflect - 1:
+ if self.verbose:
+ display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
+ break
+
+ # Regenerate response based on reflection
+ regenerate_messages = reflection_messages + [
+ {"role": "assistant", "content": f"Self Reflection: {reflection_output.reflection} Satisfactory?: {reflection_output.satisfactory}"},
+ {"role": "user", "content": "Now regenerate your response using the reflection you made"}
+ ]
+
+ new_response = await async_client.chat.completions.create(
+ model=self.llm,
+ messages=regenerate_messages,
+ temperature=temperature
+ )
+ response_text = new_response.choices[0].message.content
+ reflection_count += 1
+
+ except Exception as e:
+ if self.verbose:
+ display_error(f"Error in parsing self-reflection json {e}. Retrying", console=self.console)
+ logging.error("Reflection parsing failed.", exc_info=True)
+ reflection_count += 1
+ if reflection_count >= self.max_reflect:
+ break
+ continue
+
  if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
  total_time = time.time() - start_time
  logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
- return response.choices[0].message.content
+ return response_text
  except Exception as e:
  display_error(f"Error in chat completion: {e}")
  if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
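
With the hunk above, achat() now runs the same reflection loop as the synchronous path and returns the post-reflection text instead of the raw first completion. A hedged sketch of calling it, assuming the Agent class and the achat signature shown in the diff (the top-level import path and model behaviour are assumptions):

    import asyncio
    from praisonaiagents import Agent  # top-level import assumed

    async def main():
        agent = Agent(
            instructions="Explain vector databases in two paragraphs",
            self_reflect=True,
            reflect_prompt="Check for jargon and simplify where possible.",
        )
        # achat(prompt, temperature=0.2, tools=None, ...) per the signature above;
        # the returned string is response_text after the reflection loop finishes.
        answer = await agent.achat("What is a vector database?")
        print(answer)

    asyncio.run(main())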
@@ -7,6 +7,9 @@ session API for developers building stateful agent applications.
 
  import os
  import uuid
+ import requests
+ import json
+ import time
  from typing import Any, Dict, List, Optional
  from .agent import Agent
  from .memory import Memory
@@ -22,68 +25,90 @@ class Session:
  - Memory operations (short-term, long-term, user-specific)
  - Knowledge base operations
  - Agent state management
+ - Remote agent connectivity
 
- Example:
+ Examples:
+ # Local session with agent
  session = Session(session_id="chat_123", user_id="user_456")
+ agent = session.Agent(name="Assistant", role="Helpful AI")
 
- # Create stateful agent
- agent = session.Agent(
- name="Assistant",
- role="Helpful AI",
- memory=True
- )
+ # Remote agent session (similar to Google ADK)
+ session = Session(agent_url="192.168.1.10:8000/agent")
+ response = session.chat("Hello from remote client!")
 
  # Save session state
  session.save_state({"conversation_topic": "AI research"})
-
- # Restore state later
- session.restore_state()
  """
 
  def __init__(
  self,
  session_id: Optional[str] = None,
  user_id: Optional[str] = None,
+ agent_url: Optional[str] = None,
  memory_config: Optional[Dict[str, Any]] = None,
- knowledge_config: Optional[Dict[str, Any]] = None
+ knowledge_config: Optional[Dict[str, Any]] = None,
+ timeout: int = 30
  ):
  """
- Initialize a new session with optional persistence.
+ Initialize a new session with optional persistence or remote agent connectivity.
 
  Args:
  session_id: Unique session identifier. Auto-generated if None.
  user_id: User identifier for user-specific memory operations.
+ agent_url: URL of remote agent for direct connectivity (e.g., "192.168.1.10:8000/agent")
  memory_config: Configuration for memory system (defaults to RAG)
- knowledge_config: Configuration for knowledge base system
+ knowledge_config: Configuration for knowledge base system
+ timeout: HTTP timeout for remote agent calls (default: 30 seconds)
  """
  self.session_id = session_id or str(uuid.uuid4())[:8]
  self.user_id = user_id or "default_user"
+ self.agent_url = agent_url
+ self.timeout = timeout
+ self.is_remote = agent_url is not None
+
+ # Validate agent_url format
+ if self.is_remote:
+ if not self.agent_url.startswith(('http://', 'https://')):
+ # Assume http if no protocol specified
+ self.agent_url = f"http://{self.agent_url}"
+ # Test connectivity to remote agent
+ self._test_remote_connection()
+
+ # Initialize memory with sensible defaults (only for local sessions)
+ if not self.is_remote:
+ default_memory_config = {
+ "provider": "rag",
+ "use_embedding": True,
+ "rag_db_path": f".praison/sessions/{self.session_id}/chroma_db"
+ }
+ if memory_config:
+ default_memory_config.update(memory_config)
+ self.memory_config = default_memory_config
 
- # Initialize memory with sensible defaults
- default_memory_config = {
- "provider": "rag",
- "use_embedding": True,
- "rag_db_path": f".praison/sessions/{self.session_id}/chroma_db"
- }
- if memory_config:
- default_memory_config.update(memory_config)
- self.memory_config = default_memory_config
-
- # Initialize knowledge with session-specific config
- default_knowledge_config = knowledge_config or {}
- self.knowledge_config = default_knowledge_config
+ # Initialize knowledge with session-specific config
+ default_knowledge_config = knowledge_config or {}
+ self.knowledge_config = default_knowledge_config
 
- # Create session directory
- os.makedirs(f".praison/sessions/{self.session_id}", exist_ok=True)
+ # Create session directory
+ os.makedirs(f".praison/sessions/{self.session_id}", exist_ok=True)
 
- # Initialize components lazily
- self._memory = None
- self._knowledge = None
- self._agents_instance = None
+ # Initialize components lazily
+ self._memory = None
+ self._knowledge = None
+ self._agents_instance = None
+ else:
+ # For remote sessions, disable local memory/knowledge
+ self.memory_config = {}
+ self.knowledge_config = {}
+ self._memory = None
+ self._knowledge = None
+ self._agents_instance = None
 
  @property
  def memory(self) -> Memory:
  """Lazy-loaded memory instance"""
+ if self.is_remote:
+ raise ValueError("Memory operations are not available for remote agent sessions")
  if self._memory is None:
  self._memory = Memory(config=self.memory_config)
  return self._memory
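
The constructor now branches on agent_url: without it, the session builds local memory and knowledge stores under .praison/sessions/<id>/; with it, the session acts as a thin HTTP client and skips local state entirely. A sketch of both modes, using only the parameters shown in the hunk above (the module path is taken from the RECORD entry and is otherwise an assumption):

    from praisonaiagents.session import Session  # module path assumed from the RECORD entry

    # Local session: memory defaults to the RAG config shown above and data is
    # persisted under .praison/sessions/chat_123/chroma_db
    local = Session(session_id="chat_123", user_id="user_456")

    # Remote session: the URL gets an http:// prefix if none is given, and
    # _test_remote_connection() probes /health (falling back to HEAD) during __init__,
    # raising ConnectionError if the remote agent cannot be reached.
    remote = Session(agent_url="192.168.1.10:8000/agent", timeout=10)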
@@ -91,6 +116,8 @@ class Session:
  @property
  def knowledge(self) -> Knowledge:
  """Lazy-loaded knowledge instance"""
+ if self.is_remote:
+ raise ValueError("Knowledge operations are not available for remote agent sessions")
  if self._knowledge is None:
  self._knowledge = Knowledge(config=self.knowledge_config)
  return self._knowledge
@@ -119,7 +146,13 @@ class Session:
 
  Returns:
  Configured Agent instance
+
+ Raises:
+ ValueError: If this is a remote session (use chat() instead)
  """
+ if self.is_remote:
+ raise ValueError("Cannot create local agents in remote sessions. Use chat() to communicate with the remote agent.")
+
  agent_kwargs = {
  "name": name,
  "role": role,
@@ -150,7 +183,12 @@ class Session:
 
  Args:
  state_data: Dictionary of state data to save
+
+ Raises:
+ ValueError: If this is a remote session
  """
+ if self.is_remote:
+ raise ValueError("State operations are not available for remote agent sessions")
  state_text = f"Session state: {state_data}"
  self.memory.store_short_term(
  text=state_text,
@@ -168,7 +206,12 @@ class Session:
 
  Returns:
  Dictionary of restored state data
+
+ Raises:
+ ValueError: If this is a remote session
  """
+ if self.is_remote:
+ raise ValueError("State operations are not available for remote agent sessions")
  # Use metadata-based search for better SQLite compatibility
  results = self.memory.search_short_term(
  query=f"type:session_state",
@@ -284,7 +327,105 @@ class Session:
  max_items=max_items
  )
 
+ def _test_remote_connection(self) -> None:
+ """
+ Test connectivity to the remote agent.
+
+ Raises:
+ ConnectionError: If unable to connect to the remote agent
+ """
+ try:
+ # Try a simple GET request to check if the server is responding
+ test_url = self.agent_url.rstrip('/') + '/health' if '/health' not in self.agent_url else self.agent_url
+ response = requests.get(test_url, timeout=self.timeout)
+ if response.status_code != 200:
+ # If health endpoint fails, try the main endpoint
+ response = requests.head(self.agent_url, timeout=self.timeout)
+ if response.status_code not in [200, 405]: # 405 = Method Not Allowed is OK
+ raise ConnectionError(f"Remote agent returned status code: {response.status_code}")
+ print(f"✅ Successfully connected to remote agent at {self.agent_url}")
+ except requests.exceptions.Timeout:
+ raise ConnectionError(f"Timeout connecting to remote agent at {self.agent_url}")
+ except requests.exceptions.ConnectionError:
+ raise ConnectionError(f"Failed to connect to remote agent at {self.agent_url}")
+ except Exception as e:
+ raise ConnectionError(f"Error connecting to remote agent: {str(e)}")
+
+ def chat(self, message: str, **kwargs) -> str:
+ """
+ Send a message to the remote agent or handle local session.
+
+ Args:
+ message: The message to send to the agent
+ **kwargs: Additional parameters for the request
+
+ Returns:
+ The agent's response
+
+ Raises:
+ ValueError: If this is not a remote session
+ ConnectionError: If unable to communicate with remote agent
+ """
+ if not self.is_remote:
+ raise ValueError("chat() method is only available for remote agent sessions. Use Agent.chat() for local agents.")
+
+ try:
+ # Prepare the request payload
+ payload = {
+ "query": message,
+ "session_id": self.session_id,
+ "user_id": self.user_id,
+ **kwargs
+ }
+
+ # Send POST request to the remote agent
+ response = requests.post(
+ self.agent_url,
+ json=payload,
+ headers={"Content-Type": "application/json"},
+ timeout=self.timeout
+ )
+
+ # Check if request was successful
+ response.raise_for_status()
+
+ # Parse the response
+ result = response.json()
+
+ # Extract the agent's response
+ if isinstance(result, dict):
+ return result.get("response", str(result))
+ else:
+ return str(result)
+
+ except requests.exceptions.Timeout:
+ raise ConnectionError(f"Timeout communicating with remote agent at {self.agent_url}")
+ except requests.exceptions.ConnectionError:
+ raise ConnectionError(f"Failed to communicate with remote agent at {self.agent_url}")
+ except requests.exceptions.HTTPError as e:
+ raise ConnectionError(f"HTTP error from remote agent: {e}")
+ except json.JSONDecodeError:
+ # If response is not JSON, return the raw text
+ return response.text
+ except Exception as e:
+ raise ConnectionError(f"Error communicating with remote agent: {str(e)}")
+
+ def send_message(self, message: str, **kwargs) -> str:
+ """
+ Alias for chat() method to match Google ADK pattern.
+
+ Args:
+ message: The message to send to the agent
+ **kwargs: Additional parameters for the request
+
+ Returns:
+ The agent's response
+ """
+ return self.chat(message, **kwargs)
+
  def __str__(self) -> str:
+ if self.is_remote:
+ return f"Session(id='{self.session_id}', user='{self.user_id}', remote_agent='{self.agent_url}')"
  return f"Session(id='{self.session_id}', user='{self.user_id}')"
 
  def __repr__(self) -> str:
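
As the chat() implementation above shows, a remote session posts a JSON body of the form {"query": ..., "session_id": ..., "user_id": ...} plus any extra keyword arguments, and expects either a JSON object with a "response" key or plain text back. A hedged sketch of the client side (the import path is assumed from the RECORD entry; whether the server honours extra fields such as temperature depends on the remote agent):

    from praisonaiagents.session import Session  # module path assumed from the RECORD entry

    session = Session(agent_url="http://192.168.1.10:8000/agent")

    # send_message() is a thin alias for chat(); extra kwargs are merged into the JSON payload.
    reply = session.chat("Hello from remote client!")
    reply = session.send_message("Hello again", temperature=0.1)  # "temperature" is illustrative
    print(reply)

    # Local-only operations raise ValueError on a remote session:
    # session.memory, session.knowledge, session.Agent(...),
    # session.save_state(...), session.restore_state()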
@@ -86,7 +86,9 @@ class MinimalTelemetry:
  self._posthog = Posthog(
  project_api_key='phc_skZpl3eFLQJ4iYjsERNMbCO6jfeSJi2vyZlPahKgxZ7',
  host='https://eu.i.posthog.com',
- disable_geoip=True
+ disable_geoip=True,
+ on_error=lambda e: self.logger.debug(f"PostHog error: {e}"),
+ sync_mode=False # Use async mode to prevent blocking
  )
  except:
  self._posthog = None
@@ -220,6 +222,7 @@ class MinimalTelemetry:
  '$geoip_disable': True
  }
  )
+ # Don't flush here - let PostHog handle it asynchronously
  except:
  pass
 
@@ -227,6 +230,25 @@ class MinimalTelemetry:
  for key in self._metrics:
  if isinstance(self._metrics[key], int):
  self._metrics[key] = 0
+
+ def shutdown(self):
+ """
+ Shutdown telemetry and ensure all events are sent.
+ """
+ if not self.enabled:
+ return
+
+ # Final flush
+ self.flush()
+
+ # Shutdown PostHog if available
+ if hasattr(self, '_posthog') and self._posthog:
+ try:
+ # Force a synchronous flush before shutdown
+ self._posthog.flush()
+ self._posthog.shutdown()
+ except:
+ pass
 
 
  # Global telemetry instance
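
Because events are now sent asynchronously (sync_mode=False) and flush() no longer blocks, the new shutdown() method is where queued PostHog events get drained before the process exits. A minimal sketch, assuming MinimalTelemetry can be constructed directly (its constructor arguments are not shown in this diff; most applications would use the package's global telemetry instance instead):

    import atexit
    from praisonaiagents.telemetry.telemetry import MinimalTelemetry  # module path assumed from the RECORD entry

    telemetry = MinimalTelemetry()      # constructor signature assumed; not shown in this diff
    # shutdown() is a no-op when telemetry is disabled; otherwise it flushes the
    # metrics and forces a final PostHog flush/shutdown before the interpreter exits.
    atexit.register(telemetry.shutdown)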
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.103
+ Version: 0.0.105
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -8,7 +8,7 @@ Requires-Dist: pydantic
  Requires-Dist: rich
  Requires-Dist: openai
  Requires-Dist: mcp>=1.6.0
- Requires-Dist: posthog>=4.0.0
+ Requires-Dist: posthog>=3.0.0
  Provides-Extra: mcp
  Requires-Dist: mcp>=1.6.0; extra == "mcp"
  Requires-Dist: fastapi>=0.115.0; extra == "mcp"
@@ -31,7 +31,7 @@ Provides-Extra: api
  Requires-Dist: fastapi>=0.115.0; extra == "api"
  Requires-Dist: uvicorn>=0.34.0; extra == "api"
  Provides-Extra: telemetry
- Requires-Dist: posthog>=4.0.0; extra == "telemetry"
+ Requires-Dist: posthog>=3.0.0; extra == "telemetry"
  Provides-Extra: all
  Requires-Dist: praisonaiagents[memory]; extra == "all"
  Requires-Dist: praisonaiagents[knowledge]; extra == "all"
@@ -1,9 +1,9 @@
  praisonaiagents/__init__.py,sha256=10wExtoVkZ31OwxoKjj9gtWq2uvC05dYBommV2Eii4M,2769
  praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9813
  praisonaiagents/main.py,sha256=_-XE7_Y7ChvtLQMivfNFrrnAhv4wSSDhH9WJMWlkS0w,16315
- praisonaiagents/session.py,sha256=CI-ffCiOfmgB-1zFFik9daKCB5Sm41Q9ZOaq1-oSLW8,9250
+ praisonaiagents/session.py,sha256=d-CZPYikOHb0q-H9f_IWKJsypnQfz1YKeLLkyxs6oDo,15532
  praisonaiagents/agent/__init__.py,sha256=j0T19TVNbfZcClvpbZDDinQxZ0oORgsMrMqx16jZ-bA,128
- praisonaiagents/agent/agent.py,sha256=it38pIYzHQIn2qscIuuvfgWyC9gPLZFj-nN8tDI8x5A,97766
+ praisonaiagents/agent/agent.py,sha256=pyHW34UkqlMaiPg5e8mzLJZoI8mi9D5WnFEBd6pJ9UE,109857
  praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
  praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
  praisonaiagents/agents/agents.py,sha256=C_yDdJB4XUuwKA9DrysAtAj3zSYT0IKtfCT4Pxo0oyI,63309
@@ -27,7 +27,7 @@ praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSS
  praisonaiagents/task/task.py,sha256=imqJ8wzZzVyUSym2EyF2tC-vAsV1UdfI_P3YM5mqAiw,20786
  praisonaiagents/telemetry/__init__.py,sha256=5iAOrj_N_cKMmh2ltWGYs3PfOYt_jcwUoElW8fTAIsc,3062
  praisonaiagents/telemetry/integration.py,sha256=36vvYac8tW92YzQYbBeKWKM8JC9IiizlxhUy3AFqPlA,8667
- praisonaiagents/telemetry/telemetry.py,sha256=T2Mv_iOXYbL-C3CZW5EEEs7N0dUk1S2xrD1FQjVaxmc,11064
+ praisonaiagents/telemetry/telemetry.py,sha256=SAEK5lrHn-Rb3nk_Yx1sjAdRxqT63ycyNRv3ZGh9Rck,11812
  praisonaiagents/tools/README.md,sha256=bIQGTSqQbC8l_UvTAnKbnh1TxrybSFGbCqxnhvDwkE4,4450
  praisonaiagents/tools/__init__.py,sha256=Rrgi7_3-yLHpfBB81WUi0-wD_wb_BsukwHVdjDYAF-0,9316
  praisonaiagents/tools/arxiv_tools.py,sha256=1stb31zTjLTon4jCnpZG5de9rKc9QWgC0leLegvPXWo,10528
@@ -51,7 +51,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
  praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
  praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
  praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
- praisonaiagents-0.0.103.dist-info/METADATA,sha256=QPwe9pWnpP519hGH179MF5Vt_Jt0uD-9F-O5D5IhK84,1669
- praisonaiagents-0.0.103.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- praisonaiagents-0.0.103.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
- praisonaiagents-0.0.103.dist-info/RECORD,,
+ praisonaiagents-0.0.105.dist-info/METADATA,sha256=G161N724qL4u2KTozdfECuF3m9vbR7kEFIgrPT0N_Rs,1669
+ praisonaiagents-0.0.105.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ praisonaiagents-0.0.105.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+ praisonaiagents-0.0.105.dist-info/RECORD,,