praisonaiagents 0.0.36__py3-none-any.whl → 0.0.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/praisonaiagents/__init__.py
+++ b/praisonaiagents/__init__.py
@@ -7,6 +7,8 @@ from .agents.agents import PraisonAIAgents
 from .task.task import Task
 from .tools.tools import Tools
 from .agents.autoagents import AutoAgents
+from .knowledge.knowledge import Knowledge
+from .knowledge.chunking import Chunking
 from .memory.memory import Memory
 from .main import (
     TaskOutput,
@@ -48,4 +50,6 @@ __all__ = [
     'register_display_callback',
     'sync_display_callbacks',
     'async_display_callbacks',
+    'Knowledge',
+    'Chunking'
 ]
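
With these exports in place, both classes become importable from the package root. A minimal sketch, assuming the knowledge extra is installed:

    from praisonaiagents import Knowledge, Chunking

    chunker = Chunking(chunker_type='recursive', chunk_size=512, chunk_overlap=50)
    kb = Knowledge()  # config=None falls back to the default Chroma-backed setup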
--- a/praisonaiagents/agent/agent.py
+++ b/praisonaiagents/agent/agent.py
@@ -20,6 +20,7 @@ from ..main import (
     adisplay_instruction
 )
 import inspect
+import uuid

 if TYPE_CHECKING:
     from ..task.task import Task
@@ -176,13 +177,15 @@ class Agent:
         respect_context_window: bool = True,
         code_execution_mode: Literal["safe", "unsafe"] = "safe",
         embedder_config: Optional[Dict[str, Any]] = None,
-        knowledge_sources: Optional[List[Any]] = None,
+        knowledge: Optional[List[str]] = None,
+        knowledge_config: Optional[Dict[str, Any]] = None,
         use_system_prompt: Optional[bool] = True,
         markdown: bool = True,
         self_reflect: bool = False,
         max_reflect: int = 3,
         min_reflect: int = 1,
-        reflect_llm: Optional[str] = None
+        reflect_llm: Optional[str] = None,
+        user_id: Optional[str] = None
     ):
         # Handle backward compatibility for required fields
         if all(x is None for x in [name, role, goal, backstory, instructions]):
@@ -226,7 +229,7 @@ class Agent:
         self.respect_context_window = respect_context_window
         self.code_execution_mode = code_execution_mode
         self.embedder_config = embedder_config
-        self.knowledge_sources = knowledge_sources
+        self.knowledge = knowledge
         self.use_system_prompt = use_system_prompt
         self.chat_history = []
         self.markdown = markdown
@@ -242,6 +245,36 @@ Your Role: {self.role}\n
 Your Goal: {self.goal}
 """

+        # Generate unique IDs
+        self.agent_id = str(uuid.uuid4())
+
+        # Store user_id
+        self.user_id = user_id
+
+        # Initialize Knowledge with provided or default config
+        from praisonaiagents.knowledge import Knowledge
+        self.knowledge = Knowledge(knowledge_config or None)
+
+        # Handle knowledge
+        if knowledge:
+            for source in knowledge:
+                self._process_knowledge(source)
+
+    def _process_knowledge(self, knowledge_item):
+        """Process and store knowledge from a file path, URL, or string."""
+        try:
+            if os.path.exists(knowledge_item):
+                # It's a file path
+                self.knowledge.add(knowledge_item, user_id=self.user_id, agent_id=self.agent_id)
+            elif knowledge_item.startswith("http://") or knowledge_item.startswith("https://"):
+                # It's a URL
+                pass
+            else:
+                # It's a string content
+                self.knowledge.store(knowledge_item, user_id=self.user_id, agent_id=self.agent_id)
+        except Exception as e:
+            logging.error(f"Error processing knowledge item: {knowledge_item}, error: {e}")
+
     def generate_task(self) -> 'Task':
         """Generate a Task object from the agent's instructions"""
         from ..task.task import Task
@@ -418,6 +451,21 @@ Your Goal: {self.goal}
         return None

     def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None):
+        # Search for existing knowledge if any knowledge is provided
+        if self.knowledge:
+            search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
+            if search_results:
+                # Check if search_results is a list of dictionaries or strings
+                if isinstance(search_results, dict) and 'results' in search_results:
+                    # Extract memory content from the results
+                    knowledge_content = "\n".join([result['memory'] for result in search_results['results']])
+                else:
+                    # If search_results is a list of strings, join them directly
+                    knowledge_content = "\n".join(search_results)
+
+                # Append found knowledge to the prompt
+                prompt = f"{prompt}\n\nKnowledge: {knowledge_content}"
+
         if self.use_system_prompt:
             system_prompt = f"""{self.backstory}\n
 Your Role: {self.role}\n
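
Taken together, the agent.py changes replace knowledge_sources with a knowledge list of strings that is ingested at construction time and searched on every chat() call. A brief usage sketch; the file path, string fact, and user id are illustrative:

    from praisonaiagents import Agent

    agent = Agent(
        instructions="Answer questions about the attached handbook.",
        knowledge=["handbook.pdf", "The office is in Zurich."],  # file path + raw string
        user_id="user_123"
    )
    # Matching knowledge is appended to the prompt before the LLM call
    print(agent.chat("Where is the office?"))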
--- a/praisonaiagents/agents/agents.py
+++ b/praisonaiagents/agents/agents.py
@@ -12,6 +12,7 @@ from ..agent.agent import Agent
 from ..task.task import Task
 from ..process.process import Process, LoopItems
 import asyncio
+import uuid

 # Set up logger
 logger = logging.getLogger(__name__)
@@ -44,10 +45,17 @@ def process_video(video_path: str, seconds_per_frame=2):
     return base64_frames

 class PraisonAIAgents:
-    def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None):
+    def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None, user_id=None):
         if not agents:
             raise ValueError("At least one agent must be provided")
-
+
+        self.run_id = str(uuid.uuid4())  # Auto-generate run_id
+        self.user_id = user_id  # Optional user_id
+
+        # Pass user_id to each agent
+        for agent in agents:
+            agent.user_id = self.user_id
+
         self.agents = agents
         self.tasks = {}
         if max_retries < 3:
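
A sketch of the orchestration-level change; the Task construction is illustrative:

    from praisonaiagents import Agent, Task, PraisonAIAgents

    researcher = Agent(instructions="Research the topic.")
    summarize = Task(description="Summarize recent findings.", agent=researcher)

    crew = PraisonAIAgents(agents=[researcher], tasks=[summarize], user_id="user_123")
    print(crew.run_id)  # fresh UUID for every PraisonAIAgents instance

Note that the propagation loop assigns user_id to every agent unconditionally, so it overwrites any user_id set on an individual Agent, including with None when none is passed here.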
--- /dev/null
+++ b/praisonaiagents/knowledge/__init__.py
@@ -0,0 +1,8 @@
+"""
+PraisonAI Knowledge - Advanced knowledge management system with configurable features
+"""
+
+from praisonaiagents.knowledge.knowledge import Knowledge
+from praisonaiagents.knowledge.chunking import Chunking
+
+__all__ = ["Knowledge", "Chunking"]
--- /dev/null
+++ b/praisonaiagents/knowledge/chunking.py
@@ -0,0 +1,182 @@
+from typing import List, Union, Optional, Dict, Any
+from functools import cached_property
+import importlib
+
+class Chunking:
+    """A unified class for text chunking with various chunking strategies."""
+
+    CHUNKER_PARAMS = {
+        'token': ['chunk_size', 'chunk_overlap', 'tokenizer'],
+        'word': ['chunk_size', 'chunk_overlap', 'tokenizer'],
+        'sentence': ['chunk_size', 'chunk_overlap', 'tokenizer'],
+        'semantic': ['chunk_size', 'embedding_model', 'tokenizer'],
+        'sdpm': ['chunk_size', 'embedding_model', 'tokenizer'],
+        'late': ['chunk_size', 'embedding_model', 'tokenizer'],
+        'recursive': ['chunk_size', 'tokenizer']
+    }
+
+    @cached_property
+    def SUPPORTED_CHUNKERS(self) -> Dict[str, Any]:
+        """Lazy load chunker classes."""
+        try:
+            from chonkie.chunker import (
+                TokenChunker,
+                WordChunker,
+                SentenceChunker,
+                SemanticChunker,
+                SDPMChunker,
+                LateChunker,
+                RecursiveChunker
+            )
+        except ImportError:
+            raise ImportError(
+                "chonkie package not found. Please install it using: pip install 'praisonaiagents[knowledge]'"
+            )
+
+        return {
+            'token': TokenChunker,
+            'word': WordChunker,
+            'sentence': SentenceChunker,
+            'semantic': SemanticChunker,
+            'sdpm': SDPMChunker,
+            'late': LateChunker,
+            'recursive': RecursiveChunker
+        }
+
+    def __init__(
+        self,
+        chunker_type: str = 'token',
+        chunk_size: int = 512,
+        chunk_overlap: int = 128,
+        tokenizer: str = "gpt2",
+        embedding_model: Optional[Union[str, Any]] = None,
+        **kwargs
+    ):
+        """Initialize the Chunking class."""
+        if chunker_type not in self.CHUNKER_PARAMS:
+            raise ValueError(
+                f"Unsupported chunker type: {chunker_type}. "
+                f"Must be one of: {list(self.CHUNKER_PARAMS.keys())}"
+            )
+
+        self.chunker_type = chunker_type
+        self.chunk_size = chunk_size
+        self.chunk_overlap = chunk_overlap
+        self.tokenizer = tokenizer
+        self._embedding_model = embedding_model
+        self.kwargs = kwargs
+
+        # Initialize these as None for lazy loading
+        self._chunker = None
+        self._embeddings = None
+
+    @cached_property
+    def embedding_model(self):
+        """Lazy load the embedding model."""
+        if self._embedding_model is None and self.chunker_type in ['semantic', 'sdpm', 'late']:
+            from chonkie.embeddings import AutoEmbeddings
+            return AutoEmbeddings.get_embeddings("all-MiniLM-L6-v2")
+        elif isinstance(self._embedding_model, str):
+            from chonkie.embeddings import AutoEmbeddings
+            return AutoEmbeddings.get_embeddings(self._embedding_model)
+        return self._embedding_model
+
+    def _get_chunker_params(self) -> Dict[str, Any]:
+        """Get the appropriate parameters for the current chunker type."""
+        allowed_params = self.CHUNKER_PARAMS[self.chunker_type]
+        params = {'chunk_size': self.chunk_size}
+
+        if 'chunk_overlap' in allowed_params:
+            params['chunk_overlap'] = self.chunk_overlap
+
+        if 'tokenizer' in allowed_params:
+            if self.chunker_type in ['semantic', 'sdpm', 'late']:
+                params['tokenizer'] = self.embedding_model.get_tokenizer_or_token_counter()
+            else:
+                params['tokenizer'] = self.tokenizer
+
+        if 'embedding_model' in allowed_params:
+            params['embedding_model'] = self.embedding_model
+
+        # Add any additional kwargs that are in allowed_params
+        for key, value in self.kwargs.items():
+            if key in allowed_params:
+                params[key] = value
+
+        return params
+
+    @cached_property
+    def chunker(self):
+        """Lazy load the chunker instance."""
+        if self._chunker is None:
+            chunker_cls = self.SUPPORTED_CHUNKERS[self.chunker_type]
+            common_params = self._get_chunker_params()
+            self._chunker = chunker_cls(**common_params)
+
+        return self._chunker
+
+    def _get_overlap_refinery(self, context_size: Optional[int] = None, **kwargs):
+        """Lazy load the overlap refinery."""
+        try:
+            from chonkie.refinery import OverlapRefinery
+        except ImportError:
+            raise ImportError("Failed to import OverlapRefinery from chonkie.refinery")
+
+        if context_size is None:
+            context_size = self.chunk_overlap
+
+        return OverlapRefinery(
+            context_size=context_size,
+            tokenizer=self.chunker.tokenizer,
+            **kwargs
+        )
+
+    def add_overlap_context(
+        self,
+        chunks: List[Any],
+        context_size: int = None,
+        mode: str = "suffix",
+        merge_context: bool = True
+    ) -> List[Any]:
+        """Add overlap context to chunks using OverlapRefinery."""
+        refinery = self._get_overlap_refinery(
+            context_size=context_size,
+            mode=mode,
+            merge_context=merge_context
+        )
+        return refinery.refine(chunks)
+
+    def chunk(
+        self,
+        text: Union[str, List[str]],
+        add_context: bool = False,
+        context_params: Optional[Dict[str, Any]] = None
+    ) -> Union[List[Any], List[List[Any]]]:
+        """Chunk text using the configured chunking strategy."""
+        chunks = self.chunker(text)
+
+        if add_context:
+            context_params = context_params or {}
+            if isinstance(text, str):
+                chunks = self.add_overlap_context(chunks, **context_params)
+            else:
+                chunks = [self.add_overlap_context(c, **context_params) for c in chunks]
+
+        return chunks
+
+    def __call__(
+        self,
+        text: Union[str, List[str]],
+        add_context: bool = False,
+        context_params: Optional[Dict[str, Any]] = None
+    ) -> Union[List[Any], List[List[Any]]]:
+        """Make the Chunking instance callable."""
+        return self.chunk(text, add_context, context_params)
+
+    def __repr__(self) -> str:
+        """String representation of the Chunking instance."""
+        return (
+            f"Chunking(chunker_type='{self.chunker_type}', "
+            f"chunk_size={self.chunk_size}, "
+            f"chunk_overlap={self.chunk_overlap})"
+        )
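
A short sketch of the class in use; the sample text is arbitrary:

    from praisonaiagents.knowledge.chunking import Chunking

    chunking = Chunking(chunker_type='sentence', chunk_size=256, chunk_overlap=32)
    chunks = chunking("First sentence. Second sentence. Third sentence.")
    for chunk in chunks:
        print(chunk.text if hasattr(chunk, 'text') else chunk)

Because SUPPORTED_CHUNKERS, embedding_model, and chunker are all cached_property lookups, chonkie is only imported (and any embedding model only loaded) on first use, keeping module import cheap.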
--- /dev/null
+++ b/praisonaiagents/knowledge/knowledge.py
@@ -0,0 +1,269 @@
+import os
+import logging
+import uuid
+import time
+from .chunking import Chunking
+from functools import cached_property
+
+logger = logging.getLogger(__name__)
+
+class CustomMemory:
+    @classmethod
+    def from_config(cls, config):
+        from mem0 import Memory
+        return type('CustomMemory', (Memory,), {
+            '_add_to_vector_store': cls._add_to_vector_store
+        }).from_config(config)
+
+    @staticmethod
+    def _add_to_vector_store(self, messages, metadata, filters):
+        # Custom implementation that doesn't use LLM
+        parsed_messages = "\n".join([msg["content"] for msg in messages])
+
+        # Create a simple fact without using LLM
+        new_retrieved_facts = [parsed_messages]
+
+        # Process embeddings and continue with vector store operations
+        new_message_embeddings = {}
+        for new_mem in new_retrieved_facts:
+            messages_embeddings = self.embedding_model.embed(new_mem)
+            new_message_embeddings[new_mem] = messages_embeddings
+
+        # Create the memory
+        memory_id = self._create_memory(
+            data=parsed_messages,
+            existing_embeddings=new_message_embeddings,
+            metadata=metadata
+        )
+
+        return [{
+            "id": memory_id,
+            "memory": parsed_messages,
+            "event": "ADD"
+        }]
+
+class Knowledge:
+    def __init__(self, config=None):
+        self._config = config
+        os.environ['ANONYMIZED_TELEMETRY'] = 'False'  # Chromadb
+
+    @cached_property
+    def _deps(self):
+        try:
+            from markitdown import MarkItDown
+            import chromadb
+            return {
+                'chromadb': chromadb,
+                'markdown': MarkItDown()
+            }
+        except ImportError:
+            raise ImportError(
+                "Required packages not installed. Please install using: "
+                'pip install "praisonaiagents[knowledge]"'
+            )
+
+    @cached_property
+    def config(self):
+        # Generate unique collection name for each instance
+        collection_name = f"test_{int(time.time())}_{str(uuid.uuid4())[:8]}"
+        persist_dir = ".praison"
+
+        # Create persistent client config
+        base_config = {
+            "vector_store": {
+                "provider": "chroma",
+                "config": {
+                    "collection_name": collection_name,
+                    "path": persist_dir,
+                    "client": self._deps['chromadb'].PersistentClient(path=persist_dir)
+                }
+            },
+            "version": "v1.1",
+            "custom_prompt": "Return {{\"facts\": [text]}} where text is the exact input provided and json response"
+        }
+
+        # If config is provided, merge it with base config
+        if self._config:
+            if "vector_store" in self._config and "config" in self._config["vector_store"]:
+                config_copy = self._config["vector_store"]["config"].copy()
+                for key in ["collection_name", "client"]:
+                    if key in config_copy:
+                        del config_copy[key]
+                base_config["vector_store"]["config"].update(config_copy)
+
+        return base_config
+
+    @cached_property
+    def memory(self):
+        try:
+            return CustomMemory.from_config(self.config)
+        except (NotImplementedError, ValueError) as e:
+            if "list_collections" in str(e) or "Extra fields not allowed" in str(e):
+                # Keep only allowed fields
+                vector_store_config = {
+                    "collection_name": self.config["vector_store"]["config"]["collection_name"],
+                    "path": self.config["vector_store"]["config"]["path"]
+                }
+                self.config["vector_store"]["config"] = vector_store_config
+                from mem0 import Memory
+                return Memory.from_config(self.config)
+            raise
+
+    @cached_property
+    def markdown(self):
+        return self._deps['markdown']
+
+    @cached_property
+    def chunker(self):
+        return Chunking(
+            chunker_type='recursive',
+            chunk_size=512,
+            chunk_overlap=50
+        )
+
+    def store(self, content, user_id=None, agent_id=None, run_id=None, metadata=None):
+        """Store a memory."""
+        try:
+            # Process content to match expected format
+            if isinstance(content, str):
+                # Check if content is actually a file path
+                if any(content.lower().endswith(ext) for ext in ['.pdf', '.doc', '.docx', '.txt']):
+                    logger.info(f"Content appears to be a file path, processing file: {content}")
+                    return self.add(content, user_id=user_id, agent_id=agent_id, run_id=run_id, metadata=metadata)
+
+                content = content.strip()
+                if not content:
+                    return []
+
+            result = self.memory.add(content, user_id=user_id, agent_id=agent_id, run_id=run_id, metadata=metadata)
+            logger.info(f"Store operation result: {result}")
+            return result
+        except Exception as e:
+            logger.error(f"Error storing content: {str(e)}")
+            return []
+
+    def get_all(self, user_id=None, agent_id=None, run_id=None):
+        """Retrieve all memories."""
+        return self.memory.get_all(user_id=user_id, agent_id=agent_id, run_id=run_id)
+
+    def get(self, memory_id):
+        """Retrieve a specific memory by ID."""
+        return self.memory.get(memory_id)
+
+    def search(self, query, user_id=None, agent_id=None, run_id=None):
+        """Search for memories related to a query."""
+        return self.memory.search(query, user_id=user_id, agent_id=agent_id, run_id=run_id)
+
+    def update(self, memory_id, data):
+        """Update a memory."""
+        return self.memory.update(memory_id, data)
+
+    def history(self, memory_id):
+        """Get the history of changes for a memory."""
+        return self.memory.history(memory_id)
+
+    def delete(self, memory_id):
+        """Delete a memory."""
+        self.memory.delete(memory_id)
+
+    def delete_all(self, user_id=None, agent_id=None, run_id=None):
+        """Delete all memories."""
+        self.memory.delete_all(user_id=user_id, agent_id=agent_id, run_id=run_id)
+
+    def reset(self):
+        """Reset all memories."""
+        self.memory.reset()
+
+    def normalize_content(self, content):
+        """Normalize content for consistent storage."""
+        # Example normalization: strip whitespace, convert to lowercase
+        return content.strip().lower()
+
+    def add(self, file_path, user_id=None, agent_id=None, run_id=None, metadata=None):
+        """Read file content and store it in memory.
+
+        Args:
+            file_path: Can be:
+                - A string path to local file
+                - A URL string
+                - A list containing file paths and/or URLs
+        """
+        if isinstance(file_path, (list, tuple)):
+            results = []
+            for path in file_path:
+                result = self._process_single_input(path, user_id, agent_id, run_id, metadata)
+                results.extend(result.get('results', []))
+            return {'results': results, 'relations': []}
+
+        return self._process_single_input(file_path, user_id, agent_id, run_id, metadata)
+
+    def _process_single_input(self, input_path, user_id=None, agent_id=None, run_id=None, metadata=None):
+        """Process a single input which can be a file path or URL."""
+        try:
+            # Define supported file extensions
+            DOCUMENT_EXTENSIONS = {
+                'document': ('.pdf', '.ppt', '.pptx', '.doc', '.docx', '.xls', '.xlsx'),
+                'media': ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.mp3', '.wav', '.ogg', '.m4a'),
+                'text': ('.txt', '.csv', '.json', '.xml', '.md', '.html', '.htm'),
+                'archive': '.zip'
+            }
+
+            # Check if input is URL
+            if isinstance(input_path, str) and (input_path.startswith('http://') or input_path.startswith('https://')):
+                logger.info(f"Processing URL: {input_path}")
+                # TODO: Implement URL handling
+                raise NotImplementedError("URL processing not yet implemented")
+
+            # Check if input ends with any supported extension
+            is_supported_file = any(input_path.lower().endswith(ext)
+                                    for exts in DOCUMENT_EXTENSIONS.values()
+                                    for ext in (exts if isinstance(exts, tuple) else (exts,)))
+
+            if is_supported_file:
+                logger.info(f"Processing as file path: {input_path}")
+                if not os.path.exists(input_path):
+                    logger.error(f"File not found: {input_path}")
+                    raise FileNotFoundError(f"File not found: {input_path}")
+
+                file_ext = '.' + input_path.lower().split('.')[-1]  # Get extension reliably
+
+                # Process file based on type
+                if file_ext in DOCUMENT_EXTENSIONS['text']:
+                    with open(input_path, 'r', encoding='utf-8') as file:
+                        content = file.read().strip()
+                    if not content:
+                        raise ValueError("Empty text file")
+                    memories = [self.normalize_content(content)]
+                else:
+                    # Use MarkItDown for documents and media
+                    result = self.markdown.convert(input_path)
+                    content = result.text_content
+                    if not content:
+                        raise ValueError("No content could be extracted from file")
+                    chunks = self.chunker.chunk(content)
+                    memories = [chunk.text.strip() if hasattr(chunk, 'text') else str(chunk).strip()
+                                for chunk in chunks if chunk]
+
+                # Set metadata for file
+                if not metadata:
+                    metadata = {}
+                metadata['file_type'] = file_ext.lstrip('.')
+                metadata['filename'] = os.path.basename(input_path)
+            else:
+                # Treat as raw text content only if no file extension
+                memories = [self.normalize_content(input_path)]
+
+            # Store memories
+            all_results = []
+            for memory in memories:
+                if memory:
+                    memory_result = self.store(memory, user_id=user_id, agent_id=agent_id,
+                                               run_id=run_id, metadata=metadata)
+                    if memory_result:
+                        all_results.extend(memory_result.get('results', []))
+
+            return {'results': all_results, 'relations': []}
+
+        except Exception as e:
+            logger.error(f"Error processing input {input_path}: {str(e)}", exc_info=True)
+            raise
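
A minimal sketch of using Knowledge directly; the file name and user id are illustrative, and the knowledge extra must be installed:

    from praisonaiagents import Knowledge

    kb = Knowledge()
    kb.add("notes.txt", user_id="user_123")  # file: read, normalized, chunked if needed, stored
    kb.store("PraisonAI supports self-reflection.", user_id="user_123")  # raw string
    results = kb.search("self-reflection", user_id="user_123")

Note that config generates a unique test_<timestamp>_<uuid> collection per instance and strips collection_name and client from any user-supplied config before merging, so every Knowledge instance starts from a fresh Chroma collection under .praison.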
--- /dev/null
+++ b/praisonaiagents-0.0.37.dist-info/METADATA
@@ -0,0 +1,18 @@
+Metadata-Version: 2.2
+Name: praisonaiagents
+Version: 0.0.37
+Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
+Author: Mervin Praison
+Requires-Dist: pydantic
+Requires-Dist: rich
+Requires-Dist: openai
+Provides-Extra: memory
+Requires-Dist: chromadb>=0.5.23; extra == "memory"
+Provides-Extra: knowledge
+Requires-Dist: mem0ai>=0.1.0; extra == "knowledge"
+Requires-Dist: chromadb==0.5.23; extra == "knowledge"
+Requires-Dist: markitdown; extra == "knowledge"
+Requires-Dist: chonkie; extra == "knowledge"
+Provides-Extra: all
+Requires-Dist: praisonaiagents[memory]; extra == "all"
+Requires-Dist: praisonaiagents[knowledge]; extra == "all"
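
The heavy dependencies live behind the new knowledge extra: pip install "praisonaiagents[knowledge]" pulls in mem0ai, chromadb==0.5.23, markitdown, and chonkie, while pip install "praisonaiagents[all]" combines the memory and knowledge extras. The memory extra's chromadb floor also drops from >=0.6.0 to >=0.5.23, likely so the two extras can coexist with the knowledge pin.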
--- a/praisonaiagents-0.0.36.dist-info/RECORD
+++ b/praisonaiagents-0.0.37.dist-info/RECORD
@@ -1,10 +1,13 @@
-praisonaiagents/__init__.py,sha256=Pm_HNlIsenf5zIstcVNk6nteJmOEnI4nB-zB-YL0Jgo,1160
+praisonaiagents/__init__.py,sha256=MCgAj12hVJ0YZmVmdmZgYAAMfPdWSoNSiDlRJCvrJqA,1276
 praisonaiagents/main.py,sha256=uMBdwxjnJKHLPUzr_5vXlkuhCUO6EW5O8XC0M-h47sE,13915
 praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
-praisonaiagents/agent/agent.py,sha256=CSULJNsm53Fh3LK-ZaAPuAcL3-7ca7aqFKYsNHAiTo8,34789
+praisonaiagents/agent/agent.py,sha256=gSJgyOYjEbJjRwWGUyT6Mz1Ln2NC-5LBXpitqMetzqU,36924
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
-praisonaiagents/agents/agents.py,sha256=Os_k25-temlpzzncyElqbQI-e29nlcxqfJYy_ZUIprY,31004
+praisonaiagents/agents/agents.py,sha256=M-nR53A7Qcz_pJ-gyNc4xgM13Nhof7oM-5hXWzr85ho,31250
 praisonaiagents/agents/autoagents.py,sha256=bjC2O5oZmoJItJXIMPTWc2lsp_AJC9tMiTQOal2hwPA,13532
+praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
+praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
+praisonaiagents/knowledge/knowledge.py,sha256=fnZMGm8AtlKlaIIbHT5xeQz-dfIPsii8koTdrTPl7H8,10985
 praisonaiagents/memory/memory.py,sha256=ZxqSpOUxk9jeTKGW0ZiTifC0uZtym-EZILP3kuOOKkU,35626
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=uSudOFI1ZlGM_nbT8qHD4iIP3y5Ygu8V-izLot2te70,26316
@@ -30,7 +33,7 @@ praisonaiagents/tools/wikipedia_tools.py,sha256=pGko-f33wqXgxJTv8db7TbizY5XnzBQR
 praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxNMMs1A,17122
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
-praisonaiagents-0.0.36.dist-info/METADATA,sha256=x6Ivo2TMN_tYww70m6DEMXtGwO3cmgc86B_SQMT0uO0,306
-praisonaiagents-0.0.36.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-praisonaiagents-0.0.36.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.36.dist-info/RECORD,,
+praisonaiagents-0.0.37.dist-info/METADATA,sha256=PVUCq8U4iPXxXah6DsUdfak6fXW5HEmQUEOA229MbTk,664
+praisonaiagents-0.0.37.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+praisonaiagents-0.0.37.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.37.dist-info/RECORD,,
--- a/praisonaiagents-0.0.36.dist-info/METADATA
+++ /dev/null
@@ -1,10 +0,0 @@
-Metadata-Version: 2.2
-Name: praisonaiagents
-Version: 0.0.36
-Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
-Author: Mervin Praison
-Requires-Dist: pydantic
-Requires-Dist: rich
-Requires-Dist: openai
-Provides-Extra: memory
-Requires-Dist: chromadb>=0.6.0; extra == "memory"