praisonaiagents 0.0.59__py3-none-any.whl → 0.0.61__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
praisonaiagents/__init__.py
@@ -3,6 +3,7 @@ Praison AI Agents - A package for hierarchical AI agent task execution
 """
 
 from .agent.agent import Agent
+from .agent.image_agent import ImageAgent
 from .agents.agents import PraisonAIAgents
 from .task.task import Task
 from .tools.tools import Tools
@@ -30,6 +31,7 @@ Agents = PraisonAIAgents
 
 __all__ = [
     'Agent',
+    'ImageAgent',
     'PraisonAIAgents',
     'Agents',
     'Tools',

praisonaiagents/agent/__init__.py
@@ -1,4 +1,5 @@
 """Agent module for AI agents"""
 from .agent import Agent
+from .image_agent import ImageAgent
 
-__all__ = ['Agent']
+__all__ = ['Agent', 'ImageAgent']
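
With both exports in place, the new class is importable from the package root as well as from the agent submodule. A minimal sketch using only the import paths visible above:

    from praisonaiagents import ImageAgent
    # or, equivalently:
    from praisonaiagents.agent import ImageAgent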

praisonaiagents/agent/agent.py
@@ -16,7 +16,6 @@ from ..main import (
     display_self_reflection,
     ReflectionOutput,
     client,
-    error_logs,
     adisplay_instruction
 )
 import inspect

praisonaiagents/agent/image_agent.py (new file)
@@ -0,0 +1,213 @@
+"""
+ImageAgent - A specialized agent class for generating images using AI models.
+
+This class extends the base Agent class to provide specific functionality for image generation,
+including support for different image models, sizes, and quality settings.
+"""
+
+from typing import Optional, Any, Dict, Union, List
+from ..agent.agent import Agent
+from pydantic import BaseModel, Field
+import logging
+import warnings
+
+# Filter out Pydantic warning about fields
+warnings.filterwarnings("ignore", "Valid config keys have changed in V2", UserWarning)
+
+from rich.console import Console
+from rich.panel import Panel
+from rich.progress import Progress, SpinnerColumn, TextColumn
+
+class ImageGenerationConfig(BaseModel):
+    """Configuration for image generation settings."""
+    style: str = Field(default="natural", description="Style of the generated image")
+    response_format: str = Field(default="url", description="Format of the response (url or b64_json)")
+    timeout: int = Field(default=600, description="Timeout in seconds for the API call")
+    api_base: Optional[str] = Field(default=None, description="Optional API base URL")
+    api_key: Optional[str] = Field(default=None, description="Optional API key")
+    api_version: Optional[str] = Field(default=None, description="Optional API version (required for Azure dall-e-3)")
+
+class ImageAgent(Agent):
+    """
+    A specialized agent for generating images using AI models.
+
+    This agent extends the base Agent class with specific functionality for image generation,
+    including support for different models, sizes, and quality settings.
+    """
+
+    def __init__(
+        self,
+        name: Optional[str] = None,
+        role: Optional[str] = None,
+        goal: Optional[str] = None,
+        backstory: Optional[str] = None,
+        instructions: Optional[str] = None,
+        llm: Optional[Union[str, Any]] = None,
+        style: str = "natural",
+        response_format: str = "url",
+        timeout: int = 600,
+        api_base: Optional[str] = None,
+        api_key: Optional[str] = None,
+        api_version: Optional[str] = None,
+        verbose: Union[bool, int] = True,
+        **kwargs
+    ):
+        """Initialize ImageAgent with parameters."""
+        # Set default role and goal if not provided
+        role = role or "Image Generation Assistant"
+        goal = goal or "Generate high-quality images based on text descriptions"
+        backstory = backstory or "I am an AI assistant specialized in generating images from textual descriptions"
+
+        # Initialize the base agent
+        super().__init__(
+            name=name,
+            role=role,
+            goal=goal,
+            backstory=backstory,
+            instructions=instructions,
+            llm=llm,
+            verbose=verbose,
+            **kwargs
+        )
+
+        # Store image generation configuration
+        self.image_config = ImageGenerationConfig(
+            style=style,
+            response_format=response_format,
+            timeout=timeout,
+            api_base=api_base,
+            api_key=api_key,
+            api_version=api_version
+        )
+
+        # Lazy load litellm
+        self._litellm = None
+
+        # Configure logging based on verbose level
+        self._configure_logging(verbose)
+
+    def _configure_logging(self, verbose: Union[bool, int]) -> None:
+        """Configure logging levels based on verbose setting."""
+        # Only suppress logs if not in debug mode
+        if not isinstance(verbose, bool) and verbose >= 10:
+            # Enable detailed debug logging
+            logging.getLogger("asyncio").setLevel(logging.DEBUG)
+            logging.getLogger("selector_events").setLevel(logging.DEBUG)
+            logging.getLogger("litellm.utils").setLevel(logging.DEBUG)
+            logging.getLogger("litellm.main").setLevel(logging.DEBUG)
+            if hasattr(self, 'litellm'):
+                self.litellm.suppress_debug_messages = False
+                self.litellm.set_verbose = True
+            # Don't filter warnings in debug mode
+            warnings.resetwarnings()
+        else:
+            # Suppress debug logging for normal operation
+            logging.getLogger("asyncio").setLevel(logging.WARNING)
+            logging.getLogger("selector_events").setLevel(logging.WARNING)
+            logging.getLogger("litellm.utils").setLevel(logging.WARNING)
+            logging.getLogger("litellm.main").setLevel(logging.WARNING)
+            logging.getLogger("httpx").setLevel(logging.WARNING)
+            logging.getLogger("httpcore").setLevel(logging.WARNING)
+            if hasattr(self, 'litellm'):
+                self.litellm.suppress_debug_messages = True
+                self.litellm._logging._disable_debugging()
+            # Suppress all warnings including Pydantic's
+            warnings.filterwarnings("ignore", category=RuntimeWarning)
+            warnings.filterwarnings("ignore", category=UserWarning)
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+
+    @property
+    def litellm(self):
+        """Lazy load litellm module when needed."""
+        if self._litellm is None:
+            try:
+                import litellm
+                from litellm import image_generation
+
+                # Configure litellm to disable success handler logs
+                litellm.success_callback = []
+                litellm._logging._disable_debugging()
+
+                self._litellm = image_generation
+                # Configure logging after litellm is loaded
+                self._configure_logging(self.verbose)
+            except ImportError:
+                raise ImportError(
+                    "litellm is required for image generation. "
+                    "Please install it with: pip install litellm"
+                )
+        return self._litellm
+
+    def generate_image(self, prompt: str, **kwargs) -> Dict[str, Any]:
+        """Generate an image based on the provided prompt."""
+        # Merge default config with any provided kwargs
+        config = self.image_config.dict(exclude_none=True)
+        config.update(kwargs)
+
+        # Use llm parameter as the model
+        config['model'] = self.llm
+
+        with Progress(
+            SpinnerColumn(),
+            TextColumn("[progress.description]{task.description}"),
+            transient=True
+        ) as progress:
+            try:
+                # Add a task for image generation
+                task = progress.add_task(f"[cyan]Generating image with {self.llm}...", total=None)
+
+                # Use litellm's image generation
+                response = self.litellm(
+                    prompt=prompt,
+                    **config
+                )
+
+                # Mark task as complete
+                progress.update(task, completed=True)
+                return response
+
+            except Exception as e:
+                error_msg = f"Error generating image: {str(e)}"
+                if self.verbose:
+                    self.console.print(f"[red]{error_msg}[/red]")
+                logging.error(error_msg)
+                raise
+
+    async def agenerate_image(self, prompt: str, **kwargs) -> Dict[str, Any]:
+        """Async wrapper for generate_image."""
+        return self.generate_image(prompt, **kwargs)
+
+    def chat(self, prompt: str, **kwargs) -> Dict[str, Any]:
+        """Generate an image from the prompt."""
+        try:
+            result = self.generate_image(prompt, **kwargs)
+            if self.verbose:
+                self.console.print(f"[green]Successfully generated image from prompt[/green]")
+            return result
+        except Exception as e:
+            error_msg = f"Failed to generate image: {str(e)}"
+            if self.verbose:
+                self.console.print(f"[red]{error_msg}[/red]")
+            return {"error": str(e)}
+
+    async def achat(
+        self,
+        prompt: str,
+        temperature: float = 0.2,
+        tools: Optional[List[Any]] = None,
+        output_json: Optional[str] = None,
+        output_pydantic: Optional[Any] = None,
+        reasoning_steps: bool = False,
+        **kwargs
+    ) -> Union[str, Dict[str, Any]]:
+        """Async chat method for image generation."""
+        try:
+            image_result = await self.agenerate_image(prompt, **kwargs)
+            if self.verbose:
+                self.console.print(f"[green]Successfully generated image from prompt[/green]")
+            return image_result
+        except Exception as e:
+            error_msg = f"Failed to generate image: {str(e)}"
+            if self.verbose:
+                self.console.print(f"[red]{error_msg}[/red]")
+            return {"error": str(e)}

praisonaiagents/agents/autoagents.py
@@ -8,10 +8,10 @@ It automatically handles agent creation, task setup, and execution flow.
 from .agents import PraisonAIAgents
 from ..agent.agent import Agent
 from ..task.task import Task
-from typing import List, Any, Optional, Dict, Union
+from typing import List, Any, Optional, Dict
 import logging
 import os
-from pydantic import BaseModel, Field
+from pydantic import BaseModel
 from ..main import display_instruction, display_tool_call, display_interaction, client
 
 # Define Pydantic models for structured output

praisonaiagents/llm/llm.py
@@ -1040,4 +1040,174 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             if type(event) in event_types:
                 litellm._async_success_callback.remove(event)
 
-        litellm.callbacks = events
+        litellm.callbacks = events
+
+    # Response without tool calls
+    def response(
+        self,
+        prompt: Union[str, List[Dict]],
+        system_prompt: Optional[str] = None,
+        temperature: float = 0.2,
+        stream: bool = True,
+        verbose: bool = True,
+        markdown: bool = True,
+        console: Optional[Console] = None,
+        **kwargs
+    ) -> str:
+        """Simple function to get model response without tool calls or complex features"""
+        try:
+            import litellm
+            import logging
+            logger = logging.getLogger(__name__)
+
+            litellm.set_verbose = False
+            start_time = time.time()
+
+            logger.debug("Using synchronous response function")
+
+            # Build messages list
+            messages = []
+            if system_prompt:
+                messages.append({"role": "system", "content": system_prompt})
+
+            # Add prompt to messages
+            if isinstance(prompt, list):
+                messages.append({"role": "user", "content": prompt})
+            else:
+                messages.append({"role": "user", "content": prompt})
+
+            # Get response from LiteLLM
+            if stream:
+                response_text = ""
+                if verbose:
+                    with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
+                        for chunk in litellm.completion(
+                            model=self.model,
+                            messages=messages,
+                            temperature=temperature,
+                            stream=True,
+                            **kwargs
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                content = chunk.choices[0].delta.content
+                                response_text += content
+                                live.update(display_generating(response_text, start_time))
+                else:
+                    for chunk in litellm.completion(
+                        model=self.model,
+                        messages=messages,
+                        temperature=temperature,
+                        stream=True,
+                        **kwargs
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            response_text += chunk.choices[0].delta.content
+            else:
+                response = litellm.completion(
+                    model=self.model,
+                    messages=messages,
+                    temperature=temperature,
+                    stream=False,
+                    **kwargs
+                )
+                response_text = response.choices[0].message.content.strip()
+
+            if verbose:
+                display_interaction(
+                    prompt if isinstance(prompt, str) else prompt[0].get("text", ""),
+                    response_text,
+                    markdown=markdown,
+                    generation_time=time.time() - start_time,
+                    console=console or self.console
+                )
+
+            return response_text.strip()
+
+        except Exception as error:
+            display_error(f"Error in response: {str(error)}")
+            raise
+
+    # Async version of response function. Response without tool calls
+    async def aresponse(
+        self,
+        prompt: Union[str, List[Dict]],
+        system_prompt: Optional[str] = None,
+        temperature: float = 0.2,
+        stream: bool = True,
+        verbose: bool = True,
+        markdown: bool = True,
+        console: Optional[Console] = None,
+        **kwargs
+    ) -> str:
+        """Async version of response function"""
+        try:
+            import litellm
+            import logging
+            logger = logging.getLogger(__name__)
+
+            litellm.set_verbose = False
+            start_time = time.time()
+
+            logger.debug("Using asynchronous response function")
+
+            # Build messages list
+            messages = []
+            if system_prompt:
+                messages.append({"role": "system", "content": system_prompt})
+
+            # Add prompt to messages
+            if isinstance(prompt, list):
+                messages.append({"role": "user", "content": prompt})
+            else:
+                messages.append({"role": "user", "content": prompt})
+
+            # Get response from LiteLLM
+            if stream:
+                response_text = ""
+                if verbose:
+                    with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
+                        async for chunk in await litellm.acompletion(
+                            model=self.model,
+                            messages=messages,
+                            temperature=temperature,
+                            stream=True,
+                            **kwargs
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                content = chunk.choices[0].delta.content
+                                response_text += content
+                                live.update(display_generating(response_text, start_time))
+                else:
+                    async for chunk in await litellm.acompletion(
+                        model=self.model,
+                        messages=messages,
+                        temperature=temperature,
+                        stream=True,
+                        **kwargs
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            response_text += chunk.choices[0].delta.content
+            else:
+                response = await litellm.acompletion(
+                    model=self.model,
+                    messages=messages,
+                    temperature=temperature,
+                    stream=False,
+                    **kwargs
+                )
+                response_text = response.choices[0].message.content.strip()
+
+            if verbose:
+                display_interaction(
+                    prompt if isinstance(prompt, str) else prompt[0].get("text", ""),
+                    response_text,
+                    markdown=markdown,
+                    generation_time=time.time() - start_time,
+                    console=console or self.console
+                )
+
+            return response_text.strip()
+
+        except Exception as error:
+            display_error(f"Error in response_async: {str(error)}")
+            raise
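
These two methods give the LLM wrapper a plain completion path with no tool handling; the GenerateCOT changes below switch _ask_ai() over to it. A usage sketch, assuming the class is exported from praisonaiagents.llm and constructed as LLM(model=...), which matches how the lazy loader in generatecot.py instantiates it:

    from praisonaiagents.llm import LLM

    llm = LLM(model="gpt-4o-mini")

    # Synchronous call; streams to the console with a live display by default.
    text = llm.response("Summarize self-reflection agents in one sentence.")

    # Async variant, with streaming and console output turned off:
    # text = await llm.aresponse("Same question", stream=False, verbose=False)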

praisonaiagents/tools/train/data/generatecot.py
@@ -1,8 +1,16 @@
 from typing import Dict, Optional, Union, Any
 import json
 from datetime import datetime
-from openai import OpenAI
-from pydantic import BaseModel
+import os
+import logging
+
+# Setup logging based on environment variable
+log_level = os.getenv('LOGLEVEL', 'INFO').upper()
+logging.basicConfig(
+    level=log_level,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
 
 # Lazy loader for LLM
 def get_llm():
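
Note that logging.basicConfig() runs at import time here, so LOGLEVEL has to be set before the module is first imported. A sketch, assuming the module is importable at the path the RECORD lists:

    import os
    os.environ["LOGLEVEL"] = "DEBUG"  # must precede the import below

    from praisonaiagents.tools.train.data.generatecot import GenerateCOT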
@@ -21,16 +29,24 @@ class GenerateCOT:
         qa_pairs: Optional[Dict[str, str]] = None,
         model: str = "gpt-4o-mini",
         api_key: Optional[str] = None,
-        max_attempts: int = 3
+        max_attempts: int = 3,
+        verbose: bool = True,
+        temperature: float = 0.5
     ):
         self.qa_pairs = qa_pairs or {}
         self.max_attempts = max_attempts
         self.solutions = {}
         self.llm = get_llm()(model=model) # Get LLM class and instantiate
         self.model = model
-
+        self.temperature = temperature
+        self.verbose = verbose
+        logger.debug(f"Initialized GenerateCOT with model={model}, max_attempts={max_attempts}")
+
     def _ask_ai(self, prompt: str) -> str:
-        return self.llm.get_response(prompt, temperature=0.7)
+        logger.debug(f"Sending prompt to LLM: {prompt[:100]}...")
+        response = self.llm.response(prompt, temperature=self.temperature)
+        logger.debug(f"Received response: {response[:100]}...")
+        return response
 
     def _build_solution_prompt(self, question: str, context: str) -> str:
         return f"""
@@ -106,7 +122,7 @@ class GenerateCOT:
         try:
             score = float(self._ask_ai(prompt))
             return min(max(score, 0), 1)
-        except:
+        except ValueError:
             return 0.0
 
     def cot_run(self, question: str) -> str:
  def cot_run(self, question: str) -> str:
@@ -245,23 +261,33 @@ class GenerateCOT:
245
261
 
246
262
  def cot_run_dict(self, question: str) -> dict:
247
263
  """Uses the dictionary-based solution approach, storing the final solution in self.solutions."""
264
+ logger.debug(f"Starting cot_run_dict for question: {question}")
265
+
248
266
  solution = self.cot_generate_dict(question)
267
+ logger.debug(f"Initial solution generated: {str(solution)[:100]}...")
268
+
249
269
  if self.cot_check(question, solution["final_answer"]):
270
+ logger.debug("Initial solution passed verification")
250
271
  self.solutions[question] = solution
251
272
  return solution
252
273
 
274
+ logger.debug("Initial solution failed verification, attempting improvement")
253
275
  improved = self.cot_improve_dict(question, solution["thought_process"])
254
276
  if self.cot_check(question, improved["final_answer"]):
277
+ logger.debug("Improved solution passed verification")
255
278
  self.solutions[question] = improved
256
279
  return improved
257
280
 
281
+ logger.debug("Checking for errors in improved solution")
258
282
  error_pos = self.cot_find_error(question, improved["thought_process"])
259
283
  if error_pos != -1:
284
+ logger.debug(f"Found error at position {error_pos}, generating final solution")
260
285
  partial_solution = '. '.join(improved["thought_process"].split('. ')[:error_pos]) + '.'
261
286
  final = self.cot_generate_dict(question, partial_solution)
262
287
  self.solutions[question] = final
263
288
  return final
264
289
 
290
+ logger.debug("Using improved solution as final result")
265
291
  self.solutions[question] = improved
266
292
  return improved
267
293
 
@@ -308,6 +334,7 @@ class GenerateCOT:
     ) -> Optional[str]:
         """Export solutions in CSV format."""
         try:
+            import csv
             self._is_qa_pairs(qa_pairs) # Validate format
             if qa_pairs:
                 self.qa_pairs.update(qa_pairs)
@@ -332,29 +359,30 @@ class GenerateCOT:
         answer: str,
         filepath: str = 'dataset.csv'
     ) -> Optional[str]:
-        """
-        Save a single question-answer pair with chain of thought to CSV file.
-        Creates file with headers if it doesn't exist.
-        """
+        """Save a single question-answer pair with chain of thought to CSV file."""
+        logger.debug(f"Saving QA pair to {filepath}")
         try:
-            # Add the current QA pair to self.qa_pairs
             self.qa_pairs[question] = answer
+            logger.debug("Added QA pair to internal dictionary")
 
-            # Generate solution
             solution = self.cot_run_dict(question)
+            logger.debug("Generated solution for question")
 
             import csv
             import os
             file_exists = os.path.exists(filepath)
+            logger.debug(f"File exists: {file_exists}")
 
             with open(filepath, 'a', newline='', encoding='utf-8') as f:
                 writer = csv.writer(f)
                 if not file_exists:
+                    logger.debug("Creating new file with headers")
                     writer.writerow(['instruction', 'input', 'output'])
                 writer.writerow([question, '', solution.get("thought_process", "")])
+                logger.debug("Wrote solution to file")
             return filepath
         except Exception as e:
-            print(f"Error appending to CSV: {e}")
+            logger.error(f"Error saving to CSV: {str(e)}")
             return None
 
     # Rename existing function to indicate it handles qa_pairs dictionary
@@ -393,33 +421,36 @@ class GenerateCOT:
         private: bool = False
     ) -> str:
         """Upload generated solutions to HuggingFace datasets."""
+        logger.debug(f"Attempting to upload {filepath} to HuggingFace as {dataset_name}")
         try:
             from datasets import Dataset
-            from huggingface_hub import HfApi, login
             import pandas as pd
 
-            # Determine file type and load data
+            logger.debug(f"Loading data from {filepath}")
             if filepath.endswith('.csv'):
                 data = pd.read_csv(filepath)
+                logger.debug(f"Loaded CSV with {len(data)} rows")
             elif filepath.endswith('.json'):
                 data = pd.read_json(filepath)
+                logger.debug(f"Loaded JSON with {len(data)} records")
             else:
                 raise ValueError("Only CSV and JSON files are supported")
 
-            # Convert to HuggingFace dataset
+            logger.debug("Converting to HuggingFace dataset")
             dataset = Dataset.from_pandas(data)
 
-            # Upload to HuggingFace
             repo_id = f"{huggingface_username}/{dataset_name}"
+            logger.debug(f"Pushing to hub: {repo_id}")
             dataset.push_to_hub(
                 repo_id,
                 private=private
            )
 
+            logger.debug("Upload completed successfully")
             return f"Dataset uploaded successfully to {repo_id}"
 
         except Exception as e:
-            print(f"Error uploading to HuggingFace: {e}")
+            logger.error(f"Error uploading to HuggingFace: {str(e)}")
             return None
 
 # Usage example:
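
The upload path now logs each stage instead of printing, and drops the unused huggingface_hub import; Dataset.push_to_hub() resolves credentials itself from a cached login or the HF_TOKEN environment variable. The equivalent standalone calls, as a sketch with a placeholder repo id:

    import pandas as pd
    from datasets import Dataset

    data = pd.read_csv("dataset.csv")  # or pd.read_json(...)
    Dataset.from_pandas(data).push_to_hub("your-username/your-dataset", private=False)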

praisonaiagents-0.0.61.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: praisonaiagents
-Version: 0.0.59
+Version: 0.0.61
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic

praisonaiagents-0.0.61.dist-info/RECORD
@@ -1,15 +1,16 @@
-praisonaiagents/__init__.py,sha256=JtPibbmeFv3meIb3vkKjckB0p7m-Vqt2RYPwOH8P41k,1228
+praisonaiagents/__init__.py,sha256=frdIvimDY-kU9j-9yXV1z4NtXypfPvyvlnac5mgBCuQ,1288
 praisonaiagents/main.py,sha256=0kB9gn9meXtr4EIrdgA2lAioKIHCRJ61audsGDwuTm4,14428
-praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
-praisonaiagents/agent/agent.py,sha256=VZPci7g_LAvTz7_rWvWAZ1JgCGKQc1vSfI6x0fqi2os,57598
+praisonaiagents/agent/__init__.py,sha256=j0T19TVNbfZcClvpbZDDinQxZ0oORgsMrMqx16jZ-bA,128
+praisonaiagents/agent/agent.py,sha256=_v8WrWK1oP4OpPgp30nH4xbPyREnjOnRT1cyHUa2T9Q,57582
+praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=94YPQl-hl-EPY6-Xk2Rj9wlIs9YtiLQbsutSOXWX8QI,36156
-praisonaiagents/agents/autoagents.py,sha256=bjC2O5oZmoJItJXIMPTWc2lsp_AJC9tMiTQOal2hwPA,13532
+praisonaiagents/agents/autoagents.py,sha256=olYDn--rlJp-SckxILqmREkkgNlzCgEEcAUzfMj-54E,13518
 praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
 praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
 praisonaiagents/knowledge/knowledge.py,sha256=fQNREDiwdoisfIxJBLVkteXgq_8Gbypfc3UaZbxf5QY,13210
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=G2wKMwitWBJRS6nOq9W77zXtsxvJwsVwXFOKYcllY0E,51386
+praisonaiagents/llm/llm.py,sha256=SYfiMOmduEOhwraewmXSydu6tNBb9n5uKRxnO9moGYM,58151
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=HPw84OhnKQW3EyrDkpoQu0DcpxThbrzR2hWUgwQh9Pw,59955
@@ -35,8 +36,8 @@ praisonaiagents/tools/wikipedia_tools.py,sha256=pGko-f33wqXgxJTv8db7TbizY5XnzBQR
 praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxNMMs1A,17122
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
-praisonaiagents/tools/train/data/generatecot.py,sha256=k1gZHtgY1poVp5kajhgs4S9a4-epdA8NyZfYTa34lQU,17651
-praisonaiagents-0.0.59.dist-info/METADATA,sha256=w6DYqKW5P9b2Rqu02j5Lt4-6K7f-InDrtUfV7fYu_FM,830
-praisonaiagents-0.0.59.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-praisonaiagents-0.0.59.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.59.dist-info/RECORD,,
+praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
+praisonaiagents-0.0.61.dist-info/METADATA,sha256=I_883Gdgeer-wm8RVmBk-kAdkHQloL0ewZ96wqwW26c,830
+praisonaiagents-0.0.61.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+praisonaiagents-0.0.61.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.61.dist-info/RECORD,,