praisonaiagents-0.0.121-py3-none-any.whl → praisonaiagents-0.0.123-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
praisonaiagents/agent/agent.py
@@ -1274,27 +1274,40 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  messages.append({"role": "user", "content": reflection_prompt})

  try:
- # Check if OpenAI client is available
- if self._openai_client is None:
- # For custom LLMs, self-reflection with structured output is not supported
- if self.verbose:
- display_self_reflection(f"Agent {self.name}: Self-reflection with structured output is not supported for custom LLM providers. Skipping reflection.", console=self.console)
- # Return the original response without reflection
- self.chat_history.append({"role": "user", "content": prompt})
- self.chat_history.append({"role": "assistant", "content": response_text})
- # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
- if self.verbose and not self._using_custom_llm:
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
- return response_text
-
- reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
- model=self.reflect_llm if self.reflect_llm else self.llm,
- messages=messages,
- temperature=temperature,
- response_format=ReflectionOutput
- )
+ # Check if we're using a custom LLM (like Gemini)
+ if self._using_custom_llm or self._openai_client is None:
+ # For custom LLMs, we need to handle reflection differently
+ # Use non-streaming to get complete JSON response
+ reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False)
+
+ if not reflection_response or not reflection_response.choices:
+ raise Exception("No response from reflection request")
+
+ reflection_text = reflection_response.choices[0].message.content.strip()
+
+ # Clean the JSON output
+ cleaned_json = self.clean_json_output(reflection_text)
+
+ # Parse the JSON manually
+ reflection_data = json.loads(cleaned_json)
+
+ # Create a reflection output object manually
+ class CustomReflectionOutput:
+ def __init__(self, data):
+ self.reflection = data.get('reflection', '')
+ self.satisfactory = data.get('satisfactory', 'no').lower()
+
+ reflection_output = CustomReflectionOutput(reflection_data)
+ else:
+ # Use OpenAI's structured output for OpenAI models
+ reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
+ model=self.reflect_llm if self.reflect_llm else self.llm,
+ messages=messages,
+ temperature=temperature,
+ response_format=ReflectionOutput
+ )

- reflection_output = reflection_response.choices[0].message.parsed
+ reflection_output = reflection_response.choices[0].message.parsed

  if self.verbose:
  display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'", console=self.console)
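Note: the custom-LLM branch above relies on `self.clean_json_output`, which is defined elsewhere in agent.py and not shown in this diff. A minimal, self-contained sketch of the markdown-fence stripping and manual parse that branch implies (the helper body here is an assumption, not the package's actual implementation):

    import json

    def clean_json_output(text: str) -> str:
        # Assumed behavior: strip markdown code fences some providers wrap around JSON
        cleaned = text.strip()
        if cleaned.startswith("```json"):
            cleaned = cleaned[7:]
        if cleaned.startswith("```"):
            cleaned = cleaned[3:]
        if cleaned.endswith("```"):
            cleaned = cleaned[:-3]
        return cleaned.strip()

    raw = '```json\n{"reflection": "Answer is complete.", "satisfactory": "Yes"}\n```'
    data = json.loads(clean_json_output(raw))
    print(data.get('satisfactory', 'no').lower())  # "yes", normalized as in the diff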
@@ -1337,7 +1350,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
  messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
- response = self._chat_completion(messages, temperature=temperature, tools=None, stream=self.stream)
+ # For custom LLMs during reflection, always use non-streaming to ensure complete responses
+ use_stream = self.stream if not self._using_custom_llm else False
+ response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream)
  response_text = response.choices[0].message.content.strip()
  reflection_count += 1
  continue # Continue the loop for more reflections
praisonaiagents/agents/autoagents.py
@@ -214,9 +214,37 @@ Tools: {', '.join(agent_tools)}"""

  return assigned_tools

+ def _validate_config(self, config: AutoAgentsConfig) -> tuple[bool, str]:
+ """
+ Validate that the configuration has proper TaskConfig objects.
+
+ Returns:
+ Tuple of (is_valid, error_message)
+ """
+ for agent_idx, agent in enumerate(config.agents):
+ if not hasattr(agent, 'tasks') or not agent.tasks:
+ return False, f"Agent '{agent.name}' has no tasks defined"
+
+ for task_idx, task in enumerate(agent.tasks):
+ # Check if task is a proper TaskConfig instance
+ if not isinstance(task, TaskConfig):
+ return False, f"Task at index {task_idx} for agent '{agent.name}' is not a proper TaskConfig object"
+
+ # Check required fields
+ if not task.name:
+ return False, f"Task at index {task_idx} for agent '{agent.name}' has no name"
+ if not task.description:
+ return False, f"Task at index {task_idx} for agent '{agent.name}' has no description"
+ if not task.expected_output:
+ return False, f"Task at index {task_idx} for agent '{agent.name}' has no expected_output"
+ if task.tools is None:
+ return False, f"Task at index {task_idx} for agent '{agent.name}' has no tools field"
+
+ return True, ""
+
  def _generate_config(self) -> AutoAgentsConfig:
- """Generate the configuration for agents and tasks"""
- prompt = f"""
+ """Generate the configuration for agents and tasks with retry logic"""
+ base_prompt = f"""
  Generate a configuration for AI agents to accomplish this task: "{self.instructions}"

  The configuration should include:
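The `_validate_config` helper added above presumes Pydantic-style `TaskConfig` and `AutoAgentsConfig` models, whose definitions are outside this diff. A hypothetical minimal `TaskConfig` illustrating what those checks accept and reject:

    from typing import List, Optional
    from pydantic import BaseModel

    # Hypothetical stand-in; the real TaskConfig lives elsewhere in the package
    class TaskConfig(BaseModel):
        name: str
        description: str
        expected_output: str
        tools: Optional[List[str]] = None

    ok = TaskConfig(name="Research", description="Gather sources",
                    expected_output="Bullet list of findings", tools=[])
    bad = "Research the topic"  # a bare string task, the exact shape the retry prompt forbids
    print(isinstance(ok, TaskConfig), isinstance(bad, TaskConfig))  # True False

Note that `tools=[]` passes the `task.tools is None` check, while omitting the field (defaulting to None under this model) would fail it.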
@@ -237,84 +265,161 @@ Requirements:
  4. The process type should match the task requirements
  5. Generate maximum {self.max_agents} agents to handle this task efficiently

- Return the configuration in a structured JSON format matching the AutoAgentsConfig schema.
+ Return the configuration in a structured JSON format matching this exact schema:
+ {{
+ "main_instruction": "Overall goal description",
+ "process_type": "sequential|workflow|hierarchical",
+ "agents": [
+ {{
+ "name": "Agent Name",
+ "role": "Agent Role",
+ "goal": "Agent Goal",
+ "backstory": "Agent Backstory",
+ "tools": ["tool1", "tool2"],
+ "tasks": [
+ {{
+ "name": "Task Name",
+ "description": "Detailed task description",
+ "expected_output": "What the task should produce",
+ "tools": ["tool1", "tool2"]
+ }}
+ ]
+ }}
+ ]
+ }}
+
+ IMPORTANT: Each task MUST be an object with name, description, expected_output, and tools fields, NOT a simple string.
  """

- try:
- # Try to use OpenAI's structured output if available
+ max_retries = 3
+ last_response = None
+ last_error = None
+
+ for attempt in range(max_retries):
+ # Initialize variables for this attempt
  use_openai_structured = False
  client = None

+ # Prepare prompt for this attempt
+ if attempt > 0 and last_response and last_error:
+ # On retry, include the previous response and error
+ prompt = f"""{base_prompt}
+
+ PREVIOUS ATTEMPT FAILED!
+ Your previous response was:
+ ```json
+ {last_response}
+ ```
+
+ Error: {last_error}
+
+ REMEMBER: Tasks MUST be objects with the following structure:
+ {{
+ "name": "Task Name",
+ "description": "Task Description",
+ "expected_output": "Expected Output",
+ "tools": ["tool1", "tool2"]
+ }}
+
+ DO NOT use strings for tasks. Each task MUST be a complete object with all four fields."""
+ else:
+ prompt = base_prompt
+
  try:
  # Check if we have OpenAI API and the model supports structured output
- if self.llm and (self.llm.startswith('gpt-') or self.llm.startswith('o1-') or self.llm.startswith('o3-')):
- # Create a new client instance if custom parameters are provided
- if self.api_key or self.base_url:
- client = OpenAIClient(api_key=self.api_key, base_url=self.base_url)
- else:
- client = get_openai_client()
+ from ..llm import supports_structured_outputs
+ if self.llm and supports_structured_outputs(self.llm):
+ client = get_openai_client()
  use_openai_structured = True
  except:
  # If OpenAI client is not available, we'll use the LLM class
  pass

- if use_openai_structured and client:
- # Use OpenAI's structured output for OpenAI models (backward compatibility)
- config = client.parse_structured_output(
- messages=[
- {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
- {"role": "user", "content": prompt}
- ],
- response_format=AutoAgentsConfig,
- model=self.llm
- )
- else:
- # Use LLM class for all other providers (Gemini, Anthropic, etc.)
- llm_instance = LLM(
- model=self.llm,
- base_url=self.base_url,
- api_key=self.api_key
- )
+ try:
+ if use_openai_structured and client:
+ # Use OpenAI's structured output for OpenAI models (backward compatibility)
+ config = client.parse_structured_output(
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
+ {"role": "user", "content": prompt}
+ ],
+ response_format=AutoAgentsConfig,
+ model=self.llm
+ )
+ # Store the response for potential retry
+ last_response = json.dumps(config.model_dump(), indent=2)
+ else:
+ # Use LLM class for all other providers (Gemini, Anthropic, etc.)
+ llm_instance = LLM(
+ model=self.llm,
+ base_url=self.base_url,
+ api_key=self.api_key
+ )
+
+ response_text = llm_instance.get_response(
+ prompt=prompt,
+ system_prompt="You are a helpful assistant designed to generate AI agent configurations.",
+ output_pydantic=AutoAgentsConfig,
+ temperature=0.7,
+ stream=False,
+ verbose=False
+ )
+
+ # Store the raw response for potential retry
+ last_response = response_text
+
+ # Parse the JSON response
+ try:
+ # First try to parse as is
+ config_dict = json.loads(response_text)
+ config = AutoAgentsConfig(**config_dict)
+ except json.JSONDecodeError:
+ # If that fails, try to extract JSON from the response
+ # Handle cases where the model might wrap JSON in markdown blocks
+ cleaned_response = response_text.strip()
+ if cleaned_response.startswith("```json"):
+ cleaned_response = cleaned_response[7:]
+ if cleaned_response.startswith("```"):
+ cleaned_response = cleaned_response[3:]
+ if cleaned_response.endswith("```"):
+ cleaned_response = cleaned_response[:-3]
+ cleaned_response = cleaned_response.strip()
+
+ config_dict = json.loads(cleaned_response)
+ config = AutoAgentsConfig(**config_dict)

- response_text = llm_instance.response(
- prompt=prompt,
- system_prompt="You are a helpful assistant designed to generate AI agent configurations.",
- output_pydantic=AutoAgentsConfig,
- temperature=0.7,
- stream=False,
- verbose=False
- )
+ # Validate the configuration
+ is_valid, error_msg = self._validate_config(config)
+ if not is_valid:
+ last_error = error_msg
+ if attempt < max_retries - 1:
+ logging.warning(f"Configuration validation failed (attempt {attempt + 1}/{max_retries}): {error_msg}")
+ continue
+ else:
+ raise ValueError(f"Configuration validation failed after {max_retries} attempts: {error_msg}")

- # Parse the JSON response
- try:
- # First try to parse as is
- config_dict = json.loads(response_text)
- config = AutoAgentsConfig(**config_dict)
- except json.JSONDecodeError:
- # If that fails, try to extract JSON from the response
- # Handle cases where the model might wrap JSON in markdown blocks
- cleaned_response = response_text.strip()
- if cleaned_response.startswith("```json"):
- cleaned_response = cleaned_response[7:]
- if cleaned_response.startswith("```"):
- cleaned_response = cleaned_response[3:]
- if cleaned_response.endswith("```"):
- cleaned_response = cleaned_response[:-3]
- cleaned_response = cleaned_response.strip()
-
- config_dict = json.loads(cleaned_response)
- config = AutoAgentsConfig(**config_dict)
-
- # Ensure we have exactly max_agents number of agents
- if len(config.agents) > self.max_agents:
- config.agents = config.agents[:self.max_agents]
- elif len(config.agents) < self.max_agents:
- logging.warning(f"Generated {len(config.agents)} agents, expected {self.max_agents}")
-
- return config
- except Exception as e:
- logging.error(f"Error generating configuration: {e}")
- raise
+ # Ensure we have exactly max_agents number of agents
+ if len(config.agents) > self.max_agents:
+ config.agents = config.agents[:self.max_agents]
+ elif len(config.agents) < self.max_agents:
+ logging.warning(f"Generated {len(config.agents)} agents, expected {self.max_agents}")
+
+ return config
+
+ except ValueError as e:
+ # Re-raise validation errors
+ raise
+ except Exception as e:
+ last_error = str(e)
+ if attempt < max_retries - 1:
+ logging.warning(f"Error generating configuration (attempt {attempt + 1}/{max_retries}): {e}")
+ continue
+ else:
+ logging.error(f"Error generating configuration after {max_retries} attempts: {e}")
+ raise
+
+ # This should never be reached due to the raise statements above
+ raise RuntimeError(f"Failed to generate valid configuration after {max_retries} attempts")

  def _create_agents_and_tasks(self, config: AutoAgentsConfig) -> Tuple[List[Agent], List[Task]]:
  """Create agents and tasks from configuration"""
praisonaiagents/llm/__init__.py
@@ -32,6 +32,10 @@ from .openai_client import (
  ToolCall,
  process_stream_chunks
  )
+ from .model_capabilities import (
+ supports_structured_outputs,
+ supports_streaming_with_tools
+ )

  # Ensure telemetry is disabled after import as well
  try:
@@ -52,5 +56,7 @@ __all__ = [
  "CompletionUsage",
  "ChatCompletion",
  "ToolCall",
- "process_stream_chunks"
+ "process_stream_chunks",
+ "supports_structured_outputs",
+ "supports_streaming_with_tools"
  ]
praisonaiagents/llm/llm.py
@@ -386,8 +386,11 @@ class LLM:
  if self._is_ollama_provider():
  return False

- # OpenAI models support streaming with tools
- if any(self.model.startswith(prefix) for prefix in ["gpt-", "o1-", "o3-"]):
+ # Import the capability check function
+ from .model_capabilities import supports_streaming_with_tools
+
+ # Check if this model supports streaming with tools
+ if supports_streaming_with_tools(self.model):
  return True

  # Anthropic Claude models support streaming with tools
praisonaiagents/llm/model_capabilities.py (new file)
@@ -0,0 +1,90 @@
+ """
+ Model capabilities configuration for different LLM providers.
+ This module defines which models support specific features like structured outputs.
+ """
+
+ # Models that support OpenAI-style structured outputs (response_format with Pydantic models)
+ MODELS_SUPPORTING_STRUCTURED_OUTPUTS = {
+ # OpenAI models
+ "gpt-4o",
+ "gpt-4o-mini",
+ "gpt-4-turbo",
+ "gpt-4-turbo-preview",
+ "gpt-4-turbo-2024-04-09",
+ "gpt-4-1106-preview",
+ "gpt-4-0125-preview",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
+
+ # New/Future OpenAI models (as mentioned by user)
+ "codex-mini",
+ "o3-pro",
+ "gpt-4.5-preview",
+ "o3-mini",
+ "o1",
+ "o1-preview",
+ "o1-mini",
+ "gpt-4.1",
+ "gpt-4.1-nano",
+ "gpt-4.1-mini",
+ "o4-mini",
+ "o3",
+ }
+
+ # Models that explicitly DON'T support structured outputs
+ MODELS_NOT_SUPPORTING_STRUCTURED_OUTPUTS = {
+ # Audio preview models
+ "gpt-4o-audio-preview",
+ "gpt-4o-mini-audio-preview",
+
+ # Legacy o1 models (don't support system messages either)
+ "o1-preview-2024-09-12",
+ "o1-mini-2024-09-12",
+ }
+
+
+ def supports_structured_outputs(model_name: str) -> bool:
+ """
+ Check if a model supports OpenAI-style structured outputs.
+
+ Args:
+ model_name: The name of the model to check
+
+ Returns:
+ bool: True if the model supports structured outputs, False otherwise
+ """
+ if not model_name:
+ return False
+
+ # First check if it's explicitly in the NOT supporting list
+ if model_name in MODELS_NOT_SUPPORTING_STRUCTURED_OUTPUTS:
+ return False
+
+ # Then check if it's in the supporting list
+ if model_name in MODELS_SUPPORTING_STRUCTURED_OUTPUTS:
+ return True
+
+ # For models with version suffixes, check the base model name
+ base_model = model_name.split('-2024-')[0].split('-2025-')[0]
+ if base_model in MODELS_SUPPORTING_STRUCTURED_OUTPUTS:
+ return True
+
+ # Default to False for unknown models
+ return False
+
+
+ def supports_streaming_with_tools(model_name: str) -> bool:
+ """
+ Check if a model supports streaming when tools are provided.
+ Most models that support structured outputs also support streaming with tools.
+
+ Args:
+ model_name: The name of the model to check
+
+ Returns:
+ bool: True if the model supports streaming with tools, False otherwise
+ """
+ # For now, use the same logic as structured outputs
+ # In the future, this could be a separate list if needed
+ return supports_structured_outputs(model_name)
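Since `supports_structured_outputs` and `supports_streaming_with_tools` are re-exported from `praisonaiagents.llm` (see the `__init__.py` hunk above), callers can sanity-check models directly; expected results given the logic above:

    from praisonaiagents.llm import supports_structured_outputs, supports_streaming_with_tools

    print(supports_structured_outputs("gpt-4o"))                # True: exact match
    print(supports_structured_outputs("gpt-4o-2024-11-20"))     # True: date suffix stripped to base "gpt-4o"
    print(supports_structured_outputs("gpt-4o-audio-preview"))  # False: explicitly excluded
    print(supports_structured_outputs("claude-3-5-sonnet"))     # False: unknown models default to False
    print(supports_streaming_with_tools("o3-mini"))             # True: currently mirrors structured-output support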
praisonaiagents-0.0.121.dist-info/METADATA → praisonaiagents-0.0.123.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.121
+ Version: 0.0.123
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
praisonaiagents-0.0.121.dist-info/RECORD → praisonaiagents-0.0.123.dist-info/RECORD
@@ -3,20 +3,21 @@ praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9
  praisonaiagents/main.py,sha256=bamnEu5PaekloGi52VqAFclm-HzjEVeKtWF0Zpdmfzs,15479
  praisonaiagents/session.py,sha256=d-CZPYikOHb0q-H9f_IWKJsypnQfz1YKeLLkyxs6oDo,15532
  praisonaiagents/agent/__init__.py,sha256=IhIDtAkfJ99cxbttwou52coih_AejS2-jpazsX6LbDY,350
- praisonaiagents/agent/agent.py,sha256=_ROVyOTPBMB5Porv4YvZ4-kKWr4-tGMbSN7V8uDWZgk,109619
+ praisonaiagents/agent/agent.py,sha256=BZx0iCP4hHfKFlfGlkZtanBJDDBDZe54dhf4Oskhmhw,110427
  praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
  praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
  praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
  praisonaiagents/agents/agents.py,sha256=WnptTEMSDMAM30Ka6rOAu6rBD-ZLev3qphb1a3BbP1g,63301
- praisonaiagents/agents/autoagents.py,sha256=gLzNsYkvefY667p3xbbvgEBLu4VzEZeyh3a_3yxt1e8,16478
+ praisonaiagents/agents/autoagents.py,sha256=GHHCGjrKFIFkmz6bhiKqaGKWJCbHHcF_RSGmRnc5PlA,20766
  praisonaiagents/guardrails/__init__.py,sha256=HA8zhp-KRHTxo0194MUwXOUJjPyjOu7E3d7xUIKYVVY,310
  praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXCFIyZXZVY4f0hnQyWc,1352
  praisonaiagents/guardrails/llm_guardrail.py,sha256=MTTqmYDdZX-18QN9T17T5P_6H2qnV8GVgymJufW1WuM,3277
  praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
  praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
  praisonaiagents/knowledge/knowledge.py,sha256=OKPar-XGyAp1ndmbOOdCgqFnTCqpOThYVSIZRxZyP58,15683
- praisonaiagents/llm/__init__.py,sha256=6lTeQ8jWi1-KiwjCDCmkHo2e-bRLq2dP0s5iJWqjO3s,1421
- praisonaiagents/llm/llm.py,sha256=8cDahPVMPI882J0psA1cXreJGXvO33eSOpMNy7FLCS4,107383
+ praisonaiagents/llm/__init__.py,sha256=2rCx-Vfgg881XhVnOSDIt8mDab3Nj_BkHwDP4Bbbudc,1596
+ praisonaiagents/llm/llm.py,sha256=pP8mu9E4oWYFmjH4OJjvD0SlFz8AxpP2WUOuGN2oiL8,107486
+ praisonaiagents/llm/model_capabilities.py,sha256=poxOxATUOi9XPTx3v6BPnXvSfikWSA9NciWQVuPU7Zg,2586
  praisonaiagents/llm/openai_client.py,sha256=0JvjCDHoH8I8kIt5vvObARkGdVaPWdTIv_FoEQ5EQPA,48973
  praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
  praisonaiagents/mcp/mcp.py,sha256=-fFx4MHffnN2woLnnV7Pzx3-1SFkn2j8Gp5F5ZIwKJ0,19698
@@ -53,7 +54,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
  praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
  praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
  praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
- praisonaiagents-0.0.121.dist-info/METADATA,sha256=okAbJt5iVUK3GgBI66uk_0sKZSSs2orSgKepbSpQb-8,1669
- praisonaiagents-0.0.121.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- praisonaiagents-0.0.121.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
- praisonaiagents-0.0.121.dist-info/RECORD,,
+ praisonaiagents-0.0.123.dist-info/METADATA,sha256=gTnevKwNN3D6hq-AlVNWPcvyKZAGABLsHYbKkSj5YLQ,1669
+ praisonaiagents-0.0.123.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ praisonaiagents-0.0.123.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+ praisonaiagents-0.0.123.dist-info/RECORD,,