praisonaiagents 0.0.122__py3-none-any.whl → 0.0.123__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -296,6 +296,10 @@ IMPORTANT: Each task MUST be an object with name, description, expected_output,
296
296
  last_error = None
297
297
 
298
298
  for attempt in range(max_retries):
299
+ # Initialize variables for this attempt
300
+ use_openai_structured = False
301
+ client = None
302
+
299
303
  # Prepare prompt for this attempt
300
304
  if attempt > 0 and last_response and last_error:
301
305
  # On retry, include the previous response and error
@@ -322,23 +326,16 @@ DO NOT use strings for tasks. Each task MUST be a complete object with all four
322
326
  prompt = base_prompt
323
327
 
324
328
  try:
325
- # Try to use OpenAI's structured output if available
326
- use_openai_structured = False
327
- client = None
328
-
329
- try:
330
- # Check if we have OpenAI API and the model supports structured output
331
- if self.llm and (self.llm.startswith('gpt-') or self.llm.startswith('o1-') or self.llm.startswith('o3-')):
332
- # Create a new client instance if custom parameters are provided
333
- if self.api_key or self.base_url:
334
- client = OpenAIClient(api_key=self.api_key, base_url=self.base_url)
335
- else:
336
- client = get_openai_client()
337
- use_openai_structured = True
338
- except:
339
- # If OpenAI client is not available, we'll use the LLM class
340
- pass
341
-
329
+ # Check if we have OpenAI API and the model supports structured output
330
+ from ..llm import supports_structured_outputs
331
+ if self.llm and supports_structured_outputs(self.llm):
332
+ client = get_openai_client()
333
+ use_openai_structured = True
334
+ except:
335
+ # If OpenAI client is not available, we'll use the LLM class
336
+ pass
337
+
338
+ try:
342
339
  if use_openai_structured and client:
343
340
  # Use OpenAI's structured output for OpenAI models (backward compatibility)
344
341
  config = client.parse_structured_output(
@@ -359,7 +356,7 @@ DO NOT use strings for tasks. Each task MUST be a complete object with all four
359
356
  api_key=self.api_key
360
357
  )
361
358
 
362
- response_text = llm_instance.response(
359
+ response_text = llm_instance.get_response(
363
360
  prompt=prompt,
364
361
  system_prompt="You are a helpful assistant designed to generate AI agent configurations.",
365
362
  output_pydantic=AutoAgentsConfig,
@@ -32,6 +32,10 @@ from .openai_client import (
32
32
  ToolCall,
33
33
  process_stream_chunks
34
34
  )
35
+ from .model_capabilities import (
36
+ supports_structured_outputs,
37
+ supports_streaming_with_tools
38
+ )
35
39
 
36
40
  # Ensure telemetry is disabled after import as well
37
41
  try:
@@ -52,5 +56,7 @@ __all__ = [
52
56
  "CompletionUsage",
53
57
  "ChatCompletion",
54
58
  "ToolCall",
55
- "process_stream_chunks"
59
+ "process_stream_chunks",
60
+ "supports_structured_outputs",
61
+ "supports_streaming_with_tools"
56
62
  ]
@@ -386,8 +386,11 @@ class LLM:
386
386
  if self._is_ollama_provider():
387
387
  return False
388
388
 
389
- # OpenAI models support streaming with tools
390
- if any(self.model.startswith(prefix) for prefix in ["gpt-", "o1-", "o3-"]):
389
+ # Import the capability check function
390
+ from .model_capabilities import supports_streaming_with_tools
391
+
392
+ # Check if this model supports streaming with tools
393
+ if supports_streaming_with_tools(self.model):
391
394
  return True
392
395
 
393
396
  # Anthropic Claude models support streaming with tools
@@ -0,0 +1,90 @@
1
+ """
2
+ Model capabilities configuration for different LLM providers.
3
+ This module defines which models support specific features like structured outputs.
4
+ """
5
+
6
# Models that support OpenAI-style structured outputs (response_format with Pydantic models)
MODELS_SUPPORTING_STRUCTURED_OUTPUTS = {
    # OpenAI models
    "gpt-4o",
    "gpt-4o-mini",
    "gpt-4-turbo",
    "gpt-4-turbo-preview",
    "gpt-4-turbo-2024-04-09",
    "gpt-4-1106-preview",
    "gpt-4-0125-preview",
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-1106",
    "gpt-3.5-turbo-0125",

    # Newer OpenAI model families
    "codex-mini",
    "o3-pro",
    "gpt-4.5-preview",
    "o3-mini",
    "o1",
    "o1-preview",
    "o1-mini",
    "gpt-4.1",
    "gpt-4.1-nano",
    "gpt-4.1-mini",
    "o4-mini",
    "o3",
}

# Models that explicitly DON'T support structured outputs.
# This deny list takes precedence over the allow list above.
MODELS_NOT_SUPPORTING_STRUCTURED_OUTPUTS = {
    # Audio preview models
    "gpt-4o-audio-preview",
    "gpt-4o-mini-audio-preview",

    # Legacy o1 snapshots (don't support system messages either)
    "o1-preview-2024-09-12",
    "o1-mini-2024-09-12",
}


def supports_structured_outputs(model_name: str) -> bool:
    """
    Check if a model supports OpenAI-style structured outputs.

    Resolution order:
      1. Empty / ``None`` model names never support structured outputs.
      2. An exact match in the explicit deny list returns False.
      3. An exact match in the allow list returns True.
      4. Otherwise a dated snapshot suffix (e.g. ``-2024-08-06``) is
         stripped and the base name is retried against the allow list.
      5. Unknown models default to False.

    Args:
        model_name: The name of the model to check

    Returns:
        bool: True if the model supports structured outputs, False otherwise
    """
    import re  # local import keeps module import side-effect free

    if not model_name:
        return False

    # Explicit deny list wins over everything else
    if model_name in MODELS_NOT_SUPPORTING_STRUCTURED_OUTPUTS:
        return False

    # Exact allow-list match
    if model_name in MODELS_SUPPORTING_STRUCTURED_OUTPUTS:
        return True

    # Strip a dated snapshot infix/suffix for ANY 20xx year.  The previous
    # implementation split only on '-2024-' and '-2025-', which would have
    # silently stopped matching 2026+ snapshots of known-good models.
    base_model = re.split(r'-20\d{2}-', model_name, maxsplit=1)[0]
    if base_model in MODELS_SUPPORTING_STRUCTURED_OUTPUTS:
        return True

    # Default to False for unknown models
    return False
75
+
76
+
77
+ def supports_streaming_with_tools(model_name: str) -> bool:
78
+ """
79
+ Check if a model supports streaming when tools are provided.
80
+ Most models that support structured outputs also support streaming with tools.
81
+
82
+ Args:
83
+ model_name: The name of the model to check
84
+
85
+ Returns:
86
+ bool: True if the model supports streaming with tools, False otherwise
87
+ """
88
+ # For now, use the same logic as structured outputs
89
+ # In the future, this could be a separate list if needed
90
+ return supports_structured_outputs(model_name)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: praisonaiagents
3
- Version: 0.0.122
3
+ Version: 0.0.123
4
4
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
5
5
  Author: Mervin Praison
6
6
  Requires-Python: >=3.10
@@ -8,15 +8,16 @@ praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-7
8
8
  praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
9
9
  praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
10
10
  praisonaiagents/agents/agents.py,sha256=WnptTEMSDMAM30Ka6rOAu6rBD-ZLev3qphb1a3BbP1g,63301
11
- praisonaiagents/agents/autoagents.py,sha256=1stF8z94eyVg6hyfrLgdArlarftz_OFvEDtRMsGZFvg,21094
11
+ praisonaiagents/agents/autoagents.py,sha256=GHHCGjrKFIFkmz6bhiKqaGKWJCbHHcF_RSGmRnc5PlA,20766
12
12
  praisonaiagents/guardrails/__init__.py,sha256=HA8zhp-KRHTxo0194MUwXOUJjPyjOu7E3d7xUIKYVVY,310
13
13
  praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXCFIyZXZVY4f0hnQyWc,1352
14
14
  praisonaiagents/guardrails/llm_guardrail.py,sha256=MTTqmYDdZX-18QN9T17T5P_6H2qnV8GVgymJufW1WuM,3277
15
15
  praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
16
16
  praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
17
17
  praisonaiagents/knowledge/knowledge.py,sha256=OKPar-XGyAp1ndmbOOdCgqFnTCqpOThYVSIZRxZyP58,15683
18
- praisonaiagents/llm/__init__.py,sha256=6lTeQ8jWi1-KiwjCDCmkHo2e-bRLq2dP0s5iJWqjO3s,1421
19
- praisonaiagents/llm/llm.py,sha256=8cDahPVMPI882J0psA1cXreJGXvO33eSOpMNy7FLCS4,107383
18
+ praisonaiagents/llm/__init__.py,sha256=2rCx-Vfgg881XhVnOSDIt8mDab3Nj_BkHwDP4Bbbudc,1596
19
+ praisonaiagents/llm/llm.py,sha256=pP8mu9E4oWYFmjH4OJjvD0SlFz8AxpP2WUOuGN2oiL8,107486
20
+ praisonaiagents/llm/model_capabilities.py,sha256=poxOxATUOi9XPTx3v6BPnXvSfikWSA9NciWQVuPU7Zg,2586
20
21
  praisonaiagents/llm/openai_client.py,sha256=0JvjCDHoH8I8kIt5vvObARkGdVaPWdTIv_FoEQ5EQPA,48973
21
22
  praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
22
23
  praisonaiagents/mcp/mcp.py,sha256=-fFx4MHffnN2woLnnV7Pzx3-1SFkn2j8Gp5F5ZIwKJ0,19698
@@ -53,7 +54,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
53
54
  praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
54
55
  praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
55
56
  praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
56
- praisonaiagents-0.0.122.dist-info/METADATA,sha256=pU6W0akH1O1raC15FOsFQW3GXuflwpznV2ij10vYUP4,1669
57
- praisonaiagents-0.0.122.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
58
- praisonaiagents-0.0.122.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
59
- praisonaiagents-0.0.122.dist-info/RECORD,,
57
+ praisonaiagents-0.0.123.dist-info/METADATA,sha256=gTnevKwNN3D6hq-AlVNWPcvyKZAGABLsHYbKkSj5YLQ,1669
58
+ praisonaiagents-0.0.123.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
59
+ praisonaiagents-0.0.123.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
60
+ praisonaiagents-0.0.123.dist-info/RECORD,,