praisonaiagents 0.0.110__py3-none-any.whl → 0.0.112__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
praisonaiagents/llm/llm.py

@@ -311,6 +311,58 @@ class LLM:
         # This ensures we make a single non-streaming call rather than risk
         # missing tool calls or making duplicate calls
         return False
+
+    def _build_messages(self, prompt, system_prompt=None, chat_history=None, output_json=None, output_pydantic=None):
+        """Build messages list for LLM completion. Works for both sync and async.
+
+        Args:
+            prompt: The user prompt (str or list)
+            system_prompt: Optional system prompt
+            chat_history: Optional list of previous messages
+            output_json: Optional Pydantic model for JSON output
+            output_pydantic: Optional Pydantic model for JSON output (alias)
+
+        Returns:
+            tuple: (messages list, original prompt)
+        """
+        messages = []
+
+        # Handle system prompt
+        if system_prompt:
+            # Append JSON schema if needed
+            if output_json:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
+            elif output_pydantic:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
+
+            # Skip system messages for legacy o1 models as they don't support them
+            if not self._needs_system_message_skip():
+                messages.append({"role": "system", "content": system_prompt})
+
+        # Add chat history if provided
+        if chat_history:
+            messages.extend(chat_history)
+
+        # Handle prompt modifications for JSON output
+        original_prompt = prompt
+        if output_json or output_pydantic:
+            if isinstance(prompt, str):
+                prompt = prompt + "\nReturn ONLY a valid JSON object. No other text or explanation."
+            elif isinstance(prompt, list):
+                # Create a copy to avoid modifying the original
+                prompt = prompt.copy()
+                for item in prompt:
+                    if item.get("type") == "text":
+                        item["text"] = item["text"] + "\nReturn ONLY a valid JSON object. No other text or explanation."
+                        break
+
+        # Add prompt to messages
+        if isinstance(prompt, list):
+            messages.append({"role": "user", "content": prompt})
+        else:
+            messages.append({"role": "user", "content": prompt})
+
+        return messages, original_prompt
 
     def get_response(
         self,
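
For orientation, a minimal usage sketch of the new helper. The import path and the LLM constructor signature are assumptions for illustration; this diff only shows the helper itself.

    # Sketch only: `from praisonaiagents.llm import LLM` and `LLM(model=...)`
    # are assumed here, not shown in this diff.
    from pydantic import BaseModel
    from praisonaiagents.llm import LLM

    class Answer(BaseModel):
        text: str

    llm = LLM(model="gpt-4o-mini")
    messages, original_prompt = llm._build_messages(
        prompt="What is 2 + 2?",
        system_prompt="You are a calculator.",
        chat_history=[{"role": "user", "content": "hi"},
                      {"role": "assistant", "content": "hello"}],
        output_json=Answer,
    )
    # messages[0]: the system prompt with Answer's JSON schema appended
    # messages[1:3]: the chat history, unchanged
    # messages[-1]: the user prompt plus the "Return ONLY a valid JSON object" suffix
    # original_prompt: the prompt exactly as passed in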
@@ -421,36 +473,14 @@ class LLM:
         if not formatted_tools:
             formatted_tools = None
 
-        # Build messages list
-        messages = []
-        if system_prompt:
-            if output_json:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
-            elif output_pydantic:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-            # Skip system messages for legacy o1 models as they don't support them
-            if not self._needs_system_message_skip():
-                messages.append({"role": "system", "content": system_prompt})
-
-        if chat_history:
-            messages.extend(chat_history)
-
-        # Handle prompt modifications for JSON output
-        original_prompt = prompt
-        if output_json or output_pydantic:
-            if isinstance(prompt, str):
-                prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
-            elif isinstance(prompt, list):
-                for item in prompt:
-                    if item["type"] == "text":
-                        item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                        break
-
-        # Add prompt to messages
-        if isinstance(prompt, list):
-            messages.append({"role": "user", "content": prompt})
-        else:
-            messages.append({"role": "user", "content": prompt})
+        # Build messages list using shared helper
+        messages, original_prompt = self._build_messages(
+            prompt=prompt,
+            system_prompt=system_prompt,
+            chat_history=chat_history,
+            output_json=output_json,
+            output_pydantic=output_pydantic
+        )
 
         start_time = time.time()
         reflection_count = 0
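
One behavioral nuance of the consolidation: the inline code removed here indexed item["type"] directly and appended to the caller's list items in place, while the helper uses item.get("type") and works on prompt.copy(). An illustrative sketch (not package code) of the difference:

    # Illustrative only: multimodal content lists can contain items without
    # a "type" key; .get() tolerates them where ["type"] raised KeyError.
    items = [
        {"image_url": {"url": "https://example.com/x.png"}},  # no "type" key
        {"type": "text", "text": "Describe the image"},
    ]
    for item in items:
        if item.get("type") == "text":  # old inline code: item["type"] -> KeyError
            item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
            break

Note that prompt.copy() is a shallow copy: the caller's list object itself is left untouched, but the dicts inside it are still shared.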
@@ -1160,36 +1190,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
         litellm.set_verbose = False
 
-        # Build messages list
-        messages = []
-        if system_prompt:
-            if output_json:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
-            elif output_pydantic:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-            # Skip system messages for legacy o1 models as they don't support them
-            if not self._needs_system_message_skip():
-                messages.append({"role": "system", "content": system_prompt})
-
-        if chat_history:
-            messages.extend(chat_history)
-
-        # Handle prompt modifications for JSON output
-        original_prompt = prompt
-        if output_json or output_pydantic:
-            if isinstance(prompt, str):
-                prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
-            elif isinstance(prompt, list):
-                for item in prompt:
-                    if item["type"] == "text":
-                        item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                        break
-
-        # Add prompt to messages
-        if isinstance(prompt, list):
-            messages.append({"role": "user", "content": prompt})
-        else:
-            messages.append({"role": "user", "content": prompt})
+        # Build messages list using shared helper
+        messages, original_prompt = self._build_messages(
+            prompt=prompt,
+            system_prompt=system_prompt,
+            chat_history=chat_history,
+            output_json=output_json,
+            output_pydantic=output_pydantic
+        )
 
         start_time = time.time()
         reflection_count = 0
@@ -1901,18 +1909,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         }
         logger.debug(f"Response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
 
-        # Build messages list
-        messages = []
-        if system_prompt:
-            # Skip system messages for legacy o1 models as they don't support them
-            if not self._needs_system_message_skip():
-                messages.append({"role": "system", "content": system_prompt})
-
-        # Add prompt to messages
-        if isinstance(prompt, list):
-            messages.append({"role": "user", "content": prompt})
-        else:
-            messages.append({"role": "user", "content": prompt})
+        # Build messages list using shared helper (simplified version without JSON output)
+        messages, _ = self._build_messages(
+            prompt=prompt,
+            system_prompt=system_prompt
+        )
 
         # Get response from LiteLLM
         if stream:
@@ -2009,18 +2010,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         }
         logger.debug(f"Async response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
 
-        # Build messages list
-        messages = []
-        if system_prompt:
-            # Skip system messages for legacy o1 models as they don't support them
-            if not self._needs_system_message_skip():
-                messages.append({"role": "system", "content": system_prompt})
-
-        # Add prompt to messages
-        if isinstance(prompt, list):
-            messages.append({"role": "user", "content": prompt})
-        else:
-            messages.append({"role": "user", "content": prompt})
+        # Build messages list using shared helper (simplified version without JSON output)
+        messages, _ = self._build_messages(
+            prompt=prompt,
+            system_prompt=system_prompt
+        )
 
         # Get response from LiteLLM
         if stream:
praisonaiagents/telemetry/integration.py

@@ -140,7 +140,7 @@ def instrument_workflow(workflow: 'PraisonAIAgents', telemetry: Optional['Minima
         task = None
         try:
             # Get task info
-            if hasattr(workflow, 'tasks') and task_id < len(workflow.tasks):
+            if hasattr(workflow, 'tasks') and isinstance(task_id, int) and task_id < len(workflow.tasks):
                 task = workflow.tasks[task_id]
 
             result = original_execute_task(task_id, *args, **kwargs)
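
The added isinstance check matters because Python 3 refuses ordering comparisons between str and int: if a caller identifies a task by name rather than by index, the old guard raised TypeError inside the instrumented wrapper. A minimal illustration:

    # Minimal illustration of the failure mode the guard prevents.
    tasks = ["research", "write"]

    task_id = "research"        # a task name, not a list index
    # task_id < len(tasks)      # TypeError: '<' not supported between
    #                           # instances of 'str' and 'int'
    if isinstance(task_id, int) and task_id < len(tasks):
        task = tasks[task_id]   # only index when task_id really is an int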
praisonaiagents-0.0.110.dist-info/METADATA → praisonaiagents-0.0.112.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.110
+Version: 0.0.112
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
praisonaiagents-0.0.110.dist-info/RECORD → praisonaiagents-0.0.112.dist-info/RECORD

@@ -16,7 +16,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=OKPar-XGyAp1ndmbOOdCgqFnTCqpOThYVSIZRxZyP58,15683
 praisonaiagents/llm/__init__.py,sha256=bSywIHBHH0YUf4hSx-FmFXkRv2g1Rlhuk-gjoImE8j8,925
-praisonaiagents/llm/llm.py,sha256=e6lER7PVJsmY7ytzTG-PoJgriRjNjUP1_edaecxRLB4,113777
+praisonaiagents/llm/llm.py,sha256=Gk4ILbegZtxc3iFCI2H2Zglfc73xNhsEjW1fWu12ltM,113025
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
 praisonaiagents/mcp/mcp.py,sha256=-fFx4MHffnN2woLnnV7Pzx3-1SFkn2j8Gp5F5ZIwKJ0,19698
 praisonaiagents/mcp/mcp_sse.py,sha256=z8TMFhW9xuLQ7QnpOa3n1-nSHt0-Bf27qso0u4qxYSY,8357
@@ -27,7 +27,7 @@ praisonaiagents/process/process.py,sha256=gxhMXG3s4CzaREyuwE5zxCMx2Wp_b_Wd53tDfk
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
 praisonaiagents/task/task.py,sha256=imqJ8wzZzVyUSym2EyF2tC-vAsV1UdfI_P3YM5mqAiw,20786
 praisonaiagents/telemetry/__init__.py,sha256=5iAOrj_N_cKMmh2ltWGYs3PfOYt_jcwUoElW8fTAIsc,3062
-praisonaiagents/telemetry/integration.py,sha256=36vvYac8tW92YzQYbBeKWKM8JC9IiizlxhUy3AFqPlA,8667
+praisonaiagents/telemetry/integration.py,sha256=8h8TDlPFTbsBmU5rIYNOibJbwEEEWmzS1ENE9uPTvvg,8696
 praisonaiagents/telemetry/telemetry.py,sha256=SAEK5lrHn-Rb3nk_Yx1sjAdRxqT63ycyNRv3ZGh9Rck,11812
 praisonaiagents/tools/README.md,sha256=bIQGTSqQbC8l_UvTAnKbnh1TxrybSFGbCqxnhvDwkE4,4450
 praisonaiagents/tools/__init__.py,sha256=Rrgi7_3-yLHpfBB81WUi0-wD_wb_BsukwHVdjDYAF-0,9316
@@ -52,7 +52,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.110.dist-info/METADATA,sha256=YvEuh5oBB5MkA7fXyYwD6fCJxMqOcI5L34pJ7lQvm8M,1669
-praisonaiagents-0.0.110.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-praisonaiagents-0.0.110.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.110.dist-info/RECORD,,
+praisonaiagents-0.0.112.dist-info/METADATA,sha256=qKFv-nDM6_21eStVCY503Btr0plIKKhVjZQOg6V2jDM,1669
+praisonaiagents-0.0.112.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.112.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.112.dist-info/RECORD,,
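
For reference, the sha256= values in RECORD follow the wheel format (PEP 376 style): the SHA-256 digest of the file, urlsafe-base64 encoded with the trailing "=" padding stripped. A quick way to verify an entry against an unpacked wheel:

    # Recompute a RECORD-style hash for a file from the unpacked wheel.
    import base64
    import hashlib

    def record_hash(path: str) -> str:
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    # For 0.0.112 this should reproduce the llm.py entry above:
    # sha256=Gk4ILbegZtxc3iFCI2H2Zglfc73xNhsEjW1fWu12ltM
    print(record_hash("praisonaiagents/llm/llm.py"))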