praisonaiagents 0.0.119__py3-none-any.whl → 0.0.120__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in the supported public registries. It is provided for informational purposes only.

praisonaiagents/agent/agent.py
@@ -1233,7 +1233,8 @@ Your Goal: {self.goal}
  # Add to chat history and return raw response
  self.chat_history.append({"role": "user", "content": original_prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- if self.verbose:
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
  display_interaction(original_prompt, response_text, markdown=self.markdown,
  generation_time=time.time() - start_time, console=self.console)
  return response_text
@@ -1243,7 +1244,9 @@ Your Goal: {self.goal}
  self.chat_history.append({"role": "assistant", "content": response_text})
  if self.verbose:
  logging.debug(f"Agent {self.name} final response: {response_text}")
- display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  # Return only reasoning content if reasoning_steps is True
  if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
  # Apply guardrail to reasoning content
@@ -1279,7 +1282,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  # Return the original response without reflection
  self.chat_history.append({"role": "user", "content": prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  return response_text

  reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
@@ -1302,7 +1307,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
  self.chat_history.append({"role": "user", "content": prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  # Apply guardrail validation after satisfactory reflection
  try:
  validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
@@ -1317,7 +1324,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
  self.chat_history.append({"role": "user", "content": prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  # Apply guardrail validation after max reflections
  try:
  validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
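
The five agent.py hunks above apply one and the same guard: display_interaction now runs only when the agent is verbose and is not backed by a custom LLM, since the custom-LLM path already renders its own output and calling it again here would print the exchange twice. A minimal sketch of the intent, using a hypothetical stub rather than the real Agent class:

    import time

    def display_interaction(prompt, response, generation_time=0.0):
        # Stand-in for praisonaiagents.main.display_interaction
        print(f"[{generation_time:.2f}s] {prompt} -> {response}")

    class AgentStub:
        # Hypothetical, simplified stand-in for the Agent class
        def __init__(self, verbose=True, using_custom_llm=False):
            self.verbose = verbose
            self._using_custom_llm = using_custom_llm

        def finish(self, prompt, response, start_time):
            # Display only if verbose AND not using a custom LLM,
            # otherwise the custom-LLM path would render the same output twice.
            if self.verbose and not self._using_custom_llm:
                display_interaction(prompt, response, generation_time=time.time() - start_time)
            return response

    AgentStub(using_custom_llm=False).finish("hi", "hello", time.time())  # prints once
    AgentStub(using_custom_llm=True).finish("hi", "hello", time.time())   # prints nothing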

praisonaiagents/agents/autoagents.py
@@ -13,7 +13,8 @@ import logging
  import os
  from pydantic import BaseModel, ConfigDict
  from ..main import display_instruction, display_tool_call, display_interaction
- from ..llm import get_openai_client
+ from ..llm import get_openai_client, LLM
+ import json

  # Define Pydantic models for structured output
  class TaskConfig(BaseModel):
@@ -238,33 +239,74 @@ Return the configuration in a structured JSON format matching the AutoAgentsConf
  """

  try:
- # Get OpenAI client
+ # Try to use OpenAI's structured output if available
+ use_openai_structured = False
+ client = None
+
  try:
- client = get_openai_client()
- except ValueError as e:
- # AutoAgents requires OpenAI for structured output generation
- raise ValueError(
- "AutoAgents requires OpenAI API for automatic agent generation. "
- "Please set OPENAI_API_KEY environment variable or use PraisonAIAgents class directly "
- "with manually configured agents for non-OpenAI providers."
- ) from e
+ # Check if we have OpenAI API and the model supports structured output
+ if self.llm and (self.llm.startswith('gpt-') or self.llm.startswith('o1-') or self.llm.startswith('o3-')):
+ client = get_openai_client()
+ use_openai_structured = True
+ except:
+ # If OpenAI client is not available, we'll use the LLM class
+ pass
+
+ if use_openai_structured and client:
+ # Use OpenAI's structured output for OpenAI models (backward compatibility)
+ response = client.beta.chat.completions.parse(
+ model=self.llm,
+ response_format=AutoAgentsConfig,
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
+ {"role": "user", "content": prompt}
+ ]
+ )
+ config = response.choices[0].message.parsed
+ else:
+ # Use LLM class for all other providers (Gemini, Anthropic, etc.)
+ llm_instance = LLM(
+ model=self.llm,
+ base_url=self.base_url,
+ api_key=self.api_key
+ )

- response = client.beta.chat.completions.parse(
- model=self.llm,
- response_format=AutoAgentsConfig,
- messages=[
- {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
- {"role": "user", "content": prompt}
- ]
- )
+ response_text = llm_instance.response(
+ prompt=prompt,
+ system_prompt="You are a helpful assistant designed to generate AI agent configurations.",
+ output_pydantic=AutoAgentsConfig,
+ temperature=0.7,
+ stream=False,
+ verbose=False
+ )
+
+ # Parse the JSON response
+ try:
+ # First try to parse as is
+ config_dict = json.loads(response_text)
+ config = AutoAgentsConfig(**config_dict)
+ except json.JSONDecodeError:
+ # If that fails, try to extract JSON from the response
+ # Handle cases where the model might wrap JSON in markdown blocks
+ cleaned_response = response_text.strip()
+ if cleaned_response.startswith("```json"):
+ cleaned_response = cleaned_response[7:]
+ if cleaned_response.startswith("```"):
+ cleaned_response = cleaned_response[3:]
+ if cleaned_response.endswith("```"):
+ cleaned_response = cleaned_response[:-3]
+ cleaned_response = cleaned_response.strip()
+
+ config_dict = json.loads(cleaned_response)
+ config = AutoAgentsConfig(**config_dict)

  # Ensure we have exactly max_agents number of agents
- if len(response.choices[0].message.parsed.agents) > self.max_agents:
- response.choices[0].message.parsed.agents = response.choices[0].message.parsed.agents[:self.max_agents]
- elif len(response.choices[0].message.parsed.agents) < self.max_agents:
- logging.warning(f"Generated {len(response.choices[0].message.parsed.agents)} agents, expected {self.max_agents}")
+ if len(config.agents) > self.max_agents:
+ config.agents = config.agents[:self.max_agents]
+ elif len(config.agents) < self.max_agents:
+ logging.warning(f"Generated {len(config.agents)} agents, expected {self.max_agents}")

- return response.choices[0].message.parsed
+ return config
  except Exception as e:
  logging.error(f"Error generating configuration: {e}")
  raise
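
The fallback branch added here first tries json.loads on the raw response and, failing that, strips a surrounding markdown code fence before parsing again. A rough standalone sketch of that recovery logic, with a hypothetical ConfigStub in place of AutoAgentsConfig:

    import json
    from pydantic import BaseModel

    class ConfigStub(BaseModel):
        # Hypothetical stand-in for AutoAgentsConfig; the real model has more fields.
        main_instruction: str

    def parse_config(response_text: str) -> ConfigStub:
        try:
            # First try to parse the response as-is
            return ConfigStub(**json.loads(response_text))
        except json.JSONDecodeError:
            # The model may have wrapped the JSON in a ```json ... ``` block
            cleaned = response_text.strip()
            if cleaned.startswith("```json"):
                cleaned = cleaned[7:]
            if cleaned.startswith("```"):
                cleaned = cleaned[3:]
            if cleaned.endswith("```"):
                cleaned = cleaned[:-3]
            return ConfigStub(**json.loads(cleaned.strip()))

    print(parse_config('```json\n{"main_instruction": "research topic X"}\n```'))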

praisonaiagents/llm/llm.py
@@ -87,6 +87,65 @@ class LLM:
  "llama-3.2-90b-text-preview": 6144 # 8,192 actual
  }

+ def _log_llm_config(self, method_name: str, **config):
+ """Centralized debug logging for LLM configuration and parameters.
+
+ Args:
+ method_name: The name of the method calling this logger (e.g., '__init__', 'get_response')
+ **config: Configuration parameters to log
+ """
+ # Check for debug logging - either global debug level OR explicit verbose mode
+ verbose = config.get('verbose', self.verbose if hasattr(self, 'verbose') else False)
+ should_log = logging.getLogger().getEffectiveLevel() == logging.DEBUG or (not isinstance(verbose, bool) and verbose >= 10)
+
+ if should_log:
+ # Mask sensitive information
+ safe_config = config.copy()
+ if 'api_key' in safe_config:
+ safe_config['api_key'] = "***" if safe_config['api_key'] is not None else None
+ if 'extra_settings' in safe_config and isinstance(safe_config['extra_settings'], dict):
+ safe_config['extra_settings'] = {k: v for k, v in safe_config['extra_settings'].items() if k not in ["api_key"]}
+
+ # Handle special formatting for certain fields
+ if 'prompt' in safe_config:
+ prompt = safe_config['prompt']
+ # Convert to string first for consistent logging behavior
+ prompt_str = str(prompt) if not isinstance(prompt, str) else prompt
+ if len(prompt_str) > 100:
+ safe_config['prompt'] = prompt_str[:100] + "..."
+ else:
+ safe_config['prompt'] = prompt_str
+ if 'system_prompt' in safe_config:
+ sp = safe_config['system_prompt']
+ if sp and isinstance(sp, str) and len(sp) > 100:
+ safe_config['system_prompt'] = sp[:100] + "..."
+ if 'chat_history' in safe_config:
+ ch = safe_config['chat_history']
+ safe_config['chat_history'] = f"[{len(ch)} messages]" if ch else None
+ if 'tools' in safe_config:
+ tools = safe_config['tools']
+ # Check if tools is iterable before processing
+ if tools and hasattr(tools, '__iter__') and not isinstance(tools, str):
+ safe_config['tools'] = [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools]
+ else:
+ safe_config['tools'] = None
+ if 'output_json' in safe_config:
+ oj = safe_config['output_json']
+ safe_config['output_json'] = str(oj.__class__.__name__) if oj else None
+ if 'output_pydantic' in safe_config:
+ op = safe_config['output_pydantic']
+ safe_config['output_pydantic'] = str(op.__class__.__name__) if op else None
+
+ # Log based on method name - check more specific conditions first
+ if method_name == '__init__':
+ logging.debug(f"LLM instance initialized with: {json.dumps(safe_config, indent=2, default=str)}")
+ elif "parameters" in method_name:
+ logging.debug(f"{method_name}: {json.dumps(safe_config, indent=2, default=str)}")
+ elif "_async" in method_name:
+ logging.debug(f"LLM async instance configuration: {json.dumps(safe_config, indent=2, default=str)}")
+ else:
+ logging.debug(f"{method_name} configuration: {json.dumps(safe_config, indent=2, default=str)}")
+
  def __init__(
  self,
  model: str,
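
The guard at the top of this new helper fires when either the root logger is at DEBUG or a numeric verbose of 10 or higher was passed; plain booleans never trigger it. A small sketch of just that condition, separate from the class:

    import logging

    def should_log(verbose) -> bool:
        # Log when the root logger is at DEBUG, or when verbose is a numeric
        # level >= 10; boolean True/False never triggers the debug dump.
        return (logging.getLogger().getEffectiveLevel() == logging.DEBUG
                or (not isinstance(verbose, bool) and verbose >= 10))

    logging.basicConfig(level=logging.INFO)
    print(should_log(True))    # False - booleans are ignored
    print(should_log(10))      # True  - numeric verbose >= 10
    logging.getLogger().setLevel(logging.DEBUG)
    print(should_log(False))   # True  - global DEBUG level wins
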
@@ -181,35 +240,34 @@ class LLM:
  litellm.modify_params = True
  self._setup_event_tracking(events)

- # Log all initialization parameters when in debug mode
- if not isinstance(verbose, bool) and verbose >= 10:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": self.temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "logit_bias": self.logit_bias,
- "response_format": self.response_format,
- "seed": self.seed,
- "logprobs": self.logprobs,
- "top_logprobs": self.top_logprobs,
- "api_version": self.api_version,
- "stop_phrases": self.stop_phrases,
- "api_key": "***" if self.api_key else None, # Mask API key for security
- "base_url": self.base_url,
- "verbose": self.verbose,
- "markdown": self.markdown,
- "self_reflect": self.self_reflect,
- "max_reflect": self.max_reflect,
- "min_reflect": self.min_reflect,
- "reasoning_steps": self.reasoning_steps,
- "extra_settings": {k: v for k, v in self.extra_settings.items() if k not in ["api_key"]}
- }
- logging.debug(f"LLM instance initialized with: {json.dumps(debug_info, indent=2, default=str)}")
+ # Log all initialization parameters when in debug mode or verbose >= 10
+ self._log_llm_config(
+ '__init__',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=self.temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ logit_bias=self.logit_bias,
+ response_format=self.response_format,
+ seed=self.seed,
+ logprobs=self.logprobs,
+ top_logprobs=self.top_logprobs,
+ api_version=self.api_version,
+ stop_phrases=self.stop_phrases,
+ api_key=self.api_key,
+ base_url=self.base_url,
+ verbose=self.verbose,
+ markdown=self.markdown,
+ self_reflect=self.self_reflect,
+ max_reflect=self.max_reflect,
+ min_reflect=self.min_reflect,
+ reasoning_steps=self.reasoning_steps,
+ extra_settings=self.extra_settings
+ )

  def _is_ollama_provider(self) -> bool:
  """Detect if this is an Ollama provider regardless of naming convention"""
@@ -530,54 +588,53 @@ class LLM:
  """Enhanced get_response with all OpenAI-like features"""
  logging.info(f"Getting response from {self.model}")
  # Log all self values when in debug mode
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": self.temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "logit_bias": self.logit_bias,
- "response_format": self.response_format,
- "seed": self.seed,
- "logprobs": self.logprobs,
- "top_logprobs": self.top_logprobs,
- "api_version": self.api_version,
- "stop_phrases": self.stop_phrases,
- "api_key": "***" if self.api_key else None, # Mask API key for security
- "base_url": self.base_url,
- "verbose": self.verbose,
- "markdown": self.markdown,
- "self_reflect": self.self_reflect,
- "max_reflect": self.max_reflect,
- "min_reflect": self.min_reflect,
- "reasoning_steps": self.reasoning_steps
- }
- logging.debug(f"LLM instance configuration: {json.dumps(debug_info, indent=2, default=str)}")
-
- # Log the parameter values passed to get_response
- param_info = {
- "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
- "system_prompt": system_prompt[:100] + "..." if system_prompt and len(system_prompt) > 100 else system_prompt,
- "chat_history": f"[{len(chat_history)} messages]" if chat_history else None,
- "temperature": temperature,
- "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
- "output_json": str(output_json.__class__.__name__) if output_json else None,
- "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
- "verbose": verbose,
- "markdown": markdown,
- "self_reflect": self_reflect,
- "max_reflect": max_reflect,
- "min_reflect": min_reflect,
- "agent_name": agent_name,
- "agent_role": agent_role,
- "agent_tools": agent_tools,
- "kwargs": str(kwargs)
- }
- logging.debug(f"get_response parameters: {json.dumps(param_info, indent=2, default=str)}")
+ self._log_llm_config(
+ 'LLM instance',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=self.temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ logit_bias=self.logit_bias,
+ response_format=self.response_format,
+ seed=self.seed,
+ logprobs=self.logprobs,
+ top_logprobs=self.top_logprobs,
+ api_version=self.api_version,
+ stop_phrases=self.stop_phrases,
+ api_key=self.api_key,
+ base_url=self.base_url,
+ verbose=self.verbose,
+ markdown=self.markdown,
+ self_reflect=self.self_reflect,
+ max_reflect=self.max_reflect,
+ min_reflect=self.min_reflect,
+ reasoning_steps=self.reasoning_steps
+ )
+
+ # Log the parameter values passed to get_response
+ self._log_llm_config(
+ 'get_response parameters',
+ prompt=prompt,
+ system_prompt=system_prompt,
+ chat_history=chat_history,
+ temperature=temperature,
+ tools=tools,
+ output_json=output_json,
+ output_pydantic=output_pydantic,
+ verbose=verbose,
+ markdown=markdown,
+ self_reflect=self_reflect,
+ max_reflect=max_reflect,
+ min_reflect=min_reflect,
+ agent_name=agent_name,
+ agent_role=agent_role,
+ agent_tools=agent_tools,
+ kwargs=str(kwargs)
+ )
  try:
  import litellm
  # This below **kwargs** is passed to .completion() directly. so reasoning_steps has to be popped. OR find alternate best way of handling this.
@@ -1186,54 +1243,53 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  import litellm
  logging.info(f"Getting async response from {self.model}")
  # Log all self values when in debug mode
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": self.temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "logit_bias": self.logit_bias,
- "response_format": self.response_format,
- "seed": self.seed,
- "logprobs": self.logprobs,
- "top_logprobs": self.top_logprobs,
- "api_version": self.api_version,
- "stop_phrases": self.stop_phrases,
- "api_key": "***" if self.api_key else None, # Mask API key for security
- "base_url": self.base_url,
- "verbose": self.verbose,
- "markdown": self.markdown,
- "self_reflect": self.self_reflect,
- "max_reflect": self.max_reflect,
- "min_reflect": self.min_reflect,
- "reasoning_steps": self.reasoning_steps
- }
- logging.debug(f"LLM async instance configuration: {json.dumps(debug_info, indent=2, default=str)}")
-
- # Log the parameter values passed to get_response_async
- param_info = {
- "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
- "system_prompt": system_prompt[:100] + "..." if system_prompt and len(system_prompt) > 100 else system_prompt,
- "chat_history": f"[{len(chat_history)} messages]" if chat_history else None,
- "temperature": temperature,
- "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
- "output_json": str(output_json.__class__.__name__) if output_json else None,
- "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
- "verbose": verbose,
- "markdown": markdown,
- "self_reflect": self_reflect,
- "max_reflect": max_reflect,
- "min_reflect": min_reflect,
- "agent_name": agent_name,
- "agent_role": agent_role,
- "agent_tools": agent_tools,
- "kwargs": str(kwargs)
- }
- logging.debug(f"get_response_async parameters: {json.dumps(param_info, indent=2, default=str)}")
+ self._log_llm_config(
+ 'get_response_async',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=self.temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ logit_bias=self.logit_bias,
+ response_format=self.response_format,
+ seed=self.seed,
+ logprobs=self.logprobs,
+ top_logprobs=self.top_logprobs,
+ api_version=self.api_version,
+ stop_phrases=self.stop_phrases,
+ api_key=self.api_key,
+ base_url=self.base_url,
+ verbose=self.verbose,
+ markdown=self.markdown,
+ self_reflect=self.self_reflect,
+ max_reflect=self.max_reflect,
+ min_reflect=self.min_reflect,
+ reasoning_steps=self.reasoning_steps
+ )
+
+ # Log the parameter values passed to get_response_async
+ self._log_llm_config(
+ 'get_response_async parameters',
+ prompt=prompt,
+ system_prompt=system_prompt,
+ chat_history=chat_history,
+ temperature=temperature,
+ tools=tools,
+ output_json=output_json,
+ output_pydantic=output_pydantic,
+ verbose=verbose,
+ markdown=markdown,
+ self_reflect=self_reflect,
+ max_reflect=max_reflect,
+ min_reflect=min_reflect,
+ agent_name=agent_name,
+ agent_role=agent_role,
+ agent_tools=agent_tools,
+ kwargs=str(kwargs)
+ )
  reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
  litellm.set_verbose = False

@@ -1867,22 +1923,21 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  logger.debug("Using synchronous response function")

  # Log all self values when in debug mode
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "stream": stream,
- "verbose": verbose,
- "markdown": markdown,
- "kwargs": str(kwargs)
- }
- logger.debug(f"Response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+ self._log_llm_config(
+ 'Response method',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ stream=stream,
+ verbose=verbose,
+ markdown=markdown,
+ kwargs=str(kwargs)
+ )

  # Build messages list using shared helper (simplified version without JSON output)
  messages, _ = self._build_messages(
@@ -1968,22 +2023,21 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  logger.debug("Using asynchronous response function")

  # Log all self values when in debug mode
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "stream": stream,
- "verbose": verbose,
- "markdown": markdown,
- "kwargs": str(kwargs)
- }
- logger.debug(f"Async response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+ self._log_llm_config(
+ 'Async response method',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ stream=stream,
+ verbose=verbose,
+ markdown=markdown,
+ kwargs=str(kwargs)
+ )

  # Build messages list using shared helper (simplified version without JSON output)
  messages, _ = self._build_messages(
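
With these hunks, the configuration dumps in __init__, get_response, get_response_async, and the sync/async response methods all route through _log_llm_config, so enabling them from user code is unchanged. A hedged example of turning the output on, with an illustrative model name and prompt (an API key for the chosen provider is still required):

    import logging
    from praisonaiagents.llm import LLM

    # DEBUG level (or a numeric verbose >= 10) is what _log_llm_config keys on.
    logging.basicConfig(level=logging.DEBUG)

    # Model name and prompt are illustrative; any litellm-compatible model works.
    llm = LLM(model="gpt-4o-mini")
    print(llm.response(prompt="Say hello in one word.", stream=False))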

METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.119
+ Version: 0.0.120
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10

RECORD
@@ -3,12 +3,12 @@ praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9
  praisonaiagents/main.py,sha256=bamnEu5PaekloGi52VqAFclm-HzjEVeKtWF0Zpdmfzs,15479
  praisonaiagents/session.py,sha256=d-CZPYikOHb0q-H9f_IWKJsypnQfz1YKeLLkyxs6oDo,15532
  praisonaiagents/agent/__init__.py,sha256=IhIDtAkfJ99cxbttwou52coih_AejS2-jpazsX6LbDY,350
- praisonaiagents/agent/agent.py,sha256=oZaMJJXoWOWJVOFSLmnoBEpF9rb54pnvSqZHgiOhzAw,108660
+ praisonaiagents/agent/agent.py,sha256=_ROVyOTPBMB5Porv4YvZ4-kKWr4-tGMbSN7V8uDWZgk,109619
  praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
  praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
  praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
  praisonaiagents/agents/agents.py,sha256=WnptTEMSDMAM30Ka6rOAu6rBD-ZLev3qphb1a3BbP1g,63301
- praisonaiagents/agents/autoagents.py,sha256=njkcv7wgDjrUd5auLL3rMc7qv20Kfo40zdn49UxWR9k,14235
+ praisonaiagents/agents/autoagents.py,sha256=NNSlqEsWf4up4lmdQwNl5_iTgodZ5aODUnjlXdp9vEQ,16127
  praisonaiagents/guardrails/__init__.py,sha256=HA8zhp-KRHTxo0194MUwXOUJjPyjOu7E3d7xUIKYVVY,310
  praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXCFIyZXZVY4f0hnQyWc,1352
  praisonaiagents/guardrails/llm_guardrail.py,sha256=MTTqmYDdZX-18QN9T17T5P_6H2qnV8GVgymJufW1WuM,3277
@@ -16,7 +16,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
  praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
  praisonaiagents/knowledge/knowledge.py,sha256=OKPar-XGyAp1ndmbOOdCgqFnTCqpOThYVSIZRxZyP58,15683
  praisonaiagents/llm/__init__.py,sha256=6lTeQ8jWi1-KiwjCDCmkHo2e-bRLq2dP0s5iJWqjO3s,1421
- praisonaiagents/llm/llm.py,sha256=mfEUXbjT-0jQmiQ3qqgsyDbzgVpWq_s26VSe6l-heEw,106565
+ praisonaiagents/llm/llm.py,sha256=I08T3Du9PQndEzIEjDjacHqVkBpwg_AumcO4TsG85b8,107317
  praisonaiagents/llm/openai_client.py,sha256=0JvjCDHoH8I8kIt5vvObARkGdVaPWdTIv_FoEQ5EQPA,48973
  praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
  praisonaiagents/mcp/mcp.py,sha256=-fFx4MHffnN2woLnnV7Pzx3-1SFkn2j8Gp5F5ZIwKJ0,19698
@@ -53,7 +53,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
  praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
  praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
  praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
- praisonaiagents-0.0.119.dist-info/METADATA,sha256=O1WvcOBDN5jvW1BAbhYat4usg_Lq6lIbVGR6fTxA2fE,1669
- praisonaiagents-0.0.119.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- praisonaiagents-0.0.119.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
- praisonaiagents-0.0.119.dist-info/RECORD,,
+ praisonaiagents-0.0.120.dist-info/METADATA,sha256=0degpExWB64MNrEFvMEKEkopgjJp_UlALiDV8tFvxmk,1669
+ praisonaiagents-0.0.120.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ praisonaiagents-0.0.120.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+ praisonaiagents-0.0.120.dist-info/RECORD,,