praisonaiagents 0.0.119__py3-none-any.whl → 0.0.121__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1233,7 +1233,8 @@ Your Goal: {self.goal}
  # Add to chat history and return raw response
  self.chat_history.append({"role": "user", "content": original_prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- if self.verbose:
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
  display_interaction(original_prompt, response_text, markdown=self.markdown,
  generation_time=time.time() - start_time, console=self.console)
  return response_text
@@ -1243,7 +1244,9 @@ Your Goal: {self.goal}
  self.chat_history.append({"role": "assistant", "content": response_text})
  if self.verbose:
  logging.debug(f"Agent {self.name} final response: {response_text}")
- display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  # Return only reasoning content if reasoning_steps is True
  if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
  # Apply guardrail to reasoning content
@@ -1279,7 +1282,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  # Return the original response without reflection
  self.chat_history.append({"role": "user", "content": prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  return response_text

  reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
@@ -1302,7 +1307,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
  self.chat_history.append({"role": "user", "content": prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  # Apply guardrail validation after satisfactory reflection
  try:
  validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
@@ -1317,7 +1324,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
  self.chat_history.append({"role": "user", "content": prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  # Apply guardrail validation after max reflections
  try:
  validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
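The recurring edit in these hunks gates console rendering on both the verbose flag and the absence of a custom LLM. A minimal standalone sketch of that gate, with simplified names (the agent object and the display_interaction callable here stand in for the package's own):

import time

def maybe_display(agent, prompt, response_text, start_time, display_interaction):
    # Custom-LLM paths already render their own output, so the extra call is
    # skipped to avoid printing the same interaction twice.
    if agent.verbose and not agent._using_custom_llm:
        display_interaction(prompt, response_text, markdown=agent.markdown,
                            generation_time=time.time() - start_time,
                            console=agent.console)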
@@ -13,7 +13,8 @@ import logging
  import os
  from pydantic import BaseModel, ConfigDict
  from ..main import display_instruction, display_tool_call, display_interaction
- from ..llm import get_openai_client
+ from ..llm import get_openai_client, LLM, OpenAIClient
+ import json

  # Define Pydantic models for structured output
  class TaskConfig(BaseModel):
@@ -108,6 +109,8 @@ class AutoAgents(PraisonAIAgents):
  self.max_execution_time = max_execution_time
  self.max_iter = max_iter
  self.reflect_llm = reflect_llm
+ self.base_url = base_url
+ self.api_key = api_key

  # Display initial instruction
  if self.verbose:
@@ -238,33 +241,77 @@ Return the configuration in a structured JSON format matching the AutoAgentsConf
  """

  try:
- # Get OpenAI client
+ # Try to use OpenAI's structured output if available
+ use_openai_structured = False
+ client = None
+
  try:
- client = get_openai_client()
- except ValueError as e:
- # AutoAgents requires OpenAI for structured output generation
- raise ValueError(
- "AutoAgents requires OpenAI API for automatic agent generation. "
- "Please set OPENAI_API_KEY environment variable or use PraisonAIAgents class directly "
- "with manually configured agents for non-OpenAI providers."
- ) from e
+ # Check if we have OpenAI API and the model supports structured output
+ if self.llm and (self.llm.startswith('gpt-') or self.llm.startswith('o1-') or self.llm.startswith('o3-')):
+ # Create a new client instance if custom parameters are provided
+ if self.api_key or self.base_url:
+ client = OpenAIClient(api_key=self.api_key, base_url=self.base_url)
+ else:
+ client = get_openai_client()
+ use_openai_structured = True
+ except:
+ # If OpenAI client is not available, we'll use the LLM class
+ pass
+
+ if use_openai_structured and client:
+ # Use OpenAI's structured output for OpenAI models (backward compatibility)
+ config = client.parse_structured_output(
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
+ {"role": "user", "content": prompt}
+ ],
+ response_format=AutoAgentsConfig,
+ model=self.llm
+ )
+ else:
+ # Use LLM class for all other providers (Gemini, Anthropic, etc.)
+ llm_instance = LLM(
+ model=self.llm,
+ base_url=self.base_url,
+ api_key=self.api_key
+ )

- response = client.beta.chat.completions.parse(
- model=self.llm,
- response_format=AutoAgentsConfig,
- messages=[
- {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
- {"role": "user", "content": prompt}
- ]
- )
+ response_text = llm_instance.response(
+ prompt=prompt,
+ system_prompt="You are a helpful assistant designed to generate AI agent configurations.",
+ output_pydantic=AutoAgentsConfig,
+ temperature=0.7,
+ stream=False,
+ verbose=False
+ )
+
+ # Parse the JSON response
+ try:
+ # First try to parse as is
+ config_dict = json.loads(response_text)
+ config = AutoAgentsConfig(**config_dict)
+ except json.JSONDecodeError:
+ # If that fails, try to extract JSON from the response
+ # Handle cases where the model might wrap JSON in markdown blocks
+ cleaned_response = response_text.strip()
+ if cleaned_response.startswith("```json"):
+ cleaned_response = cleaned_response[7:]
+ if cleaned_response.startswith("```"):
+ cleaned_response = cleaned_response[3:]
+ if cleaned_response.endswith("```"):
+ cleaned_response = cleaned_response[:-3]
+ cleaned_response = cleaned_response.strip()
+
+ config_dict = json.loads(cleaned_response)
+ config = AutoAgentsConfig(**config_dict)

  # Ensure we have exactly max_agents number of agents
- if len(response.choices[0].message.parsed.agents) > self.max_agents:
- response.choices[0].message.parsed.agents = response.choices[0].message.parsed.agents[:self.max_agents]
- elif len(response.choices[0].message.parsed.agents) < self.max_agents:
- logging.warning(f"Generated {len(response.choices[0].message.parsed.agents)} agents, expected {self.max_agents}")
+ if len(config.agents) > self.max_agents:
+ config.agents = config.agents[:self.max_agents]
+ elif len(config.agents) < self.max_agents:
+ logging.warning(f"Generated {len(config.agents)} agents, expected {self.max_agents}")

- return response.choices[0].message.parsed
+ return config
  except Exception as e:
  logging.error(f"Error generating configuration: {e}")
  raise
@@ -308,7 +355,9 @@ Return the configuration in a structured JSON format matching the AutoAgentsConf
  max_rpm=self.max_rpm,
  max_execution_time=self.max_execution_time,
  max_iter=self.max_iter,
- reflect_llm=self.reflect_llm
+ reflect_llm=self.reflect_llm,
+ base_url=self.base_url,
+ api_key=self.api_key
  )
  agents.append(agent)

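The fallback branch above parses the reply as plain JSON first and only then strips ```json fences. A self-contained sketch of that cleanup, assuming the model's reply is either raw JSON or JSON wrapped in a markdown code block (parse_llm_json is an illustrative helper, not part of the package):

import json

def parse_llm_json(response_text: str) -> dict:
    # First try the reply as-is.
    try:
        return json.loads(response_text)
    except json.JSONDecodeError:
        # Otherwise strip a leading ```json / ``` fence and a trailing ``` fence.
        cleaned = response_text.strip()
        if cleaned.startswith("```json"):
            cleaned = cleaned[7:]
        elif cleaned.startswith("```"):
            cleaned = cleaned[3:]
        if cleaned.endswith("```"):
            cleaned = cleaned[:-3]
        return json.loads(cleaned.strip())

print(parse_llm_json('```json\n{"agents": []}\n```'))  # -> {'agents': []}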
@@ -87,6 +87,65 @@ class LLM:
  "llama-3.2-90b-text-preview": 6144 # 8,192 actual
  }

+ def _log_llm_config(self, method_name: str, **config):
+ """Centralized debug logging for LLM configuration and parameters.
+
+ Args:
+ method_name: The name of the method calling this logger (e.g., '__init__', 'get_response')
+ **config: Configuration parameters to log
+ """
+ # Check for debug logging - either global debug level OR explicit verbose mode
+ verbose = config.get('verbose', self.verbose if hasattr(self, 'verbose') else False)
+ should_log = logging.getLogger().getEffectiveLevel() == logging.DEBUG or (not isinstance(verbose, bool) and verbose >= 10)
+
+ if should_log:
+ # Mask sensitive information
+ safe_config = config.copy()
+ if 'api_key' in safe_config:
+ safe_config['api_key'] = "***" if safe_config['api_key'] is not None else None
+ if 'extra_settings' in safe_config and isinstance(safe_config['extra_settings'], dict):
+ safe_config['extra_settings'] = {k: v for k, v in safe_config['extra_settings'].items() if k not in ["api_key"]}
+
+ # Handle special formatting for certain fields
+ if 'prompt' in safe_config:
+ prompt = safe_config['prompt']
+ # Convert to string first for consistent logging behavior
+ prompt_str = str(prompt) if not isinstance(prompt, str) else prompt
+ if len(prompt_str) > 100:
+ safe_config['prompt'] = prompt_str[:100] + "..."
+ else:
+ safe_config['prompt'] = prompt_str
+ if 'system_prompt' in safe_config:
+ sp = safe_config['system_prompt']
+ if sp and isinstance(sp, str) and len(sp) > 100:
+ safe_config['system_prompt'] = sp[:100] + "..."
+ if 'chat_history' in safe_config:
+ ch = safe_config['chat_history']
+ safe_config['chat_history'] = f"[{len(ch)} messages]" if ch else None
+ if 'tools' in safe_config:
+ tools = safe_config['tools']
+ # Check if tools is iterable before processing
+ if tools and hasattr(tools, '__iter__') and not isinstance(tools, str):
+ safe_config['tools'] = [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools]
+ else:
+ safe_config['tools'] = None
+ if 'output_json' in safe_config:
+ oj = safe_config['output_json']
+ safe_config['output_json'] = str(oj.__class__.__name__) if oj else None
+ if 'output_pydantic' in safe_config:
+ op = safe_config['output_pydantic']
+ safe_config['output_pydantic'] = str(op.__class__.__name__) if op else None
+
+ # Log based on method name - check more specific conditions first
+ if method_name == '__init__':
+ logging.debug(f"LLM instance initialized with: {json.dumps(safe_config, indent=2, default=str)}")
+ elif "parameters" in method_name:
+ logging.debug(f"{method_name}: {json.dumps(safe_config, indent=2, default=str)}")
+ elif "_async" in method_name:
+ logging.debug(f"LLM async instance configuration: {json.dumps(safe_config, indent=2, default=str)}")
+ else:
+ logging.debug(f"{method_name} configuration: {json.dumps(safe_config, indent=2, default=str)}")
+
  def __init__(
  self,
  model: str,
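The new _log_llm_config helper centralizes the debug dumps that previously appeared inline in __init__, get_response, get_response_async, response and aresponse, masking the API key and truncating long prompts before serializing. A standalone sketch of that masking step (mask_config_for_logging and the sample values are illustrative, not part of the package):

import json
import logging

def mask_config_for_logging(config: dict) -> dict:
    # Copy so the caller's dict is untouched, hide the API key, and keep the
    # logged prompt to at most 100 characters, mirroring the helper above.
    safe = dict(config)
    if 'api_key' in safe:
        safe['api_key'] = "***" if safe['api_key'] is not None else None
    prompt = safe.get('prompt')
    if isinstance(prompt, str) and len(prompt) > 100:
        safe['prompt'] = prompt[:100] + "..."
    return safe

logging.basicConfig(level=logging.DEBUG)
logging.debug(json.dumps(mask_config_for_logging(
    {"model": "gpt-4o-mini", "api_key": "sk-example", "prompt": "x" * 500}
), indent=2, default=str))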
@@ -181,35 +240,34 @@ class LLM:
  litellm.modify_params = True
  self._setup_event_tracking(events)

- # Log all initialization parameters when in debug mode
- if not isinstance(verbose, bool) and verbose >= 10:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": self.temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "logit_bias": self.logit_bias,
- "response_format": self.response_format,
- "seed": self.seed,
- "logprobs": self.logprobs,
- "top_logprobs": self.top_logprobs,
- "api_version": self.api_version,
- "stop_phrases": self.stop_phrases,
- "api_key": "***" if self.api_key else None, # Mask API key for security
- "base_url": self.base_url,
- "verbose": self.verbose,
- "markdown": self.markdown,
- "self_reflect": self.self_reflect,
- "max_reflect": self.max_reflect,
- "min_reflect": self.min_reflect,
- "reasoning_steps": self.reasoning_steps,
- "extra_settings": {k: v for k, v in self.extra_settings.items() if k not in ["api_key"]}
- }
- logging.debug(f"LLM instance initialized with: {json.dumps(debug_info, indent=2, default=str)}")
+ # Log all initialization parameters when in debug mode or verbose >= 10
+ self._log_llm_config(
+ '__init__',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=self.temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ logit_bias=self.logit_bias,
+ response_format=self.response_format,
+ seed=self.seed,
+ logprobs=self.logprobs,
+ top_logprobs=self.top_logprobs,
+ api_version=self.api_version,
+ stop_phrases=self.stop_phrases,
+ api_key=self.api_key,
+ base_url=self.base_url,
+ verbose=self.verbose,
+ markdown=self.markdown,
+ self_reflect=self.self_reflect,
+ max_reflect=self.max_reflect,
+ min_reflect=self.min_reflect,
+ reasoning_steps=self.reasoning_steps,
+ extra_settings=self.extra_settings
+ )

  def _is_ollama_provider(self) -> bool:
  """Detect if this is an Ollama provider regardless of naming convention"""
@@ -530,54 +588,53 @@ class LLM:
  """Enhanced get_response with all OpenAI-like features"""
  logging.info(f"Getting response from {self.model}")
  # Log all self values when in debug mode
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": self.temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "logit_bias": self.logit_bias,
- "response_format": self.response_format,
- "seed": self.seed,
- "logprobs": self.logprobs,
- "top_logprobs": self.top_logprobs,
- "api_version": self.api_version,
- "stop_phrases": self.stop_phrases,
- "api_key": "***" if self.api_key else None, # Mask API key for security
- "base_url": self.base_url,
- "verbose": self.verbose,
- "markdown": self.markdown,
- "self_reflect": self.self_reflect,
- "max_reflect": self.max_reflect,
- "min_reflect": self.min_reflect,
- "reasoning_steps": self.reasoning_steps
- }
- logging.debug(f"LLM instance configuration: {json.dumps(debug_info, indent=2, default=str)}")
-
- # Log the parameter values passed to get_response
- param_info = {
- "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
- "system_prompt": system_prompt[:100] + "..." if system_prompt and len(system_prompt) > 100 else system_prompt,
- "chat_history": f"[{len(chat_history)} messages]" if chat_history else None,
- "temperature": temperature,
- "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
- "output_json": str(output_json.__class__.__name__) if output_json else None,
- "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
- "verbose": verbose,
- "markdown": markdown,
- "self_reflect": self_reflect,
- "max_reflect": max_reflect,
- "min_reflect": min_reflect,
- "agent_name": agent_name,
- "agent_role": agent_role,
- "agent_tools": agent_tools,
- "kwargs": str(kwargs)
- }
- logging.debug(f"get_response parameters: {json.dumps(param_info, indent=2, default=str)}")
+ self._log_llm_config(
+ 'LLM instance',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=self.temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ logit_bias=self.logit_bias,
+ response_format=self.response_format,
+ seed=self.seed,
+ logprobs=self.logprobs,
+ top_logprobs=self.top_logprobs,
+ api_version=self.api_version,
+ stop_phrases=self.stop_phrases,
+ api_key=self.api_key,
+ base_url=self.base_url,
+ verbose=self.verbose,
+ markdown=self.markdown,
+ self_reflect=self.self_reflect,
+ max_reflect=self.max_reflect,
+ min_reflect=self.min_reflect,
+ reasoning_steps=self.reasoning_steps
+ )
+
+ # Log the parameter values passed to get_response
+ self._log_llm_config(
+ 'get_response parameters',
+ prompt=prompt,
+ system_prompt=system_prompt,
+ chat_history=chat_history,
+ temperature=temperature,
+ tools=tools,
+ output_json=output_json,
+ output_pydantic=output_pydantic,
+ verbose=verbose,
+ markdown=markdown,
+ self_reflect=self_reflect,
+ max_reflect=max_reflect,
+ min_reflect=min_reflect,
+ agent_name=agent_name,
+ agent_role=agent_role,
+ agent_tools=agent_tools,
+ kwargs=str(kwargs)
+ )
  try:
  import litellm
  # This below **kwargs** is passed to .completion() directly. so reasoning_steps has to be popped. OR find alternate best way of handling this.
@@ -689,6 +746,7 @@ class LLM:
  )
  if delta.content:
  live.update(display_generating(response_text, current_time))
+
  else:
  # Non-verbose streaming
  for chunk in litellm.completion(
@@ -702,9 +760,12 @@ class LLM:
  ):
  if chunk and chunk.choices and chunk.choices[0].delta:
  delta = chunk.choices[0].delta
- response_text, tool_calls = self._process_stream_delta(
- delta, response_text, tool_calls, formatted_tools
- )
+ if delta.content:
+ response_text += delta.content
+
+ # Capture tool calls from streaming chunks if provider supports it
+ if formatted_tools and self._supports_streaming_tools():
+ tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)

  response_text = response_text.strip()

@@ -745,20 +806,7 @@
  # Handle tool calls - Sequential tool calling logic
  if tool_calls and execute_tool_fn:
  # Convert tool_calls to a serializable format for all providers
- serializable_tool_calls = []
- for tc in tool_calls:
- if isinstance(tc, dict):
- serializable_tool_calls.append(tc) # Already a dict
- else:
- # Convert object to dict
- serializable_tool_calls.append({
- "id": tc.id,
- "type": getattr(tc, 'type', "function"),
- "function": {
- "name": tc.function.name,
- "arguments": tc.function.arguments
- }
- })
+ serializable_tool_calls = self._serialize_tool_calls(tool_calls)
  messages.append({
  "role": "assistant",
  "content": response_text,
@@ -769,20 +817,8 @@
  tool_results = [] # Store all tool results
  for tool_call in tool_calls:
  # Handle both object and dict access patterns
- if isinstance(tool_call, dict):
- is_ollama = self._is_ollama_provider()
- function_name, arguments, tool_call_id = self._parse_tool_call_arguments(tool_call, is_ollama)
- else:
- # Handle object-style tool calls
- try:
- function_name = tool_call.function.name
- arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
- tool_call_id = tool_call.id
- except (json.JSONDecodeError, AttributeError) as e:
- logging.error(f"Error parsing object-style tool call: {e}")
- function_name = "unknown_function"
- arguments = {}
- tool_call_id = f"tool_{id(tool_call)}"
+ is_ollama = self._is_ollama_provider()
+ function_name, arguments, tool_call_id = self._extract_tool_call_info(tool_call, is_ollama)

  logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
  tool_result = execute_tool_fn(function_name, arguments)
@@ -1186,54 +1222,53 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  import litellm
  logging.info(f"Getting async response from {self.model}")
  # Log all self values when in debug mode
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": self.temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "logit_bias": self.logit_bias,
- "response_format": self.response_format,
- "seed": self.seed,
- "logprobs": self.logprobs,
- "top_logprobs": self.top_logprobs,
- "api_version": self.api_version,
- "stop_phrases": self.stop_phrases,
- "api_key": "***" if self.api_key else None, # Mask API key for security
- "base_url": self.base_url,
- "verbose": self.verbose,
- "markdown": self.markdown,
- "self_reflect": self.self_reflect,
- "max_reflect": self.max_reflect,
- "min_reflect": self.min_reflect,
- "reasoning_steps": self.reasoning_steps
- }
- logging.debug(f"LLM async instance configuration: {json.dumps(debug_info, indent=2, default=str)}")
-
- # Log the parameter values passed to get_response_async
- param_info = {
- "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
- "system_prompt": system_prompt[:100] + "..." if system_prompt and len(system_prompt) > 100 else system_prompt,
- "chat_history": f"[{len(chat_history)} messages]" if chat_history else None,
- "temperature": temperature,
- "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
- "output_json": str(output_json.__class__.__name__) if output_json else None,
- "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
- "verbose": verbose,
- "markdown": markdown,
- "self_reflect": self_reflect,
- "max_reflect": max_reflect,
- "min_reflect": min_reflect,
- "agent_name": agent_name,
- "agent_role": agent_role,
- "agent_tools": agent_tools,
- "kwargs": str(kwargs)
- }
- logging.debug(f"get_response_async parameters: {json.dumps(param_info, indent=2, default=str)}")
+ self._log_llm_config(
+ 'get_response_async',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=self.temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ logit_bias=self.logit_bias,
+ response_format=self.response_format,
+ seed=self.seed,
+ logprobs=self.logprobs,
+ top_logprobs=self.top_logprobs,
+ api_version=self.api_version,
+ stop_phrases=self.stop_phrases,
+ api_key=self.api_key,
+ base_url=self.base_url,
+ verbose=self.verbose,
+ markdown=self.markdown,
+ self_reflect=self.self_reflect,
+ max_reflect=self.max_reflect,
+ min_reflect=self.min_reflect,
+ reasoning_steps=self.reasoning_steps
+ )
+
+ # Log the parameter values passed to get_response_async
+ self._log_llm_config(
+ 'get_response_async parameters',
+ prompt=prompt,
+ system_prompt=system_prompt,
+ chat_history=chat_history,
+ temperature=temperature,
+ tools=tools,
+ output_json=output_json,
+ output_pydantic=output_pydantic,
+ verbose=verbose,
+ markdown=markdown,
+ self_reflect=self_reflect,
+ max_reflect=max_reflect,
+ min_reflect=min_reflect,
+ agent_name=agent_name,
+ agent_role=agent_role,
+ agent_tools=agent_tools,
+ kwargs=str(kwargs)
+ )
  reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
  litellm.set_verbose = False

@@ -1311,6 +1346,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  if delta.content:
  print("\033[K", end="\r")
  print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
+
  else:
  # Non-verbose streaming
  async for chunk in await litellm.acompletion(
@@ -1324,9 +1360,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  ):
  if chunk and chunk.choices and chunk.choices[0].delta:
  delta = chunk.choices[0].delta
- response_text, tool_calls = self._process_stream_delta(
- delta, response_text, tool_calls, formatted_tools
- )
+ if delta.content:
+ response_text += delta.content
+
+ # Capture tool calls from streaming chunks if provider supports it
+ if formatted_tools and self._supports_streaming_tools():
+ tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)

  response_text = response_text.strip()

@@ -1361,20 +1400,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  if tool_calls:
  # Convert tool_calls to a serializable format for all providers
- serializable_tool_calls = []
- for tc in tool_calls:
- if isinstance(tc, dict):
- serializable_tool_calls.append(tc) # Already a dict
- else:
- # Convert object to dict
- serializable_tool_calls.append({
- "id": tc.id,
- "type": getattr(tc, 'type', "function"),
- "function": {
- "name": tc.function.name,
- "arguments": tc.function.arguments
- }
- })
+ serializable_tool_calls = self._serialize_tool_calls(tool_calls)
  messages.append({
  "role": "assistant",
  "content": response_text,
@@ -1384,20 +1410,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  tool_results = [] # Store all tool results
  for tool_call in tool_calls:
  # Handle both object and dict access patterns
- if isinstance(tool_call, dict):
- is_ollama = self._is_ollama_provider()
- function_name, arguments, tool_call_id = self._parse_tool_call_arguments(tool_call, is_ollama)
- else:
- # Handle object-style tool calls
- try:
- function_name = tool_call.function.name
- arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
- tool_call_id = tool_call.id
- except (json.JSONDecodeError, AttributeError) as e:
- logging.error(f"Error parsing object-style tool call: {e}")
- function_name = "unknown_function"
- arguments = {}
- tool_call_id = f"tool_{id(tool_call)}"
+ is_ollama = self._is_ollama_provider()
+ function_name, arguments, tool_call_id = self._extract_tool_call_info(tool_call, is_ollama)

  tool_result = await execute_tool_fn(function_name, arguments)
  tool_results.append(tool_result) # Store the result
@@ -1843,6 +1857,90 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  return params

+ def _prepare_response_logging(self, temperature: float, stream: bool, verbose: bool, markdown: bool, **kwargs) -> Optional[Dict[str, Any]]:
+ """Prepare debug logging information for response methods"""
+ if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+ debug_info = {
+ "model": self.model,
+ "timeout": self.timeout,
+ "temperature": temperature,
+ "top_p": self.top_p,
+ "n": self.n,
+ "max_tokens": self.max_tokens,
+ "presence_penalty": self.presence_penalty,
+ "frequency_penalty": self.frequency_penalty,
+ "stream": stream,
+ "verbose": verbose,
+ "markdown": markdown,
+ "kwargs": str(kwargs)
+ }
+ return debug_info
+ return None
+
+ def _process_streaming_chunk(self, chunk) -> Optional[str]:
+ """Extract content from a streaming chunk"""
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ return chunk.choices[0].delta.content
+ return None
+
+ def _process_tool_calls_from_stream(self, delta, tool_calls: List[Dict]) -> List[Dict]:
+ """Process tool calls from streaming delta chunks.
+
+ This handles the accumulation of tool call data from streaming chunks,
+ building up the complete tool call information incrementally.
+ """
+ if hasattr(delta, 'tool_calls') and delta.tool_calls:
+ for tc in delta.tool_calls:
+ if tc.index >= len(tool_calls):
+ tool_calls.append({
+ "id": tc.id,
+ "type": "function",
+ "function": {"name": "", "arguments": ""}
+ })
+ if tc.function.name:
+ tool_calls[tc.index]["function"]["name"] = tc.function.name
+ if tc.function.arguments:
+ tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
+ return tool_calls
+
+ def _serialize_tool_calls(self, tool_calls) -> List[Dict]:
+ """Convert tool calls to a serializable format for all providers."""
+ serializable_tool_calls = []
+ for tc in tool_calls:
+ if isinstance(tc, dict):
+ serializable_tool_calls.append(tc) # Already a dict
+ else:
+ # Convert object to dict
+ serializable_tool_calls.append({
+ "id": tc.id,
+ "type": getattr(tc, 'type', "function"),
+ "function": {
+ "name": tc.function.name,
+ "arguments": tc.function.arguments
+ }
+ })
+ return serializable_tool_calls
+
+ def _extract_tool_call_info(self, tool_call, is_ollama: bool = False) -> tuple:
+ """Extract function name, arguments, and tool_call_id from a tool call.
+
+ Handles both dict and object formats for tool calls.
+ """
+ if isinstance(tool_call, dict):
+ return self._parse_tool_call_arguments(tool_call, is_ollama)
+ else:
+ # Handle object-style tool calls
+ try:
+ function_name = tool_call.function.name
+ arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
+ tool_call_id = tool_call.id
+ except (json.JSONDecodeError, AttributeError) as e:
+ logging.error(f"Error parsing object-style tool call: {e}")
+ function_name = "unknown_function"
+ arguments = {}
+ tool_call_id = f"tool_{id(tool_call)}"
+ return function_name, arguments, tool_call_id
+
  # Response without tool calls
  def response(
  self,
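These helpers consolidate logic that was previously inlined in both the sync and async paths. The trickiest one is _process_tool_calls_from_stream, which stitches partial tool-call deltas back together as chunks arrive. A self-contained sketch of the same accumulation over synthetic delta objects (accumulate_tool_calls and the SimpleNamespace chunks are illustrative, not the package's types):

from types import SimpleNamespace
from typing import Dict, List

def accumulate_tool_calls(delta, tool_calls: List[Dict]) -> List[Dict]:
    # Each streamed delta may carry a fragment of a tool call; the index ties
    # fragments together, the name arrives once, the arguments arrive piecewise.
    if getattr(delta, 'tool_calls', None):
        for tc in delta.tool_calls:
            if tc.index >= len(tool_calls):
                tool_calls.append({"id": tc.id, "type": "function",
                                   "function": {"name": "", "arguments": ""}})
            if tc.function.name:
                tool_calls[tc.index]["function"]["name"] = tc.function.name
            if tc.function.arguments:
                tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
    return tool_calls

chunk1 = SimpleNamespace(tool_calls=[SimpleNamespace(
    index=0, id="call_1", function=SimpleNamespace(name="get_weather", arguments='{"city":'))])
chunk2 = SimpleNamespace(tool_calls=[SimpleNamespace(
    index=0, id=None, function=SimpleNamespace(name=None, arguments=' "Paris"}'))])
calls: List[Dict] = []
for delta in (chunk1, chunk2):
    calls = accumulate_tool_calls(delta, calls)
print(calls)  # one complete call with arguments '{"city": "Paris"}'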
@@ -1867,22 +1965,21 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  logger.debug("Using synchronous response function")

  # Log all self values when in debug mode
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "stream": stream,
- "verbose": verbose,
- "markdown": markdown,
- "kwargs": str(kwargs)
- }
- logger.debug(f"Response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+ self._log_llm_config(
+ 'Response method',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ stream=stream,
+ verbose=verbose,
+ markdown=markdown,
+ kwargs=str(kwargs)
+ )

  # Build messages list using shared helper (simplified version without JSON output)
  messages, _ = self._build_messages(
@@ -1891,42 +1988,29 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  )

  # Get response from LiteLLM
+ response_text = ""
+ completion_params = self._build_completion_params(
+ messages=messages,
+ temperature=temperature,
+ stream=stream,
+ **kwargs
+ )
+
  if stream:
- response_text = ""
  if verbose:
  with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
- for chunk in litellm.completion(
- **self._build_completion_params(
- messages=messages,
- temperature=temperature,
- stream=True,
- **kwargs
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
+ for chunk in litellm.completion(**completion_params):
+ content = self._process_streaming_chunk(chunk)
+ if content:
  response_text += content
  live.update(display_generating(response_text, start_time))
  else:
- for chunk in litellm.completion(
- **self._build_completion_params(
- messages=messages,
- temperature=temperature,
- stream=True,
- **kwargs
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- response_text += chunk.choices[0].delta.content
+ for chunk in litellm.completion(**completion_params):
+ content = self._process_streaming_chunk(chunk)
+ if content:
+ response_text += content
  else:
- response = litellm.completion(
- **self._build_completion_params(
- messages=messages,
- temperature=temperature,
- stream=False,
- **kwargs
- )
- )
+ response = litellm.completion(**completion_params)
  response_text = response.choices[0].message.content.strip()

  if verbose:
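Both response() above and the async variant below now build the completion parameters once and reuse them across the streaming and non-streaming branches. A condensed sketch of that call pattern (simple_response and the model name are illustrative; it assumes litellm is installed and an API key is configured):

import litellm

def simple_response(messages, model="gpt-4o-mini", temperature=0.7, stream=True):
    # Build the parameters once, then reuse them for either branch.
    params = dict(model=model, messages=messages, temperature=temperature, stream=stream)
    if stream:
        text = ""
        for chunk in litellm.completion(**params):
            if chunk and chunk.choices and chunk.choices[0].delta.content:
                text += chunk.choices[0].delta.content
        return text.strip()
    response = litellm.completion(**params)
    return response.choices[0].message.content.strip()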
@@ -1967,23 +2051,23 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  logger.debug("Using asynchronous response function")

+
  # Log all self values when in debug mode
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "stream": stream,
- "verbose": verbose,
- "markdown": markdown,
- "kwargs": str(kwargs)
- }
- logger.debug(f"Async response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+ self._log_llm_config(
+ 'Async response method',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ stream=stream,
+ verbose=verbose,
+ markdown=markdown,
+ kwargs=str(kwargs)
+ )

  # Build messages list using shared helper (simplified version without JSON output)
  messages, _ = self._build_messages(
@@ -1992,42 +2076,29 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  )

  # Get response from LiteLLM
+ response_text = ""
+ completion_params = self._build_completion_params(
+ messages=messages,
+ temperature=temperature,
+ stream=stream,
+ **kwargs
+ )
+
  if stream:
- response_text = ""
  if verbose:
  with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
- async for chunk in await litellm.acompletion(
- **self._build_completion_params(
- messages=messages,
- temperature=temperature,
- stream=True,
- **kwargs
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
+ async for chunk in await litellm.acompletion(**completion_params):
+ content = self._process_streaming_chunk(chunk)
+ if content:
  response_text += content
  live.update(display_generating(response_text, start_time))
  else:
- async for chunk in await litellm.acompletion(
- **self._build_completion_params(
- messages=messages,
- temperature=temperature,
- stream=True,
- **kwargs
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- response_text += chunk.choices[0].delta.content
+ async for chunk in await litellm.acompletion(**completion_params):
+ content = self._process_streaming_chunk(chunk)
+ if content:
+ response_text += content
  else:
- response = await litellm.acompletion(
- **self._build_completion_params(
- messages=messages,
- temperature=temperature,
- stream=False,
- **kwargs
- )
- )
+ response = await litellm.acompletion(**completion_params)
  response_text = response.choices[0].message.content.strip()

  if verbose:
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.119
+ Version: 0.0.121
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -3,12 +3,12 @@ praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9
  praisonaiagents/main.py,sha256=bamnEu5PaekloGi52VqAFclm-HzjEVeKtWF0Zpdmfzs,15479
  praisonaiagents/session.py,sha256=d-CZPYikOHb0q-H9f_IWKJsypnQfz1YKeLLkyxs6oDo,15532
  praisonaiagents/agent/__init__.py,sha256=IhIDtAkfJ99cxbttwou52coih_AejS2-jpazsX6LbDY,350
- praisonaiagents/agent/agent.py,sha256=oZaMJJXoWOWJVOFSLmnoBEpF9rb54pnvSqZHgiOhzAw,108660
+ praisonaiagents/agent/agent.py,sha256=_ROVyOTPBMB5Porv4YvZ4-kKWr4-tGMbSN7V8uDWZgk,109619
  praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
  praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
  praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
  praisonaiagents/agents/agents.py,sha256=WnptTEMSDMAM30Ka6rOAu6rBD-ZLev3qphb1a3BbP1g,63301
- praisonaiagents/agents/autoagents.py,sha256=njkcv7wgDjrUd5auLL3rMc7qv20Kfo40zdn49UxWR9k,14235
+ praisonaiagents/agents/autoagents.py,sha256=gLzNsYkvefY667p3xbbvgEBLu4VzEZeyh3a_3yxt1e8,16478
  praisonaiagents/guardrails/__init__.py,sha256=HA8zhp-KRHTxo0194MUwXOUJjPyjOu7E3d7xUIKYVVY,310
  praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXCFIyZXZVY4f0hnQyWc,1352
  praisonaiagents/guardrails/llm_guardrail.py,sha256=MTTqmYDdZX-18QN9T17T5P_6H2qnV8GVgymJufW1WuM,3277
@@ -16,7 +16,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
  praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
  praisonaiagents/knowledge/knowledge.py,sha256=OKPar-XGyAp1ndmbOOdCgqFnTCqpOThYVSIZRxZyP58,15683
  praisonaiagents/llm/__init__.py,sha256=6lTeQ8jWi1-KiwjCDCmkHo2e-bRLq2dP0s5iJWqjO3s,1421
- praisonaiagents/llm/llm.py,sha256=mfEUXbjT-0jQmiQ3qqgsyDbzgVpWq_s26VSe6l-heEw,106565
+ praisonaiagents/llm/llm.py,sha256=8cDahPVMPI882J0psA1cXreJGXvO33eSOpMNy7FLCS4,107383
  praisonaiagents/llm/openai_client.py,sha256=0JvjCDHoH8I8kIt5vvObARkGdVaPWdTIv_FoEQ5EQPA,48973
  praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
  praisonaiagents/mcp/mcp.py,sha256=-fFx4MHffnN2woLnnV7Pzx3-1SFkn2j8Gp5F5ZIwKJ0,19698
@@ -53,7 +53,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
  praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
  praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
  praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
- praisonaiagents-0.0.119.dist-info/METADATA,sha256=O1WvcOBDN5jvW1BAbhYat4usg_Lq6lIbVGR6fTxA2fE,1669
- praisonaiagents-0.0.119.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- praisonaiagents-0.0.119.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
- praisonaiagents-0.0.119.dist-info/RECORD,,
+ praisonaiagents-0.0.121.dist-info/METADATA,sha256=okAbJt5iVUK3GgBI66uk_0sKZSSs2orSgKepbSpQb-8,1669
+ praisonaiagents-0.0.121.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ praisonaiagents-0.0.121.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+ praisonaiagents-0.0.121.dist-info/RECORD,,