praisonaiagents 0.0.114__py3-none-any.whl → 0.0.115__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -7,15 +7,199 @@ supporting both synchronous and asynchronous operations.

  import os
  import logging
- from typing import Any, Dict, List, Optional, Union, AsyncIterator, Iterator
+ import time
+ import json
+ import asyncio
+ from typing import Any, Dict, List, Optional, Union, AsyncIterator, Iterator, Callable, Tuple
  from openai import OpenAI, AsyncOpenAI
  from openai.types.chat import ChatCompletionChunk
- import asyncio
  from pydantic import BaseModel
+ from dataclasses import dataclass
+ from rich.console import Console
+ from rich.live import Live
+ import inspect

  # Constants
  LOCAL_SERVER_API_KEY_PLACEHOLDER = "not-needed"
 
+ # Data Classes for OpenAI Response Structure
+ @dataclass
+ class ChatCompletionMessage:
+     content: str
+     role: str = "assistant"
+     refusal: Optional[str] = None
+     audio: Optional[str] = None
+     function_call: Optional[dict] = None
+     tool_calls: Optional[List] = None
+     reasoning_content: Optional[str] = None
+
+ @dataclass
+ class Choice:
+     finish_reason: Optional[str]
+     index: int
+     message: ChatCompletionMessage
+     logprobs: Optional[dict] = None
+
+ @dataclass
+ class CompletionTokensDetails:
+     accepted_prediction_tokens: Optional[int] = None
+     audio_tokens: Optional[int] = None
+     reasoning_tokens: Optional[int] = None
+     rejected_prediction_tokens: Optional[int] = None
+
+ @dataclass
+ class PromptTokensDetails:
+     audio_tokens: Optional[int] = None
+     cached_tokens: int = 0
+
+ @dataclass
+ class CompletionUsage:
+     completion_tokens: int = 0
+     prompt_tokens: int = 0
+     total_tokens: int = 0
+     completion_tokens_details: Optional[CompletionTokensDetails] = None
+     prompt_tokens_details: Optional[PromptTokensDetails] = None
+     prompt_cache_hit_tokens: int = 0
+     prompt_cache_miss_tokens: int = 0
+
+ @dataclass
+ class ChatCompletion:
+     id: str
+     choices: List[Choice]
+     created: int
+     model: str
+     object: str = "chat.completion"
+     system_fingerprint: Optional[str] = None
+     service_tier: Optional[str] = None
+     usage: Optional[CompletionUsage] = None
+
+ @dataclass
+ class ToolCall:
+     """Tool call representation compatible with OpenAI format"""
+     id: str
+     type: str
+     function: Dict[str, Any]
+
+
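
For illustration, a minimal sketch of how these dataclasses compose into a response object; the id, model name, and token counts below are made up:

import time

message = ChatCompletionMessage(content="Hello!", role="assistant")
choice = Choice(finish_reason="stop", index=0, message=message)
usage = CompletionUsage(completion_tokens=2, prompt_tokens=5, total_tokens=7)
response = ChatCompletion(
    id="chatcmpl-example",   # hypothetical id
    choices=[choice],
    created=int(time.time()),
    model="gpt-4o-mini",     # any model name works for the sketch
    usage=usage,
)
assert response.choices[0].message.content == "Hello!"
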
+ def process_stream_chunks(chunks):
+     """Process streaming chunks into a combined response"""
+     if not chunks:
+         return None
+
+     try:
+         first_chunk = chunks[0]
+         last_chunk = chunks[-1]
+
+         # Basic metadata
+         id = getattr(first_chunk, "id", None)
+         created = getattr(first_chunk, "created", None)
+         model = getattr(first_chunk, "model", None)
+         system_fingerprint = getattr(first_chunk, "system_fingerprint", None)
+
+         # Track usage (the chunks are not inspected for usage, so these stay 0)
+         completion_tokens = 0
+         prompt_tokens = 0
+
+         content_list = []
+         reasoning_list = []
+         tool_calls = []
+         current_tool_call = None
+
+         # First pass: Get initial tool call data
+         for chunk in chunks:
+             if not hasattr(chunk, "choices") or not chunk.choices:
+                 continue
+
+             delta = getattr(chunk.choices[0], "delta", None)
+             if not delta:
+                 continue
+
+             # Handle content and reasoning
+             if hasattr(delta, "content") and delta.content:
+                 content_list.append(delta.content)
+             if hasattr(delta, "reasoning_content") and delta.reasoning_content:
+                 reasoning_list.append(delta.reasoning_content)
+
+             # Handle tool calls
+             if hasattr(delta, "tool_calls") and delta.tool_calls:
+                 for tool_call_delta in delta.tool_calls:
+                     if tool_call_delta.index is not None and tool_call_delta.id:
+                         # Found the initial tool call
+                         current_tool_call = {
+                             "id": tool_call_delta.id,
+                             "type": "function",
+                             "function": {
+                                 "name": tool_call_delta.function.name,
+                                 "arguments": ""
+                             }
+                         }
+                         while len(tool_calls) <= tool_call_delta.index:
+                             tool_calls.append(None)
+                         tool_calls[tool_call_delta.index] = current_tool_call
+                         current_tool_call = tool_calls[tool_call_delta.index]
+                     elif current_tool_call is not None and hasattr(tool_call_delta.function, "arguments"):
+                         if tool_call_delta.function.arguments:
+                             current_tool_call["function"]["arguments"] += tool_call_delta.function.arguments
+
+         # Remove any None values and empty tool calls
+         tool_calls = [tc for tc in tool_calls if tc and tc["id"] and tc["function"]["name"]]
+
+         combined_content = "".join(content_list) if content_list else ""
+         combined_reasoning = "".join(reasoning_list) if reasoning_list else None
+         finish_reason = getattr(last_chunk.choices[0], "finish_reason", None) if hasattr(last_chunk, "choices") and last_chunk.choices else None
+
+         # Create ToolCall objects
+         processed_tool_calls = []
+         if tool_calls:
+             try:
+                 for tc in tool_calls:
+                     tool_call = ToolCall(
+                         id=tc["id"],
+                         type=tc["type"],
+                         function={
+                             "name": tc["function"]["name"],
+                             "arguments": tc["function"]["arguments"]
+                         }
+                     )
+                     processed_tool_calls.append(tool_call)
+             except Exception as e:
+                 logging.error(f"Error processing tool call: {e}")
+
+         message = ChatCompletionMessage(
+             content=combined_content,
+             role="assistant",
+             reasoning_content=combined_reasoning,
+             tool_calls=processed_tool_calls if processed_tool_calls else None
+         )
+
+         choice = Choice(
+             finish_reason=(finish_reason or "tool_calls") if processed_tool_calls else None,
+             index=0,
+             message=message
+         )
+
+         usage = CompletionUsage(
+             completion_tokens=completion_tokens,
+             prompt_tokens=prompt_tokens,
+             total_tokens=completion_tokens + prompt_tokens,
+             completion_tokens_details=CompletionTokensDetails(),
+             prompt_tokens_details=PromptTokensDetails()
+         )
+
+         return ChatCompletion(
+             id=id,
+             choices=[choice],
+             created=created,
+             model=model,
+             system_fingerprint=system_fingerprint,
+             usage=usage
+         )
+
+     except Exception as e:
+         logging.error(f"Error processing chunks: {e}")
+         return None
+
+
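
A hedged sketch of how process_stream_chunks folds content deltas together, using SimpleNamespace stand-ins for the SDK's chunk objects (real chunks come from the openai client):

from types import SimpleNamespace

def make_chunk(content=None, finish_reason=None):
    # Minimal stand-in for an openai ChatCompletionChunk
    delta = SimpleNamespace(content=content, reasoning_content=None, tool_calls=None)
    choice = SimpleNamespace(delta=delta, finish_reason=finish_reason)
    return SimpleNamespace(id="chatcmpl-x", created=0, model="gpt-4o-mini",
                           system_fingerprint=None, choices=[choice])

chunks = [make_chunk("Hel"), make_chunk("lo"), make_chunk(finish_reason="stop")]
response = process_stream_chunks(chunks)
assert response.choices[0].message.content == "Hello"
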
  class OpenAIClient:
      """
      Unified OpenAI client wrapper for sync/async operations.
@@ -52,6 +236,9 @@ class OpenAIClient:

          # Set up logging
          self.logger = logging.getLogger(__name__)
+
+         # Initialize console for display
+         self.console = Console()

      @property
      def sync_client(self) -> OpenAI:
@@ -65,6 +252,427 @@ class OpenAIClient:
          self._async_client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)
          return self._async_client

+     def build_messages(
+         self,
+         prompt: Union[str, List[Dict]],
+         system_prompt: Optional[str] = None,
+         chat_history: Optional[List[Dict]] = None,
+         output_json: Optional[BaseModel] = None,
+         output_pydantic: Optional[BaseModel] = None
+     ) -> Tuple[List[Dict], Union[str, List[Dict]]]:
+         """
+         Build the messages list for an OpenAI completion.
+
+         Args:
+             prompt: The user prompt (str or list)
+             system_prompt: Optional system prompt
+             chat_history: Optional list of previous messages
+             output_json: Optional Pydantic model for JSON output
+             output_pydantic: Optional Pydantic model for JSON output (alias)
+
+         Returns:
+             tuple: (messages list, original prompt)
+         """
+         messages = []
+
+         # Handle system prompt
+         if system_prompt:
+             # Append JSON schema if needed
+             if output_json:
+                 system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
+             elif output_pydantic:
+                 system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
+
+             messages.append({"role": "system", "content": system_prompt})
+
+         # Add chat history if provided
+         if chat_history:
+             messages.extend(chat_history)
+
+         # Handle prompt modifications for JSON output
+         original_prompt = prompt
+         if output_json or output_pydantic:
+             if isinstance(prompt, str):
+                 prompt = prompt + "\nReturn ONLY a valid JSON object. No other text or explanation."
+             elif isinstance(prompt, list):
+                 # Copy the list and its dict items so the caller's prompt is not mutated
+                 prompt = [item.copy() if isinstance(item, dict) else item for item in prompt]
+                 for item in prompt:
+                     if item.get("type") == "text":
+                         item["text"] = item["text"] + "\nReturn ONLY a valid JSON object. No other text or explanation."
+                         break
+
+         # Add the prompt as the user message (str and list content are handled identically)
+         messages.append({"role": "user", "content": prompt})
+
+         return messages, original_prompt
+
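
Assuming `client` is an OpenAIClient instance constructed elsewhere, build_messages behaves roughly like this (the prompts are made up):

messages, original_prompt = client.build_messages(
    prompt="Summarize this article in one sentence.",
    system_prompt="You are a concise assistant.",
    chat_history=[
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello!"},
    ],
)
# messages ->
# [{"role": "system", "content": "You are a concise assistant."},
#  {"role": "user", "content": "Hi"},
#  {"role": "assistant", "content": "Hello!"},
#  {"role": "user", "content": "Summarize this article in one sentence."}]
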
+     def _fix_array_schemas(self, schema: Dict) -> Dict:
+         """
+         Recursively fix array schemas by adding missing 'items' attribute.
+
+         This ensures compatibility with OpenAI's function calling format which
+         requires array types to specify the type of items they contain.
+
+         Args:
+             schema: The schema dictionary to fix
+
+         Returns:
+             dict: The fixed schema
+         """
+         if not isinstance(schema, dict):
+             return schema
+
+         # Create a copy to avoid modifying the original
+         fixed_schema = schema.copy()
+
+         # Fix array types at the current level
+         if fixed_schema.get("type") == "array" and "items" not in fixed_schema:
+             # Add a default items schema for arrays without it
+             fixed_schema["items"] = {"type": "string"}
+
+         # Recursively fix nested schemas in properties
+         if "properties" in fixed_schema and isinstance(fixed_schema["properties"], dict):
+             fixed_properties = {}
+             for prop_name, prop_schema in fixed_schema["properties"].items():
+                 if isinstance(prop_schema, dict):
+                     fixed_properties[prop_name] = self._fix_array_schemas(prop_schema)
+                 else:
+                     fixed_properties[prop_name] = prop_schema
+             fixed_schema["properties"] = fixed_properties
+
+         # Fix items schema if it exists
+         if "items" in fixed_schema and isinstance(fixed_schema["items"], dict):
+             fixed_schema["items"] = self._fix_array_schemas(fixed_schema["items"])
+
+         return fixed_schema
+
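
The effect of _fix_array_schemas on a schema with bare arrays, sketched with a made-up schema (again assuming `client` is an OpenAIClient instance):

schema = {
    "type": "object",
    "properties": {
        "tags": {"type": "array"},  # missing "items"
        "nested": {
            "type": "object",
            "properties": {"ids": {"type": "array"}},
        },
    },
}
fixed = client._fix_array_schemas(schema)
assert fixed["properties"]["tags"]["items"] == {"type": "string"}
assert fixed["properties"]["nested"]["properties"]["ids"]["items"] == {"type": "string"}
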
+     def format_tools(self, tools: Optional[List[Any]]) -> Optional[List[Dict]]:
+         """
+         Format tools for OpenAI API.
+
+         Supports:
+         - Pre-formatted OpenAI tools (dicts with type='function')
+         - Lists of pre-formatted tools
+         - Callable functions
+         - String function names
+         - MCP tools
+
+         Args:
+             tools: List of tools in various formats
+
+         Returns:
+             List of formatted tools or None
+         """
+         if not tools:
+             return None
+
+         formatted_tools = []
+         for tool in tools:
+             # Check if the tool is already in OpenAI format
+             if isinstance(tool, dict) and 'type' in tool and tool['type'] == 'function':
+                 if 'function' in tool and isinstance(tool['function'], dict) and 'name' in tool['function']:
+                     logging.debug(f"Using pre-formatted OpenAI tool: {tool['function']['name']}")
+                     # Fix array schemas in the tool parameters
+                     fixed_tool = tool.copy()
+                     if 'parameters' in fixed_tool['function']:
+                         fixed_tool['function']['parameters'] = self._fix_array_schemas(fixed_tool['function']['parameters'])
+                     formatted_tools.append(fixed_tool)
+                 else:
+                     logging.debug("Skipping malformed OpenAI tool: missing function or name")
+             # Handle lists of tools
+             elif isinstance(tool, list):
+                 for subtool in tool:
+                     if isinstance(subtool, dict) and 'type' in subtool and subtool['type'] == 'function':
+                         if 'function' in subtool and isinstance(subtool['function'], dict) and 'name' in subtool['function']:
+                             logging.debug(f"Using pre-formatted OpenAI tool from list: {subtool['function']['name']}")
+                             # Fix array schemas in the tool parameters
+                             fixed_tool = subtool.copy()
+                             if 'parameters' in fixed_tool['function']:
+                                 fixed_tool['function']['parameters'] = self._fix_array_schemas(fixed_tool['function']['parameters'])
+                             formatted_tools.append(fixed_tool)
+                         else:
+                             logging.debug("Skipping malformed OpenAI tool in list: missing function or name")
+             elif callable(tool):
+                 tool_def = self._generate_tool_definition(tool)
+                 if tool_def:
+                     formatted_tools.append(tool_def)
+             elif isinstance(tool, str):
+                 tool_def = self._generate_tool_definition_from_name(tool)
+                 if tool_def:
+                     formatted_tools.append(tool_def)
+             else:
+                 logging.debug(f"Skipping tool of unsupported type: {type(tool)}")
+
+         # Validate JSON serialization before returning
+         if formatted_tools:
+             try:
+                 json.dumps(formatted_tools)  # Validate serialization
+             except (TypeError, ValueError) as e:
+                 logging.error(f"Tools are not JSON serializable: {e}")
+                 return None
+
+         return formatted_tools if formatted_tools else None
+
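
A sketch of the pre-formatted-dict path; the 'search' tool is hypothetical, and the bare 'queries' array picks up a default items spec on the way through:

tools = client.format_tools([
    {
        "type": "function",
        "function": {
            "name": "search",
            "description": "Search the web.",
            "parameters": {
                "type": "object",
                "properties": {"queries": {"type": "array"}},
                "required": ["queries"],
            },
        },
    },
])
assert tools[0]["function"]["parameters"]["properties"]["queries"] == {
    "type": "array",
    "items": {"type": "string"},
}
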
+     def _generate_tool_definition(self, func: Callable) -> Optional[Dict]:
+         """Generate a tool definition from a callable function."""
+         try:
+             sig = inspect.signature(func)
+
+             # Skip self, *args, **kwargs
+             parameters_list = []
+             for name, param in sig.parameters.items():
+                 if name == "self":
+                     continue
+                 if param.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):
+                     continue
+                 parameters_list.append((name, param))
+
+             parameters = {
+                 "type": "object",
+                 "properties": {},
+                 "required": []
+             }
+
+             # Parse docstring for parameter descriptions
+             docstring = inspect.getdoc(func)
+             param_descriptions = {}
+             if docstring:
+                 import re
+                 param_section = re.split(r'\s*Args:\s*', docstring)
+                 if len(param_section) > 1:
+                     param_lines = param_section[1].split('\n')
+                     for line in param_lines:
+                         line = line.strip()
+                         if line and ':' in line:
+                             param_name, param_desc = line.split(':', 1)
+                             param_descriptions[param_name.strip()] = param_desc.strip()
+
+             for name, param in parameters_list:
+                 param_type = "string"  # Default type
+                 param_info = {}
+
+                 if param.annotation != inspect.Parameter.empty:
+                     if param.annotation is int:
+                         param_type = "integer"
+                     elif param.annotation is float:
+                         param_type = "number"
+                     elif param.annotation is bool:
+                         param_type = "boolean"
+                     elif param.annotation is list:
+                         param_type = "array"
+                         # OpenAI requires 'items' for array types
+                         param_info["items"] = {"type": "string"}
+                     elif param.annotation is dict:
+                         param_type = "object"
+
+                 param_info["type"] = param_type
+                 if name in param_descriptions:
+                     param_info["description"] = param_descriptions[name]
+
+                 parameters["properties"][name] = param_info
+                 if param.default == inspect.Parameter.empty:
+                     parameters["required"].append(name)
+
+             # Extract description from docstring
+             description = docstring.split('\n')[0] if docstring else f"Function {func.__name__}"
+
+             return {
+                 "type": "function",
+                 "function": {
+                     "name": func.__name__,
+                     "description": description,
+                     "parameters": parameters
+                 }
+             }
+         except Exception as e:
+             logging.error(f"Error generating tool definition: {e}")
+             return None
+
+     def _generate_tool_definition_from_name(self, function_name: str) -> Optional[Dict]:
+         """Generate a tool definition from a function name."""
+         # This is a placeholder - in agent.py this would look up the function
+         # For now, return None as the actual implementation would need access to the function
+         logging.debug(f"Tool definition generation from name '{function_name}' requires function reference")
+         return None
+
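
The introspection above turns an annotated, documented callable into an OpenAI tool definition. A sketch with a made-up function:

def add(a: int, b: int = 0):
    """Add two integers.

    Args:
        a: First addend
        b: Second addend
    """
    return a + b

tool_def = client._generate_tool_definition(add)
# tool_def ->
# {"type": "function",
#  "function": {"name": "add",
#               "description": "Add two integers.",
#               "parameters": {"type": "object",
#                              "properties": {"a": {"type": "integer", "description": "First addend"},
#                                             "b": {"type": "integer", "description": "Second addend"}},
#                              "required": ["a"]}}}
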
+     def process_stream_response(
+         self,
+         messages: List[Dict],
+         model: str,
+         temperature: float = 0.7,
+         tools: Optional[List[Dict]] = None,
+         start_time: Optional[float] = None,
+         console: Optional[Console] = None,
+         display_fn: Optional[Callable] = None,
+         reasoning_steps: bool = False,
+         **kwargs
+     ) -> Optional[ChatCompletion]:
+         """
+         Process streaming response and return final response.
+
+         Args:
+             messages: List of messages for the conversation
+             model: Model to use
+             temperature: Temperature for generation
+             tools: Optional formatted tools
+             start_time: Start time for timing display
+             console: Console for output
+             display_fn: Display function for live updates
+             reasoning_steps: Whether to show reasoning steps
+             **kwargs: Additional parameters for the API
+
+         Returns:
+             ChatCompletion object or None if error
+         """
+         try:
+             # Default start time and console if not provided
+             if start_time is None:
+                 start_time = time.time()
+             if console is None:
+                 console = self.console
+
+             # Create the response stream
+             response_stream = self._sync_client.chat.completions.create(
+                 model=model,
+                 messages=messages,
+                 temperature=temperature,
+                 tools=tools if tools else None,
+                 stream=True,
+                 **kwargs
+             )
+
+             full_response_text = ""
+             reasoning_content = ""
+             chunks = []
+
+             # If display function provided, use Live display
+             if display_fn:
+                 with Live(
+                     display_fn("", start_time),
+                     console=console,
+                     refresh_per_second=4,
+                     transient=True,
+                     vertical_overflow="ellipsis",
+                     auto_refresh=True
+                 ) as live:
+                     for chunk in response_stream:
+                         chunks.append(chunk)
+                         if chunk.choices[0].delta.content:
+                             full_response_text += chunk.choices[0].delta.content
+                             live.update(display_fn(full_response_text, start_time))
+
+                         # Update live display with reasoning content if enabled
+                         if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
+                             rc = chunk.choices[0].delta.reasoning_content
+                             if rc:
+                                 reasoning_content += rc
+                                 live.update(display_fn(f"{full_response_text}\n[Reasoning: {reasoning_content}]", start_time))
+
+                 # Clear the last generating display with a blank line
+                 console.print()
+             else:
+                 # Just collect chunks without display
+                 for chunk in response_stream:
+                     chunks.append(chunk)
+
+             final_response = process_stream_chunks(chunks)
+             return final_response
+
+         except Exception as e:
+             self.logger.error(f"Error in stream processing: {e}")
+             return None
+
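
display_fn is called with the accumulated text and the start time, and must return a rich renderable for Live. A hypothetical minimal display function (the panel styling is an assumption, not part of the package):

import time
from rich.panel import Panel

def simple_display(text: str, start_time: float) -> Panel:
    # Show the accumulated response with an elapsed-time header.
    elapsed = time.time() - start_time
    return Panel(text or "...", title=f"Generating ({elapsed:.1f}s)")

# response = client.process_stream_response(
#     messages=[{"role": "user", "content": "Tell me a joke"}],
#     model="gpt-4o-mini",
#     display_fn=simple_display,
# )
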
+     async def process_stream_response_async(
+         self,
+         messages: List[Dict],
+         model: str,
+         temperature: float = 0.7,
+         tools: Optional[List[Dict]] = None,
+         start_time: Optional[float] = None,
+         console: Optional[Console] = None,
+         display_fn: Optional[Callable] = None,
+         reasoning_steps: bool = False,
+         **kwargs
+     ) -> Optional[ChatCompletion]:
+         """
+         Async version of process_stream_response.
+
+         Args:
+             messages: List of messages for the conversation
+             model: Model to use
+             temperature: Temperature for generation
+             tools: Optional formatted tools
+             start_time: Start time for timing display
+             console: Console for output
+             display_fn: Display function for live updates
+             reasoning_steps: Whether to show reasoning steps
+             **kwargs: Additional parameters for the API
+
+         Returns:
+             ChatCompletion object or None if error
+         """
+         try:
+             # Default start time and console if not provided
+             if start_time is None:
+                 start_time = time.time()
+             if console is None:
+                 console = self.console
+
+             # Create the response stream
+             response_stream = await self.async_client.chat.completions.create(
+                 model=model,
+                 messages=messages,
+                 temperature=temperature,
+                 tools=tools if tools else None,
+                 stream=True,
+                 **kwargs
+             )
+
+             full_response_text = ""
+             reasoning_content = ""
+             chunks = []
+
+             # If display function provided, use Live display
+             if display_fn:
+                 with Live(
+                     display_fn("", start_time),
+                     console=console,
+                     refresh_per_second=4,
+                     transient=True,
+                     vertical_overflow="ellipsis",
+                     auto_refresh=True
+                 ) as live:
+                     async for chunk in response_stream:
+                         chunks.append(chunk)
+                         if chunk.choices[0].delta.content:
+                             full_response_text += chunk.choices[0].delta.content
+                             live.update(display_fn(full_response_text, start_time))
+
+                         # Update live display with reasoning content if enabled
+                         if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
+                             rc = chunk.choices[0].delta.reasoning_content
+                             if rc:
+                                 reasoning_content += rc
+                                 live.update(display_fn(f"{full_response_text}\n[Reasoning: {reasoning_content}]", start_time))
+
+                 # Clear the last generating display with a blank line
+                 console.print()
+             else:
+                 # Just collect chunks without display
+                 async for chunk in response_stream:
+                     chunks.append(chunk)
+
+             final_response = process_stream_chunks(chunks)
+             return final_response
+
+         except Exception as e:
+             self.logger.error(f"Error in async stream processing: {e}")
+             return None
+
      def create_completion(
          self,
          messages: List[Dict[str, Any]],
@@ -155,6 +763,357 @@ class OpenAIClient:
              self.logger.error(f"Error creating async completion: {e}")
              raise

+     def chat_completion_with_tools(
+         self,
+         messages: List[Dict[str, Any]],
+         model: str = "gpt-4o",
+         temperature: float = 0.7,
+         tools: Optional[List[Any]] = None,
+         execute_tool_fn: Optional[Callable] = None,
+         stream: bool = True,
+         console: Optional[Console] = None,
+         display_fn: Optional[Callable] = None,
+         reasoning_steps: bool = False,
+         verbose: bool = True,
+         max_iterations: int = 10,
+         **kwargs
+     ) -> Optional[ChatCompletion]:
+         """
+         Create a chat completion with tool support and streaming.
+
+         This method handles the full tool execution loop, including:
+         - Formatting tools for OpenAI API
+         - Making the initial API call
+         - Executing tool calls if present
+         - Getting final response after tool execution
+
+         Args:
+             messages: List of message dictionaries
+             model: Model to use
+             temperature: Temperature for generation
+             tools: List of tools (can be callables, dicts, or strings)
+             execute_tool_fn: Function to execute tools
+             stream: Whether to stream responses
+             console: Console for output
+             display_fn: Display function for streaming
+             reasoning_steps: Whether to show reasoning
+             verbose: Whether to show verbose output
+             max_iterations: Maximum tool calling iterations
+             **kwargs: Additional API parameters
+
+         Returns:
+             Final ChatCompletion response or None if error
+         """
+         start_time = time.time()
+
+         # Format tools for OpenAI API
+         formatted_tools = self.format_tools(tools)
+
+         # Continue tool execution loop until no more tool calls are needed
+         iteration_count = 0
+
+         while iteration_count < max_iterations:
+             if stream:
+                 # Process as streaming response with formatted tools
+                 final_response = self.process_stream_response(
+                     messages=messages,
+                     model=model,
+                     temperature=temperature,
+                     tools=formatted_tools,
+                     start_time=start_time,
+                     console=console,
+                     display_fn=display_fn,
+                     reasoning_steps=reasoning_steps,
+                     **kwargs
+                 )
+             else:
+                 # Process as regular non-streaming response
+                 final_response = self.create_completion(
+                     messages=messages,
+                     model=model,
+                     temperature=temperature,
+                     tools=formatted_tools,
+                     stream=False,
+                     **kwargs
+                 )
+
+             if not final_response:
+                 return None
+
+             # Check for tool calls
+             tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
+
+             if tool_calls and execute_tool_fn:
+                 # Convert ToolCall dataclass objects to dict for JSON serialization
+                 serializable_tool_calls = []
+                 for tc in tool_calls:
+                     if isinstance(tc, ToolCall):
+                         # Convert dataclass to dict
+                         serializable_tool_calls.append({
+                             "id": tc.id,
+                             "type": tc.type,
+                             "function": tc.function
+                         })
+                     else:
+                         # Already an OpenAI object, keep as is
+                         serializable_tool_calls.append(tc)
+
+                 messages.append({
+                     "role": "assistant",
+                     "content": final_response.choices[0].message.content,
+                     "tool_calls": serializable_tool_calls
+                 })
+
+                 for tool_call in tool_calls:
+                     # Handle both ToolCall dataclass and OpenAI object
+                     if isinstance(tool_call, ToolCall):
+                         function_name = tool_call.function["name"]
+                         arguments = json.loads(tool_call.function["arguments"])
+                     else:
+                         function_name = tool_call.function.name
+                         arguments = json.loads(tool_call.function.arguments)
+
+                     if verbose and console:
+                         console.print(f"[bold]Calling function:[/bold] {function_name}")
+                         console.print(f"[dim]Arguments:[/dim] {arguments}")
+
+                     # Execute the tool
+                     tool_result = execute_tool_fn(function_name, arguments)
+                     results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"
+
+                     if verbose and console:
+                         console.print(f"[dim]Result:[/dim] {results_str}")
+
+                     messages.append({
+                         "role": "tool",
+                         "tool_call_id": tool_call.id if hasattr(tool_call, 'id') else tool_call['id'],
+                         "content": results_str
+                     })
+
+                 # Check if we should continue (for tools like sequential thinking)
+                 should_continue = False
+                 for tool_call in tool_calls:
+                     # Handle both ToolCall dataclass and OpenAI object
+                     if isinstance(tool_call, ToolCall):
+                         function_name = tool_call.function["name"]
+                         arguments = json.loads(tool_call.function["arguments"])
+                     else:
+                         function_name = tool_call.function.name
+                         arguments = json.loads(tool_call.function.arguments)
+
+                     # For sequential thinking tool, check if nextThoughtNeeded is True
+                     if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
+                         should_continue = True
+                         break
+
+                 if not should_continue:
+                     # Get final response after tool calls
+                     if stream:
+                         final_response = self.process_stream_response(
+                             messages=messages,
+                             model=model,
+                             temperature=temperature,
+                             tools=formatted_tools,
+                             start_time=start_time,
+                             console=console,
+                             display_fn=display_fn,
+                             reasoning_steps=reasoning_steps,
+                             **kwargs
+                         )
+                     else:
+                         final_response = self.create_completion(
+                             messages=messages,
+                             model=model,
+                             temperature=temperature,
+                             stream=False,
+                             **kwargs
+                         )
+                     break
+
+                 iteration_count += 1
+             else:
+                 # No tool calls, we're done
+                 break
+
+         return final_response
+
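
A sketch of the tool loop from the caller's side; get_weather and its executor are made up, and execute_tool_fn receives the parsed function name and arguments:

def get_weather(city: str):
    """Get current weather.

    Args:
        city: Name of the city
    """
    ...

def execute_tool(function_name: str, arguments: dict):
    # Dispatch to a real implementation; hardcoded here for illustration.
    if function_name == "get_weather":
        return {"city": arguments["city"], "temp_c": 21}
    return {"error": f"unknown tool: {function_name}"}

# response = client.chat_completion_with_tools(
#     messages=[{"role": "user", "content": "Weather in Paris?"}],
#     model="gpt-4o-mini",
#     tools=[get_weather],          # callables are introspected via format_tools()
#     execute_tool_fn=execute_tool,
#     stream=False,
# )
# print(response.choices[0].message.content)
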
+     async def achat_completion_with_tools(
+         self,
+         messages: List[Dict[str, Any]],
+         model: str = "gpt-4o",
+         temperature: float = 0.7,
+         tools: Optional[List[Any]] = None,
+         execute_tool_fn: Optional[Callable] = None,
+         stream: bool = True,
+         console: Optional[Console] = None,
+         display_fn: Optional[Callable] = None,
+         reasoning_steps: bool = False,
+         verbose: bool = True,
+         max_iterations: int = 10,
+         **kwargs
+     ) -> Optional[ChatCompletion]:
+         """
+         Async version of chat_completion_with_tools.
+
+         Args:
+             messages: List of message dictionaries
+             model: Model to use
+             temperature: Temperature for generation
+             tools: List of tools (can be callables, dicts, or strings)
+             execute_tool_fn: Async function to execute tools
+             stream: Whether to stream responses
+             console: Console for output
+             display_fn: Display function for streaming
+             reasoning_steps: Whether to show reasoning
+             verbose: Whether to show verbose output
+             max_iterations: Maximum tool calling iterations
+             **kwargs: Additional API parameters
+
+         Returns:
+             Final ChatCompletion response or None if error
+         """
+         start_time = time.time()
+
+         # Format tools for OpenAI API
+         formatted_tools = self.format_tools(tools)
+
+         # Continue tool execution loop until no more tool calls are needed
+         iteration_count = 0
+
+         while iteration_count < max_iterations:
+             if stream:
+                 # Process as streaming response with formatted tools
+                 final_response = await self.process_stream_response_async(
+                     messages=messages,
+                     model=model,
+                     temperature=temperature,
+                     tools=formatted_tools,
+                     start_time=start_time,
+                     console=console,
+                     display_fn=display_fn,
+                     reasoning_steps=reasoning_steps,
+                     **kwargs
+                 )
+             else:
+                 # Process as regular non-streaming response
+                 final_response = await self.acreate_completion(
+                     messages=messages,
+                     model=model,
+                     temperature=temperature,
+                     tools=formatted_tools,
+                     stream=False,
+                     **kwargs
+                 )
+
+             if not final_response:
+                 return None
+
+             # Check for tool calls
+             tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
+
+             if tool_calls and execute_tool_fn:
+                 # Convert ToolCall dataclass objects to dict for JSON serialization
+                 serializable_tool_calls = []
+                 for tc in tool_calls:
+                     if isinstance(tc, ToolCall):
+                         # Convert dataclass to dict
+                         serializable_tool_calls.append({
+                             "id": tc.id,
+                             "type": tc.type,
+                             "function": tc.function
+                         })
+                     else:
+                         # Already an OpenAI object, keep as is
+                         serializable_tool_calls.append(tc)
+
+                 messages.append({
+                     "role": "assistant",
+                     "content": final_response.choices[0].message.content,
+                     "tool_calls": serializable_tool_calls
+                 })
+
+                 for tool_call in tool_calls:
+                     # Handle both ToolCall dataclass and OpenAI object
+                     if isinstance(tool_call, ToolCall):
+                         function_name = tool_call.function["name"]
+                         arguments = json.loads(tool_call.function["arguments"])
+                     else:
+                         function_name = tool_call.function.name
+                         arguments = json.loads(tool_call.function.arguments)
+
+                     if verbose and console:
+                         console.print(f"[bold]Calling function:[/bold] {function_name}")
+                         console.print(f"[dim]Arguments:[/dim] {arguments}")
+
+                     # Execute the tool (async)
+                     if asyncio.iscoroutinefunction(execute_tool_fn):
+                         tool_result = await execute_tool_fn(function_name, arguments)
+                     else:
+                         # Run sync function in executor
+                         loop = asyncio.get_running_loop()
+                         tool_result = await loop.run_in_executor(
+                             None,
+                             lambda: execute_tool_fn(function_name, arguments)
+                         )
+
+                     results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"
+
+                     if verbose and console:
+                         console.print(f"[dim]Result:[/dim] {results_str}")
+
+                     messages.append({
+                         "role": "tool",
+                         "tool_call_id": tool_call.id if hasattr(tool_call, 'id') else tool_call['id'],
+                         "content": results_str
+                     })
+
+                 # Check if we should continue (for tools like sequential thinking)
+                 should_continue = False
+                 for tool_call in tool_calls:
+                     # Handle both ToolCall dataclass and OpenAI object
+                     if isinstance(tool_call, ToolCall):
+                         function_name = tool_call.function["name"]
+                         arguments = json.loads(tool_call.function["arguments"])
+                     else:
+                         function_name = tool_call.function.name
+                         arguments = json.loads(tool_call.function.arguments)
+
+                     # For sequential thinking tool, check if nextThoughtNeeded is True
+                     if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
+                         should_continue = True
+                         break
+
+                 if not should_continue:
+                     # Get final response after tool calls
+                     if stream:
+                         final_response = await self.process_stream_response_async(
+                             messages=messages,
+                             model=model,
+                             temperature=temperature,
+                             tools=formatted_tools,
+                             start_time=start_time,
+                             console=console,
+                             display_fn=display_fn,
+                             reasoning_steps=reasoning_steps,
+                             **kwargs
+                         )
+                     else:
+                         final_response = await self.acreate_completion(
+                             messages=messages,
+                             model=model,
+                             temperature=temperature,
+                             stream=False,
+                             **kwargs
+                         )
+                     break
+
+                 iteration_count += 1
+             else:
+                 # No tool calls, we're done
+                 break
+
+         return final_response
+
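
The async variant is awaited the same way; sync execute_tool_fn callables are transparently run in an executor. A sketch reusing the hypothetical helpers from the previous example:

import asyncio

async def main():
    response = await client.achat_completion_with_tools(
        messages=[{"role": "user", "content": "Weather in Paris?"}],
        model="gpt-4o-mini",
        tools=[get_weather],
        execute_tool_fn=execute_tool,  # a plain sync function is fine here
        stream=False,
    )
    if response:
        print(response.choices[0].message.content)

# asyncio.run(main())
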
      def parse_structured_output(
          self,
          messages: List[Dict[str, Any]],