PraisonAI 2.2.24__tar.gz → 2.2.25__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of PraisonAI might be problematic; see the package registry advisory for more details.

Files changed (80)
  1. {praisonai-2.2.24 → praisonai-2.2.25}/PKG-INFO +2 -2
  2. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/deploy.py +1 -1
  3. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/chat.py +1 -1
  4. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/code.py +1 -1
  5. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/realtime.py +20 -8
  6. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/realtimeclient/__init__.py +115 -27
  7. {praisonai-2.2.24 → praisonai-2.2.25}/pyproject.toml +4 -4
  8. praisonai-2.2.24/praisonai/ui/realtimeclient/realtimedocs.txt +0 -1484
  9. {praisonai-2.2.24 → praisonai-2.2.25}/README.md +0 -0
  10. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/README.md +0 -0
  11. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/__init__.py +0 -0
  12. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/__main__.py +0 -0
  13. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/agents_generator.py +0 -0
  14. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/api/call.py +0 -0
  15. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/auto.py +0 -0
  16. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/chainlit_ui.py +0 -0
  17. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/cli.py +0 -0
  18. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/inbuilt_tools/__init__.py +0 -0
  19. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/inbuilt_tools/autogen_tools.py +0 -0
  20. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/inc/__init__.py +0 -0
  21. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/inc/config.py +0 -0
  22. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/inc/models.py +0 -0
  23. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/public/android-chrome-192x192.png +0 -0
  24. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/public/android-chrome-512x512.png +0 -0
  25. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/public/apple-touch-icon.png +0 -0
  26. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/public/fantasy.svg +0 -0
  27. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/public/favicon-16x16.png +0 -0
  28. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/public/favicon-32x32.png +0 -0
  29. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/public/favicon.ico +0 -0
  30. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/public/game.svg +0 -0
  31. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/public/logo_dark.png +0 -0
  32. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/public/logo_light.png +0 -0
  33. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/public/movie.svg +0 -0
  34. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/public/praison-ai-agents-architecture-dark.png +0 -0
  35. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/public/praison-ai-agents-architecture.png +0 -0
  36. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/public/thriller.svg +0 -0
  37. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/scheduler.py +0 -0
  38. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/setup/__init__.py +0 -0
  39. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/setup/build.py +0 -0
  40. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/setup/config.yaml +0 -0
  41. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/setup/post_install.py +0 -0
  42. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/setup/setup_conda_env.py +0 -0
  43. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/setup/setup_conda_env.sh +0 -0
  44. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/setup.py +0 -0
  45. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/test.py +0 -0
  46. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/train.py +0 -0
  47. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/train_vision.py +0 -0
  48. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/README.md +0 -0
  49. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/agents.py +0 -0
  50. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/callbacks.py +0 -0
  51. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/colab.py +0 -0
  52. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/colab_chainlit.py +0 -0
  53. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/components/aicoder.py +0 -0
  54. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/config/chainlit.md +0 -0
  55. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/config/translations/bn.json +0 -0
  56. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/config/translations/en-US.json +0 -0
  57. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/config/translations/gu.json +0 -0
  58. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/config/translations/he-IL.json +0 -0
  59. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/config/translations/hi.json +0 -0
  60. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/config/translations/kn.json +0 -0
  61. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/config/translations/ml.json +0 -0
  62. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/config/translations/mr.json +0 -0
  63. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/config/translations/ta.json +0 -0
  64. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/config/translations/te.json +0 -0
  65. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/config/translations/zh-CN.json +0 -0
  66. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/context.py +0 -0
  67. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/database_config.py +0 -0
  68. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/db.py +0 -0
  69. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/public/fantasy.svg +0 -0
  70. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/public/game.svg +0 -0
  71. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/public/logo_dark.png +0 -0
  72. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/public/logo_light.png +0 -0
  73. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/public/movie.svg +0 -0
  74. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/public/praison.css +0 -0
  75. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/public/thriller.svg +0 -0
  76. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/realtimeclient/tools.py +0 -0
  77. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/sql_alchemy.py +0 -0
  78. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/ui/tools.md +0 -0
  79. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/upload_vision.py +0 -0
  80. {praisonai-2.2.24 → praisonai-2.2.25}/praisonai/version.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: PraisonAI
3
- Version: 2.2.24
3
+ Version: 2.2.25
4
4
  Summary: PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration.
5
5
  Author: Mervin Praison
6
6
  Requires-Python: >=3.10
@@ -64,7 +64,7 @@ Requires-Dist: playwright (>=1.47.0) ; extra == "code"
64
64
  Requires-Dist: plotly (>=5.24.0) ; extra == "realtime"
65
65
  Requires-Dist: praisonai-tools (>=0.0.15) ; extra == "autogen"
66
66
  Requires-Dist: praisonai-tools (>=0.0.15) ; extra == "crewai"
67
- Requires-Dist: praisonaiagents (>=0.0.95)
67
+ Requires-Dist: praisonaiagents (>=0.0.96)
68
68
  Requires-Dist: pyautogen (>=0.2.19) ; extra == "autogen"
69
69
  Requires-Dist: pydantic (<=2.10.1) ; extra == "chat"
70
70
  Requires-Dist: pydantic (<=2.10.1) ; extra == "code"
@@ -56,7 +56,7 @@ class CloudDeployer:
56
56
  file.write("FROM python:3.11-slim\n")
57
57
  file.write("WORKDIR /app\n")
58
58
  file.write("COPY . .\n")
59
- file.write("RUN pip install flask praisonai==2.2.24 gunicorn markdown\n")
59
+ file.write("RUN pip install flask praisonai==2.2.25 gunicorn markdown\n")
60
60
  file.write("EXPOSE 8080\n")
61
61
  file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
62
62
 
@@ -12,7 +12,7 @@ import base64
12
12
  from dotenv import load_dotenv
13
13
  from PIL import Image
14
14
  from tavily import TavilyClient
15
- from crawl4ai import AsyncAsyncWebCrawler
15
+ from crawl4ai import AsyncWebCrawler
16
16
 
17
17
  # Local application/library imports
18
18
  import chainlit as cl
@@ -12,7 +12,7 @@ from dotenv import load_dotenv
12
12
  from PIL import Image
13
13
  from context import ContextGatherer
14
14
  from tavily import TavilyClient
15
- from crawl4ai import AsyncAsyncWebCrawler
15
+ from crawl4ai import AsyncWebCrawler
16
16
 
17
17
  # Local application/library imports
18
18
  import chainlit as cl
@@ -229,7 +229,7 @@ except Exception as e:
229
229
  @cl.on_chat_start
230
230
  async def start():
231
231
  initialize_db()
232
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini-realtime-preview")
232
+ model_name = os.getenv("MODEL_NAME", "gpt-4o-mini-realtime-preview-2024-12-17")
233
233
  cl.user_session.set("model_name", model_name)
234
234
  cl.user_session.set("message_history", []) # Initialize message history
235
235
  logger.debug(f"Model name: {model_name}")
@@ -238,7 +238,7 @@ async def start():
238
238
  # TextInput(
239
239
  # id="model_name",
240
240
  # label="Enter the Model Name",
241
- # placeholder="e.g., gpt-4o-mini-realtime-preview",
241
+ # placeholder="e.g., gpt-4o-mini-realtime-preview-2024-12-17",
242
242
  # initial=model_name
243
243
  # )
244
244
  # ]
@@ -382,7 +382,8 @@ async def on_audio_start():
382
382
  openai_realtime = cl.user_session.get("openai_realtime")
383
383
 
384
384
  if not openai_realtime.is_connected():
385
- await openai_realtime.connect()
385
+ model_name = cl.user_session.get("model_name", "gpt-4o-mini-realtime-preview-2024-12-17")
386
+ await openai_realtime.connect(model_name)
386
387
 
387
388
  logger.info("Connected to OpenAI realtime")
388
389
  return True
@@ -394,11 +395,22 @@ async def on_audio_start():
394
395
 
395
396
  @cl.on_audio_chunk
396
397
  async def on_audio_chunk(chunk: cl.InputAudioChunk):
397
- openai_realtime: RealtimeClient = cl.user_session.get("openai_realtime")
398
+ openai_realtime: RealtimeClient = cl.user_session.get("openai_realtime")
399
+
400
+ if not openai_realtime:
401
+ logger.debug("No realtime client available")
402
+ return
403
+
398
404
  if openai_realtime.is_connected():
399
- await openai_realtime.append_input_audio(chunk.data)
405
+ try:
406
+ success = await openai_realtime.append_input_audio(chunk.data)
407
+ if not success:
408
+ logger.debug("Failed to append audio data - connection may be lost")
409
+ except Exception as e:
410
+ logger.debug(f"Error processing audio chunk: {e}")
411
+ # Optionally try to reconnect here if needed
400
412
  else:
401
- logger.info("RealtimeClient is not connected")
413
+ logger.debug("RealtimeClient is not connected - audio chunk ignored")
402
414
 
403
415
  @cl.on_audio_end
404
416
  @cl.on_chat_end
@@ -423,14 +435,14 @@ def auth_callback(username: str, password: str):
423
435
  @cl.on_chat_resume
424
436
  async def on_chat_resume(thread: ThreadDict):
425
437
  logger.info(f"Resuming chat: {thread['id']}")
426
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini-realtime-preview"
438
+ model_name = os.getenv("MODEL_NAME") or "gpt-4o-mini-realtime-preview-2024-12-17"
427
439
  logger.debug(f"Model name: {model_name}")
428
440
  settings = cl.ChatSettings(
429
441
  [
430
442
  TextInput(
431
443
  id="model_name",
432
444
  label="Enter the Model Name",
433
- placeholder="e.g., gpt-4o-mini-realtime-preview",
445
+ placeholder="e.g., gpt-4o-mini-realtime-preview-2024-12-17",
434
446
  initial=model_name
435
447
  )
436
448
  ]
@@ -6,6 +6,7 @@ import inspect
6
6
  import numpy as np
7
7
  import json
8
8
  import websockets
9
+ from websockets.exceptions import ConnectionClosed
9
10
  from datetime import datetime
10
11
  from collections import defaultdict
11
12
  import base64
@@ -97,29 +98,66 @@ class RealtimeAPI(RealtimeEventHandler):
97
98
  self.ws = None
98
99
 
99
100
  def is_connected(self):
100
- return self.ws is not None
101
+ if self.ws is None:
102
+ return False
103
+ # Some websockets versions don't have a closed attribute
104
+ try:
105
+ return not self.ws.closed
106
+ except AttributeError:
107
+ # Fallback: check if websocket is still alive by checking state
108
+ try:
109
+ return hasattr(self.ws, 'state') and self.ws.state.name == 'OPEN'
110
+ except:
111
+ # Last fallback: assume connected if ws exists
112
+ return True
101
113
 
102
114
  def log(self, *args):
103
115
  logger.debug(f"[Websocket/{datetime.utcnow().isoformat()}]", *args)
104
116
 
105
- async def connect(self, model='gpt-4o-realtime-preview-2024-10-01'):
117
+ async def connect(self, model='gpt-4o-mini-realtime-preview-2024-12-17'):
106
118
  if self.is_connected():
107
119
  raise Exception("Already connected")
108
- self.ws = await websockets.connect(f"{self.url}?model={model}", extra_headers={
120
+
121
+ headers = {
109
122
  'Authorization': f'Bearer {self.api_key}',
110
123
  'OpenAI-Beta': 'realtime=v1'
111
- })
124
+ }
125
+
126
+ # Try different header parameter names for compatibility
127
+ try:
128
+ self.ws = await websockets.connect(f"{self.url}?model={model}", additional_headers=headers)
129
+ except TypeError:
130
+ # Fallback to older websockets versions
131
+ try:
132
+ self.ws = await websockets.connect(f"{self.url}?model={model}", extra_headers=headers)
133
+ except TypeError:
134
+ # Last fallback - some versions might not support headers parameter
135
+ raise Exception("Websockets library version incompatible. Please update websockets to version 11.0 or higher.")
136
+
112
137
  self.log(f"Connected to {self.url}")
113
138
  asyncio.create_task(self._receive_messages())
114
139
 
115
140
  async def _receive_messages(self):
116
- async for message in self.ws:
117
- event = json.loads(message)
118
- if event['type'] == "error":
119
- logger.error("ERROR", event)
120
- self.log("received:", event)
121
- self.dispatch(f"server.{event['type']}", event)
122
- self.dispatch("server.*", event)
141
+ try:
142
+ async for message in self.ws:
143
+ event = json.loads(message)
144
+ if event['type'] == "error":
145
+ logger.error(f"OpenAI Realtime API Error: {event}")
146
+ self.log("received:", event)
147
+ self.dispatch(f"server.{event['type']}", event)
148
+ self.dispatch("server.*", event)
149
+ except ConnectionClosed as e:
150
+ logger.info(f"WebSocket connection closed normally: {e}")
151
+ # Mark connection as closed
152
+ self.ws = None
153
+ # Dispatch disconnection event
154
+ self.dispatch("disconnected", {"reason": str(e)})
155
+ except Exception as e:
156
+ logger.warning(f"WebSocket receive loop ended: {e}")
157
+ # Mark connection as closed
158
+ self.ws = None
159
+ # Dispatch disconnection event
160
+ self.dispatch("disconnected", {"reason": str(e)})
123
161
 
124
162
  async def send(self, event_name, data=None):
125
163
  if not self.is_connected():
@@ -135,16 +173,33 @@ class RealtimeAPI(RealtimeEventHandler):
135
173
  self.dispatch(f"client.{event_name}", event)
136
174
  self.dispatch("client.*", event)
137
175
  self.log("sent:", event)
138
- await self.ws.send(json.dumps(event))
176
+
177
+ try:
178
+ await self.ws.send(json.dumps(event))
179
+ except ConnectionClosed as e:
180
+ logger.info(f"WebSocket connection closed during send: {e}")
181
+ # Mark connection as closed if send fails
182
+ self.ws = None
183
+ raise Exception(f"WebSocket connection lost: {e}")
184
+ except Exception as e:
185
+ logger.error(f"Failed to send WebSocket message: {e}")
186
+ # Mark connection as closed if send fails
187
+ self.ws = None
188
+ raise Exception(f"WebSocket connection lost: {e}")
139
189
 
140
190
  def _generate_id(self, prefix):
141
191
  return f"{prefix}{int(datetime.utcnow().timestamp() * 1000)}"
142
192
 
143
193
  async def disconnect(self):
144
194
  if self.ws:
145
- await self.ws.close()
146
- self.ws = None
147
- self.log(f"Disconnected from {self.url}")
195
+ try:
196
+ await self.ws.close()
197
+ logger.info(f"Disconnected from {self.url}")
198
+ except Exception as e:
199
+ logger.warning(f"Error during WebSocket close: {e}")
200
+ finally:
201
+ self.ws = None
202
+ self.log(f"WebSocket connection cleaned up")
148
203
 
149
204
  class RealtimeConversation:
150
205
  default_frequency = config.features.audio.sample_rate
@@ -341,8 +396,7 @@ class RealtimeConversation:
341
396
  return None, None
342
397
  array_buffer = base64_to_array_buffer(delta)
343
398
  append_values = array_buffer.tobytes()
344
- # TODO: make it work
345
- # item['formatted']['audio'] = merge_int16_arrays(item['formatted']['audio'], append_values)
399
+ item['formatted']['audio'].append(append_values)
346
400
  return item, {'audio': append_values}
347
401
 
348
402
  def _process_text_delta(self, event):
@@ -381,7 +435,6 @@ class RealtimeClient(RealtimeEventHandler):
381
435
  "tools": [],
382
436
  "tool_choice": "auto",
383
437
  "temperature": 0.8,
384
- "max_response_output_tokens": 4096,
385
438
  }
386
439
  self.session_config = {}
387
440
  self.transcription_models = [{"model": "whisper-1"}]
@@ -431,8 +484,13 @@ class RealtimeClient(RealtimeEventHandler):
431
484
  self.dispatch("realtime.event", realtime_event)
432
485
 
433
486
  def _on_session_created(self, event):
434
- print(f"Session created: {event}")
435
- logger.debug(f"Session created: {event}")
487
+ try:
488
+ session_id = event.get('session', {}).get('id', 'unknown')
489
+ model = event.get('session', {}).get('model', 'unknown')
490
+ logger.info(f"OpenAI Realtime session created - ID: {session_id}, Model: {model}")
491
+ except Exception as e:
492
+ logger.warning(f"Error processing session created event: {e}")
493
+ logger.debug(f"Session event details: {event}")
436
494
  self.session_created = True
437
495
 
438
496
  def _process_event(self, event, *args):
@@ -497,10 +555,15 @@ class RealtimeClient(RealtimeEventHandler):
497
555
  self._add_api_event_handlers()
498
556
  return True
499
557
 
500
- async def connect(self):
558
+ async def connect(self, model=None):
501
559
  if self.is_connected():
502
560
  raise Exception("Already connected, use .disconnect() first")
503
- await self.realtime.connect()
561
+
562
+ # Use provided model or default
563
+ if model is None:
564
+ model = 'gpt-4o-mini-realtime-preview-2024-12-17'
565
+
566
+ await self.realtime.connect(model)
504
567
  await self.update_session()
505
568
  return True
506
569
 
@@ -516,6 +579,7 @@ class RealtimeClient(RealtimeEventHandler):
516
579
  self.conversation.clear()
517
580
  if self.realtime.is_connected():
518
581
  await self.realtime.disconnect()
582
+ logger.info("RealtimeClient disconnected")
519
583
 
520
584
  def get_turn_detection_type(self):
521
585
  return self.session_config.get("turn_detection", {}).get("type")
@@ -579,11 +643,22 @@ class RealtimeClient(RealtimeEventHandler):
579
643
  return True
580
644
 
581
645
  async def append_input_audio(self, array_buffer):
646
+ if not self.is_connected():
647
+ logger.warning("Cannot append audio: RealtimeClient is not connected")
648
+ return False
649
+
582
650
  if len(array_buffer) > 0:
583
- await self.realtime.send("input_audio_buffer.append", {
584
- "audio": array_buffer_to_base64(np.array(array_buffer)),
585
- })
586
- self.input_audio_buffer.extend(array_buffer)
651
+ try:
652
+ await self.realtime.send("input_audio_buffer.append", {
653
+ "audio": array_buffer_to_base64(np.array(array_buffer)),
654
+ })
655
+ self.input_audio_buffer.extend(array_buffer)
656
+ except Exception as e:
657
+ logger.error(f"Failed to append input audio: {e}")
658
+ # Connection might be lost, mark as disconnected
659
+ if "connection" in str(e).lower() or "closed" in str(e).lower():
660
+ logger.warning("WebSocket connection appears to be lost. Audio input will be queued until reconnection.")
661
+ return False
587
662
  return True
588
663
 
589
664
  async def create_response(self):
@@ -650,4 +725,17 @@ class RealtimeClient(RealtimeEventHandler):
650
725
  logger.debug(f"Unhandled item type:\n{json.dumps(item, indent=2)}")
651
726
 
652
727
  # Additional debug logging
653
- logger.debug(f"Processed Chainlit message for item: {item.get('id', 'unknown')}")
728
+ logger.debug(f"Processed Chainlit message for item: {item.get('id', 'unknown')}")
729
+
730
+ async def ensure_connected(self):
731
+ """Check connection health and attempt reconnection if needed"""
732
+ if not self.is_connected():
733
+ try:
734
+ logger.info("Attempting to reconnect to OpenAI Realtime API...")
735
+ model = 'gpt-4o-mini-realtime-preview-2024-12-17'
736
+ await self.connect(model)
737
+ return True
738
+ except Exception as e:
739
+ logger.error(f"Failed to reconnect: {e}")
740
+ return False
741
+ return True
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "PraisonAI"
3
- version = "2.2.24"
3
+ version = "2.2.25"
4
4
  description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
5
5
  readme = "README.md"
6
6
  license = ""
@@ -12,7 +12,7 @@ dependencies = [
12
12
  "rich>=13.7",
13
13
  "markdown>=3.5",
14
14
  "pyparsing>=3.0.0",
15
- "praisonaiagents>=0.0.95",
15
+ "praisonaiagents>=0.0.96",
16
16
  "python-dotenv>=0.19.0",
17
17
  "instructor>=1.3.3",
18
18
  "PyYAML>=6.0",
@@ -95,7 +95,7 @@ autogen = ["pyautogen>=0.2.19", "praisonai-tools>=0.0.15", "crewai"]
95
95
 
96
96
  [tool.poetry]
97
97
  name = "PraisonAI"
98
- version = "2.2.24"
98
+ version = "2.2.25"
99
99
  description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
100
100
  authors = ["Mervin Praison"]
101
101
  license = ""
@@ -113,7 +113,7 @@ python = ">=3.10,<3.13"
113
113
  rich = ">=13.7"
114
114
  markdown = ">=3.5"
115
115
  pyparsing = ">=3.0.0"
116
- praisonaiagents = ">=0.0.95"
116
+ praisonaiagents = ">=0.0.96"
117
117
  python-dotenv = ">=0.19.0"
118
118
  instructor = ">=1.3.3"
119
119
  PyYAML = ">=6.0"