PraisonAI 2.2.24__cp313-cp313-manylinux_2_39_x86_64.whl → 2.2.26__cp313-cp313-manylinux_2_39_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of PraisonAI might be problematic. See the package registry's advisory page for this release for more details.

praisonai/deploy.py CHANGED
@@ -56,7 +56,7 @@ class CloudDeployer:
56
56
  file.write("FROM python:3.11-slim\n")
57
57
  file.write("WORKDIR /app\n")
58
58
  file.write("COPY . .\n")
59
- file.write("RUN pip install flask praisonai==2.2.24 gunicorn markdown\n")
59
+ file.write("RUN pip install flask praisonai==2.2.26 gunicorn markdown\n")
60
60
  file.write("EXPOSE 8080\n")
61
61
  file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
62
62
 
praisonai/ui/chat.py CHANGED
@@ -12,7 +12,7 @@ import base64
12
12
  from dotenv import load_dotenv
13
13
  from PIL import Image
14
14
  from tavily import TavilyClient
15
- from crawl4ai import AsyncAsyncWebCrawler
15
+ from crawl4ai import AsyncWebCrawler
16
16
 
17
17
  # Local application/library imports
18
18
  import chainlit as cl
praisonai/ui/code.py CHANGED
@@ -12,7 +12,7 @@ from dotenv import load_dotenv
12
12
  from PIL import Image
13
13
  from context import ContextGatherer
14
14
  from tavily import TavilyClient
15
- from crawl4ai import AsyncAsyncWebCrawler
15
+ from crawl4ai import AsyncWebCrawler
16
16
 
17
17
  # Local application/library imports
18
18
  import chainlit as cl
@@ -2,6 +2,7 @@ import os
2
2
  import asyncio
3
3
  from pathlib import Path
4
4
  import difflib
5
+ import platform
5
6
  from typing import Dict, Any
6
7
  from litellm import acompletion
7
8
  import json
@@ -119,10 +120,24 @@ class AICoder:
119
120
  except:
120
121
  return None
121
122
 
123
+ def get_shell_command(self, command: str) -> str:
124
+ """
125
+ Convert command to be cross-platform compatible.
126
+ On Windows, use cmd /c for shell commands.
127
+ On Unix-like systems, use the command as-is.
128
+ """
129
+ if platform.system() == "Windows":
130
+ # For Windows, escape quotes and wrap command in cmd /c
131
+ escaped_command = command.replace('"', '\\"')
132
+ return f'cmd /c "{escaped_command}"'
133
+ return command
134
+
122
135
  async def execute_command(self, command: str):
123
136
  try:
137
+ # Make command cross-platform compatible
138
+ shell_command = self.get_shell_command(command)
124
139
  process = await asyncio.create_subprocess_shell(
125
- command,
140
+ shell_command,
126
141
  stdout=asyncio.subprocess.PIPE,
127
142
  stderr=asyncio.subprocess.PIPE,
128
143
  cwd=self.cwd
praisonai/ui/realtime.py CHANGED
@@ -229,7 +229,7 @@ except Exception as e:
229
229
  @cl.on_chat_start
230
230
  async def start():
231
231
  initialize_db()
232
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini-realtime-preview")
232
+ model_name = os.getenv("OPENAI_MODEL_NAME") or os.getenv("MODEL_NAME", "gpt-4o-mini-realtime-preview-2024-12-17")
233
233
  cl.user_session.set("model_name", model_name)
234
234
  cl.user_session.set("message_history", []) # Initialize message history
235
235
  logger.debug(f"Model name: {model_name}")
@@ -238,7 +238,7 @@ async def start():
238
238
  # TextInput(
239
239
  # id="model_name",
240
240
  # label="Enter the Model Name",
241
- # placeholder="e.g., gpt-4o-mini-realtime-preview",
241
+ # placeholder="e.g., gpt-4o-mini-realtime-preview-2024-12-17",
242
242
  # initial=model_name
243
243
  # )
244
244
  # ]
@@ -382,7 +382,8 @@ async def on_audio_start():
382
382
  openai_realtime = cl.user_session.get("openai_realtime")
383
383
 
384
384
  if not openai_realtime.is_connected():
385
- await openai_realtime.connect()
385
+ model_name = cl.user_session.get("model_name") or os.getenv("OPENAI_MODEL_NAME") or os.getenv("MODEL_NAME", "gpt-4o-mini-realtime-preview-2024-12-17")
386
+ await openai_realtime.connect(model_name)
386
387
 
387
388
  logger.info("Connected to OpenAI realtime")
388
389
  return True
@@ -394,11 +395,22 @@ async def on_audio_start():
394
395
 
395
396
  @cl.on_audio_chunk
396
397
  async def on_audio_chunk(chunk: cl.InputAudioChunk):
397
- openai_realtime: RealtimeClient = cl.user_session.get("openai_realtime")
398
+ openai_realtime: RealtimeClient = cl.user_session.get("openai_realtime")
399
+
400
+ if not openai_realtime:
401
+ logger.debug("No realtime client available")
402
+ return
403
+
398
404
  if openai_realtime.is_connected():
399
- await openai_realtime.append_input_audio(chunk.data)
405
+ try:
406
+ success = await openai_realtime.append_input_audio(chunk.data)
407
+ if not success:
408
+ logger.debug("Failed to append audio data - connection may be lost")
409
+ except Exception as e:
410
+ logger.debug(f"Error processing audio chunk: {e}")
411
+ # Optionally try to reconnect here if needed
400
412
  else:
401
- logger.info("RealtimeClient is not connected")
413
+ logger.debug("RealtimeClient is not connected - audio chunk ignored")
402
414
 
403
415
  @cl.on_audio_end
404
416
  @cl.on_chat_end
@@ -423,14 +435,14 @@ def auth_callback(username: str, password: str):
423
435
  @cl.on_chat_resume
424
436
  async def on_chat_resume(thread: ThreadDict):
425
437
  logger.info(f"Resuming chat: {thread['id']}")
426
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini-realtime-preview"
438
+ model_name = os.getenv("OPENAI_MODEL_NAME") or os.getenv("MODEL_NAME") or "gpt-4o-mini-realtime-preview-2024-12-17"
427
439
  logger.debug(f"Model name: {model_name}")
428
440
  settings = cl.ChatSettings(
429
441
  [
430
442
  TextInput(
431
443
  id="model_name",
432
444
  label="Enter the Model Name",
433
- placeholder="e.g., gpt-4o-mini-realtime-preview",
445
+ placeholder="e.g., gpt-4o-mini-realtime-preview-2024-12-17",
434
446
  initial=model_name
435
447
  )
436
448
  ]
@@ -6,6 +6,7 @@ import inspect
6
6
  import numpy as np
7
7
  import json
8
8
  import websockets
9
+ from websockets.exceptions import ConnectionClosed
9
10
  from datetime import datetime
10
11
  from collections import defaultdict
11
12
  import base64
@@ -92,34 +93,86 @@ class RealtimeAPI(RealtimeEventHandler):
92
93
  def __init__(self, url=None, api_key=None):
93
94
  super().__init__()
94
95
  self.default_url = 'wss://api.openai.com/v1/realtime'
95
- self.url = url or self.default_url
96
+
97
+ # Support custom base URL from environment variable
98
+ base_url = os.getenv("OPENAI_BASE_URL")
99
+ if base_url:
100
+ # Convert HTTP/HTTPS base URL to WebSocket URL for realtime API
101
+ if base_url.startswith('https://'):
102
+ ws_url = base_url.replace('https://', 'wss://').rstrip('/') + '/realtime'
103
+ elif base_url.startswith('http://'):
104
+ ws_url = base_url.replace('http://', 'ws://').rstrip('/') + '/realtime'
105
+ else:
106
+ # Assume it's already a WebSocket URL
107
+ ws_url = base_url.rstrip('/') + '/realtime' if not base_url.endswith('/realtime') else base_url
108
+ self.url = url or ws_url
109
+ else:
110
+ self.url = url or self.default_url
111
+
96
112
  self.api_key = api_key or os.getenv("OPENAI_API_KEY")
97
113
  self.ws = None
98
114
 
99
115
  def is_connected(self):
100
- return self.ws is not None
116
+ if self.ws is None:
117
+ return False
118
+ # Some websockets versions don't have a closed attribute
119
+ try:
120
+ return not self.ws.closed
121
+ except AttributeError:
122
+ # Fallback: check if websocket is still alive by checking state
123
+ try:
124
+ return hasattr(self.ws, 'state') and self.ws.state.name == 'OPEN'
125
+ except:
126
+ # Last fallback: assume connected if ws exists
127
+ return True
101
128
 
102
129
  def log(self, *args):
103
130
  logger.debug(f"[Websocket/{datetime.utcnow().isoformat()}]", *args)
104
131
 
105
- async def connect(self, model='gpt-4o-realtime-preview-2024-10-01'):
132
+ async def connect(self, model='gpt-4o-mini-realtime-preview-2024-12-17'):
106
133
  if self.is_connected():
107
134
  raise Exception("Already connected")
108
- self.ws = await websockets.connect(f"{self.url}?model={model}", extra_headers={
135
+
136
+ headers = {
109
137
  'Authorization': f'Bearer {self.api_key}',
110
138
  'OpenAI-Beta': 'realtime=v1'
111
- })
139
+ }
140
+
141
+ # Try different header parameter names for compatibility
142
+ try:
143
+ self.ws = await websockets.connect(f"{self.url}?model={model}", additional_headers=headers)
144
+ except TypeError:
145
+ # Fallback to older websockets versions
146
+ try:
147
+ self.ws = await websockets.connect(f"{self.url}?model={model}", extra_headers=headers)
148
+ except TypeError:
149
+ # Last fallback - some versions might not support headers parameter
150
+ raise Exception("Websockets library version incompatible. Please update websockets to version 11.0 or higher.")
151
+
112
152
  self.log(f"Connected to {self.url}")
113
153
  asyncio.create_task(self._receive_messages())
114
154
 
115
155
  async def _receive_messages(self):
116
- async for message in self.ws:
117
- event = json.loads(message)
118
- if event['type'] == "error":
119
- logger.error("ERROR", event)
120
- self.log("received:", event)
121
- self.dispatch(f"server.{event['type']}", event)
122
- self.dispatch("server.*", event)
156
+ try:
157
+ async for message in self.ws:
158
+ event = json.loads(message)
159
+ if event['type'] == "error":
160
+ logger.error(f"OpenAI Realtime API Error: {event}")
161
+ self.log("received:", event)
162
+ self.dispatch(f"server.{event['type']}", event)
163
+ self.dispatch("server.*", event)
164
+ except ConnectionClosed as e:
165
+ logger.info(f"WebSocket connection closed normally: {e}")
166
+ # Mark connection as closed
167
+ self.ws = None
168
+ # Dispatch disconnection event
169
+ self.dispatch("disconnected", {"reason": str(e)})
170
+ except Exception as e:
171
+ logger.warning(f"WebSocket receive loop ended: {e}")
172
+ # Mark connection as closed
173
+ self.ws = None
174
+ # Dispatch disconnection event
175
+ self.dispatch("disconnected", {"reason": str(e)})
123
176
 
124
177
  async def send(self, event_name, data=None):
125
178
  if not self.is_connected():
@@ -135,16 +188,33 @@ class RealtimeAPI(RealtimeEventHandler):
135
188
  self.dispatch(f"client.{event_name}", event)
136
189
  self.dispatch("client.*", event)
137
190
  self.log("sent:", event)
138
- await self.ws.send(json.dumps(event))
191
+
192
+ try:
193
+ await self.ws.send(json.dumps(event))
194
+ except ConnectionClosed as e:
195
+ logger.info(f"WebSocket connection closed during send: {e}")
196
+ # Mark connection as closed if send fails
197
+ self.ws = None
198
+ raise Exception(f"WebSocket connection lost: {e}")
199
+ except Exception as e:
200
+ logger.error(f"Failed to send WebSocket message: {e}")
201
+ # Mark connection as closed if send fails
202
+ self.ws = None
203
+ raise Exception(f"WebSocket connection lost: {e}")
139
204
 
140
205
  def _generate_id(self, prefix):
141
206
  return f"{prefix}{int(datetime.utcnow().timestamp() * 1000)}"
142
207
 
143
208
  async def disconnect(self):
144
209
  if self.ws:
145
- await self.ws.close()
146
- self.ws = None
147
- self.log(f"Disconnected from {self.url}")
210
+ try:
211
+ await self.ws.close()
212
+ logger.info(f"Disconnected from {self.url}")
213
+ except Exception as e:
214
+ logger.warning(f"Error during WebSocket close: {e}")
215
+ finally:
216
+ self.ws = None
217
+ self.log(f"WebSocket connection cleaned up")
148
218
 
149
219
  class RealtimeConversation:
150
220
  default_frequency = config.features.audio.sample_rate
@@ -341,8 +411,7 @@ class RealtimeConversation:
341
411
  return None, None
342
412
  array_buffer = base64_to_array_buffer(delta)
343
413
  append_values = array_buffer.tobytes()
344
- # TODO: make it work
345
- # item['formatted']['audio'] = merge_int16_arrays(item['formatted']['audio'], append_values)
414
+ item['formatted']['audio'].append(append_values)
346
415
  return item, {'audio': append_values}
347
416
 
348
417
  def _process_text_delta(self, event):
@@ -381,7 +450,6 @@ class RealtimeClient(RealtimeEventHandler):
381
450
  "tools": [],
382
451
  "tool_choice": "auto",
383
452
  "temperature": 0.8,
384
- "max_response_output_tokens": 4096,
385
453
  }
386
454
  self.session_config = {}
387
455
  self.transcription_models = [{"model": "whisper-1"}]
@@ -431,8 +499,13 @@ class RealtimeClient(RealtimeEventHandler):
431
499
  self.dispatch("realtime.event", realtime_event)
432
500
 
433
501
  def _on_session_created(self, event):
434
- print(f"Session created: {event}")
435
- logger.debug(f"Session created: {event}")
502
+ try:
503
+ session_id = event.get('session', {}).get('id', 'unknown')
504
+ model = event.get('session', {}).get('model', 'unknown')
505
+ logger.info(f"OpenAI Realtime session created - ID: {session_id}, Model: {model}")
506
+ except Exception as e:
507
+ logger.warning(f"Error processing session created event: {e}")
508
+ logger.debug(f"Session event details: {event}")
436
509
  self.session_created = True
437
510
 
438
511
  def _process_event(self, event, *args):
@@ -497,10 +570,15 @@ class RealtimeClient(RealtimeEventHandler):
497
570
  self._add_api_event_handlers()
498
571
  return True
499
572
 
500
- async def connect(self):
573
+ async def connect(self, model=None):
501
574
  if self.is_connected():
502
575
  raise Exception("Already connected, use .disconnect() first")
503
- await self.realtime.connect()
576
+
577
+ # Use provided model, OPENAI_MODEL_NAME environment variable, or default
578
+ if model is None:
579
+ model = os.getenv("OPENAI_MODEL_NAME", 'gpt-4o-mini-realtime-preview-2024-12-17')
580
+
581
+ await self.realtime.connect(model)
504
582
  await self.update_session()
505
583
  return True
506
584
 
@@ -516,6 +594,7 @@ class RealtimeClient(RealtimeEventHandler):
516
594
  self.conversation.clear()
517
595
  if self.realtime.is_connected():
518
596
  await self.realtime.disconnect()
597
+ logger.info("RealtimeClient disconnected")
519
598
 
520
599
  def get_turn_detection_type(self):
521
600
  return self.session_config.get("turn_detection", {}).get("type")
@@ -579,11 +658,22 @@ class RealtimeClient(RealtimeEventHandler):
579
658
  return True
580
659
 
581
660
  async def append_input_audio(self, array_buffer):
661
+ if not self.is_connected():
662
+ logger.warning("Cannot append audio: RealtimeClient is not connected")
663
+ return False
664
+
582
665
  if len(array_buffer) > 0:
583
- await self.realtime.send("input_audio_buffer.append", {
584
- "audio": array_buffer_to_base64(np.array(array_buffer)),
585
- })
586
- self.input_audio_buffer.extend(array_buffer)
666
+ try:
667
+ await self.realtime.send("input_audio_buffer.append", {
668
+ "audio": array_buffer_to_base64(np.array(array_buffer)),
669
+ })
670
+ self.input_audio_buffer.extend(array_buffer)
671
+ except Exception as e:
672
+ logger.error(f"Failed to append input audio: {e}")
673
+ # Connection might be lost, mark as disconnected
674
+ if "connection" in str(e).lower() or "closed" in str(e).lower():
675
+ logger.warning("WebSocket connection appears to be lost. Audio input will be queued until reconnection.")
676
+ return False
587
677
  return True
588
678
 
589
679
  async def create_response(self):
@@ -650,4 +740,17 @@ class RealtimeClient(RealtimeEventHandler):
650
740
  logger.debug(f"Unhandled item type:\n{json.dumps(item, indent=2)}")
651
741
 
652
742
  # Additional debug logging
653
- logger.debug(f"Processed Chainlit message for item: {item.get('id', 'unknown')}")
743
+ logger.debug(f"Processed Chainlit message for item: {item.get('id', 'unknown')}")
744
+
745
+ async def ensure_connected(self):
746
+ """Check connection health and attempt reconnection if needed"""
747
+ if not self.is_connected():
748
+ try:
749
+ logger.info("Attempting to reconnect to OpenAI Realtime API...")
750
+ model = os.getenv("OPENAI_MODEL_NAME", 'gpt-4o-mini-realtime-preview-2024-12-17')
751
+ await self.connect(model)
752
+ return True
753
+ except Exception as e:
754
+ logger.error(f"Failed to reconnect: {e}")
755
+ return False
756
+ return True
@@ -22,8 +22,14 @@ logger.setLevel(log_level)
22
22
  tavily_api_key = os.getenv("TAVILY_API_KEY")
23
23
  tavily_client = TavilyClient(api_key=tavily_api_key) if tavily_api_key else None
24
24
 
25
- # Set up OpenAI client
26
- openai_client = OpenAI()
25
+ # Set up OpenAI client with support for custom base URL
26
+ openai_base_url = os.getenv("OPENAI_BASE_URL")
27
+ openai_api_key = os.getenv("OPENAI_API_KEY")
28
+
29
+ if openai_base_url:
30
+ openai_client = OpenAI(base_url=openai_base_url, api_key=openai_api_key)
31
+ else:
32
+ openai_client = OpenAI(api_key=openai_api_key)
27
33
 
28
34
  query_stock_price_def = {
29
35
  "name": "query_stock_price",
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: PraisonAI
3
- Version: 2.2.24
3
+ Version: 2.2.26
4
4
  Summary: PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration.
5
5
  Author: Mervin Praison
6
6
  Requires-Python: >=3.10
@@ -64,7 +64,7 @@ Requires-Dist: playwright (>=1.47.0) ; extra == "code"
64
64
  Requires-Dist: plotly (>=5.24.0) ; extra == "realtime"
65
65
  Requires-Dist: praisonai-tools (>=0.0.15) ; extra == "autogen"
66
66
  Requires-Dist: praisonai-tools (>=0.0.15) ; extra == "crewai"
67
- Requires-Dist: praisonaiagents (>=0.0.95)
67
+ Requires-Dist: praisonaiagents (>=0.0.97)
68
68
  Requires-Dist: pyautogen (>=0.2.19) ; extra == "autogen"
69
69
  Requires-Dist: pydantic (<=2.10.1) ; extra == "chat"
70
70
  Requires-Dist: pydantic (<=2.10.1) ; extra == "code"
@@ -6,7 +6,7 @@ praisonai/api/call.py,sha256=-dV9DKNDi4w9vN6K63TUh15_PC0M5KzYOmBqHbuJqq0,11079
6
6
  praisonai/auto.py,sha256=0omuyIIuu-zBAXpsGo3JwuhX6zpjQg3ZtqbPtF5LZbg,12331
7
7
  praisonai/chainlit_ui.py,sha256=VKf_--cONLIBMymMY8j-oj6Pq_rw3pHtXOqF2wZ9gYI,12220
8
8
  praisonai/cli.py,sha256=LK6__iJP9jr1QAmG7E4kDbmlYqKIRivu9GedfBRz0_w,36311
9
- praisonai/deploy.py,sha256=n1aCQD1dzxnh9XYgn-rNiuXMsNMjKj8E3rt2wvDPBr4,6028
9
+ praisonai/deploy.py,sha256=hrwFyJUkKJY5jOuHI4N5fSrE45QnxP9E2eIDK9yNeJI,6028
10
10
  praisonai/inbuilt_tools/__init__.py,sha256=mZOEximj3zCyJHq9Lz0bGXhQpBsa_QR-R-yA9UKC3zI,565
11
11
  praisonai/inbuilt_tools/autogen_tools.py,sha256=kJdEv61BTYvdHOaURNEpBcWq8Rs-oC03loNFTIjT-ak,4687
12
12
  praisonai/inc/__init__.py,sha256=sPDlYBBwdk0VlWzaaM_lG0_LD07lS2HRGvPdxXJFiYg,62
@@ -40,11 +40,11 @@ praisonai/train_vision.py,sha256=OLDtr5u9rszWQ80LC5iFy37yPuYguES6AQybm_2RtM4,125
40
40
  praisonai/ui/README.md,sha256=QG9yucvBieVjCjWFzu6hL9xNtYllkoqyJ_q1b0YYAco,1124
41
41
  praisonai/ui/agents.py,sha256=wWtVHCQAvLxAe3vtcnivM0JWGuxshbhhwbX8t5VYTD4,32817
42
42
  praisonai/ui/callbacks.py,sha256=V4_-GjxmjDFmugUZGfQHKtNSysx7rT6i1UblbM_8lIM,1968
43
- praisonai/ui/chat.py,sha256=8pLbn5gnmQFRCc9U_P5MyQgYUGHHloN3qjul8rm6ASI,13575
44
- praisonai/ui/code.py,sha256=9Fw_bpo2M6tUxXCyfQ0Qoh_j_6l_TcxwDttvtlKc6Oc,16670
43
+ praisonai/ui/chat.py,sha256=mfNU-fmJt4-x3sKe10DuiieOTZYsP5046yGlZq3yVI0,13570
44
+ praisonai/ui/code.py,sha256=W4lNfbHTl6VeVYCdGi1T3qOL8VN4guUVKA68ZUCunJU,16665
45
45
  praisonai/ui/colab.py,sha256=A2NceDVazMy53mIpp-NIn5w3y8aQKwQu5LmHTepVwlo,19584
46
46
  praisonai/ui/colab_chainlit.py,sha256=wrB1O0ttRlmOH8aMxU8QdGpse-X54U87ZcEEA3R1aFg,2432
47
- praisonai/ui/components/aicoder.py,sha256=E2Tz3sWR9WKIPquO30T7aNzpe41XwYwy9UY3CXvSTlw,11165
47
+ praisonai/ui/components/aicoder.py,sha256=kcO0jLZVFnA3TP8-WZnSYrIQMpFTwqeByxwOhreMX-A,11781
48
48
  praisonai/ui/config/chainlit.md,sha256=YCjGjkKOeW0w711tkAdEfC6sPgBRm6G3bxYPFeHx72U,28
49
49
  praisonai/ui/config/translations/bn.json,sha256=m2TAaGMS-18_siW5dw4sbosh0Wn8ENWWzdGYkHaBrXw,22679
50
50
  praisonai/ui/config/translations/en-US.json,sha256=QoQAg8P5Q5gbGASc-HAHcfhufk71-Uc1u_ewIBfHuLc,9821
@@ -67,15 +67,14 @@ praisonai/ui/public/logo_light.png,sha256=8cQRti_Ysa30O3_7C3ku2w40LnVUUlUok47H-3
67
67
  praisonai/ui/public/movie.svg,sha256=aJ2EQ8vXZusVsF2SeuAVxP4RFJzQ14T26ejrGYdBgzk,1289
68
68
  praisonai/ui/public/praison.css,sha256=fBYbJn4Uuv2AH6ThWkMmdAy_uBbw9a9ZeW0hIGsqotA,75
69
69
  praisonai/ui/public/thriller.svg,sha256=2dYY72EcgbEyTxS4QzjAm37Y4srtPWEW4vCMFki98ZI,3163
70
- praisonai/ui/realtime.py,sha256=aVK-lbA57J9KHo3Lrknk4aaO1V1tRkiKXr_01zWrl30,17845
71
- praisonai/ui/realtimeclient/__init__.py,sha256=zA2xa7rBUSw77wFkndJMQNNPqdH6ywQ3uf4WSYHjNfs,27513
72
- praisonai/ui/realtimeclient/realtimedocs.txt,sha256=hmgd8Uwy2SkjSndyyF_-ZOaNxiyHwGaQLGc67DvV-sI,26395
73
- praisonai/ui/realtimeclient/tools.py,sha256=ujkTZQIha6DQBIfTkhInI-iYD3wi3do2r_EBJCddQy8,8364
70
+ praisonai/ui/realtime.py,sha256=-Tf9gSIQgQEsppHtt4cjI0tqSZT1dsPDEQK1mmUzFOw,18482
71
+ praisonai/ui/realtimeclient/__init__.py,sha256=uAsEOveayAQRRrZ3P6ws46hjpszXBPfMc45dIVF4ZHk,32334
72
+ praisonai/ui/realtimeclient/tools.py,sha256=H0_m0z9-x9R4zV8Q_2Ky6UjF5cxCXjCYSTFsdBs8Nkc,8619
74
73
  praisonai/ui/sql_alchemy.py,sha256=ilWAWicUGja7ADbXW9_OgIYeyKNuAQ1ZI_RMqjmMI9k,29667
75
74
  praisonai/ui/tools.md,sha256=Ad3YH_ZCLMWlz3mDXllQnQ_S5l55LWqLdcZSh-EXrHI,3956
76
75
  praisonai/upload_vision.py,sha256=lMpFn993UiYVJxRNZQTmcbPbEajQ5TFKCNGK1Icn_hg,5253
77
76
  praisonai/version.py,sha256=ugyuFliEqtAwQmH4sTlc16YXKYbFWDmfyk87fErB8-8,21
78
- praisonai-2.2.24.dist-info/METADATA,sha256=gQeynN_Esn3YmXS62aXjkBR7F4vwihFmxLya9oabsGs,4761
79
- praisonai-2.2.24.dist-info/WHEEL,sha256=dCzwOzx-VmbmLA5u8QpkARaxx3rsePBxa1nmZphhNQk,110
80
- praisonai-2.2.24.dist-info/entry_points.txt,sha256=QSSfuXjZMhf16FZ201I_oSoX_s1nWYbi_4_UXPE3S-o,145
81
- praisonai-2.2.24.dist-info/RECORD,,
77
+ praisonai-2.2.26.dist-info/METADATA,sha256=mrwTOT67VANYlvoAwWpOpAfj8Bq-M5DS3oVGeBnQ2UU,4761
78
+ praisonai-2.2.26.dist-info/WHEEL,sha256=dCzwOzx-VmbmLA5u8QpkARaxx3rsePBxa1nmZphhNQk,110
79
+ praisonai-2.2.26.dist-info/entry_points.txt,sha256=QSSfuXjZMhf16FZ201I_oSoX_s1nWYbi_4_UXPE3S-o,145
80
+ praisonai-2.2.26.dist-info/RECORD,,