PraisonAI 0.0.70__cp312-cp312-manylinux_2_35_x86_64.whl → 0.0.72__cp312-cp312-manylinux_2_35_x86_64.whl

This diff shows the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.

Potentially problematic release: this version of PraisonAI might be problematic.

praisonai/deploy.py CHANGED
@@ -56,7 +56,7 @@ class CloudDeployer:
  file.write("FROM python:3.11-slim\n")
  file.write("WORKDIR /app\n")
  file.write("COPY . .\n")
- file.write("RUN pip install flask praisonai==0.0.70 gunicorn markdown\n")
+ file.write("RUN pip install flask praisonai==0.0.72 gunicorn markdown\n")
  file.write("EXPOSE 8080\n")
  file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')

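The only functional change in deploy.py is the praisonai pin inside the generated Dockerfile. For context, here is a minimal standalone sketch that reproduces the Dockerfile those write calls emit; the file name and line order are taken from the diff above, nothing else is assumed:

    # Sketch: reconstruct the Dockerfile content produced by the updated write calls.
    dockerfile_lines = [
        'FROM python:3.11-slim',
        'WORKDIR /app',
        'COPY . .',
        'RUN pip install flask praisonai==0.0.72 gunicorn markdown',  # pin bumped from 0.0.70
        'EXPOSE 8080',
        'CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]',
    ]

    with open("Dockerfile", "w") as file:
        file.write("\n".join(dockerfile_lines) + "\n")
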
praisonai/ui/chat.py CHANGED
@@ -1,6 +1,6 @@
  import chainlit as cl
  from chainlit.input_widget import TextInput
- from chainlit.types import ThreadDict
+ from chainlit.types import ThreadDict # Change this import
  from litellm import acompletion
  import os
  import sqlite3
@@ -14,6 +14,9 @@ from literalai.helper import utc_now
  import logging
  import json
  from sql_alchemy import SQLAlchemyDataLayer
+ from tavily import TavilyClient
+ from crawl4ai import WebCrawler
+ import asyncio

  # Set up logging
  logger = logging.getLogger(__name__)
@@ -171,6 +174,69 @@ deleted_thread_ids = [] # type: List[str]

  cl_data._data_layer = SQLAlchemyDataLayer(conninfo=f"sqlite+aiosqlite:///{DB_PATH}")

+ # Set Tavily API key
+ tavily_api_key = os.getenv("TAVILY_API_KEY")
+ tavily_client = TavilyClient(api_key=tavily_api_key) if tavily_api_key else None
+
+ # Modify the tavily_web_search function to be synchronous
+ def tavily_web_search(query):
+     if not tavily_client:
+         return json.dumps({
+             "query": query,
+             "error": "Tavily API key is not set. Web search is unavailable."
+         })
+
+     response = tavily_client.search(query)
+     logger.debug(f"Tavily search response: {response}")
+
+     # Create an instance of WebCrawler
+     crawler = WebCrawler()
+
+     # Warm up the crawler (load necessary models)
+     crawler.warmup()
+
+     # Prepare the results
+     results = []
+     for result in response.get('results', []):
+         url = result.get('url')
+         if url:
+             try:
+                 # Run the crawler on each URL
+                 crawl_result = crawler.run(url=url)
+                 results.append({
+                     "content": result.get('content'),
+                     "url": url,
+                     "full_content": crawl_result.markdown
+                 })
+             except Exception as e:
+                 logger.error(f"Error crawling {url}: {str(e)}")
+                 results.append({
+                     "content": result.get('content'),
+                     "url": url,
+                     "full_content": "Error: Unable to crawl this URL"
+                 })
+
+     return json.dumps({
+         "query": query,
+         "results": results
+     })
+
+ # Update the tools definition
+ tools = [{
+     "type": "function",
+     "function": {
+         "name": "tavily_web_search",
+         "description": "Search the web using Tavily API and crawl the resulting URLs",
+         "parameters": {
+             "type": "object",
+             "properties": {
+                 "query": {"type": "string", "description": "Search query"}
+             },
+             "required": ["query"]
+         }
+     }
+ }] if tavily_api_key else []
+
  @cl.on_chat_start
  async def start():
      initialize_db()
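
Taken on its own, the new tavily_web_search helper added above is synchronous and returns a JSON string: a Tavily search followed by a crawl4ai crawl of each result URL. A minimal sketch of how that same call sequence could be exercised outside Chainlit, assuming TAVILY_API_KEY is set in the environment; the query string and the printed summary are illustrative, and the tavily/crawl4ai calls mirror the ones used in the diff:

    import json
    import os

    from tavily import TavilyClient
    from crawl4ai import WebCrawler

    # Mirror the helper's setup: only create a client when the key is present.
    api_key = os.getenv("TAVILY_API_KEY")
    client = TavilyClient(api_key=api_key) if api_key else None

    if client:
        response = client.search("PraisonAI multi-agent framework")  # same search call as the diff
        crawler = WebCrawler()
        crawler.warmup()  # load models once before crawling

        for result in response.get("results", []):
            url = result.get("url")
            if url:
                page = crawler.run(url=url)
                print(json.dumps({"url": url, "chars": len(page.markdown or "")}))
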
@@ -224,31 +290,131 @@ async def setup_agent(settings):
  async def main(message: cl.Message):
      model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
      message_history = cl.user_session.get("message_history", [])
-     message_history.append({"role": "user", "content": message.content})
+     now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+     # Add the current date and time to the user's message
+     user_message = f"""
+     Answer the question and use tools if needed:\n{message.content}.\n\n
+     Current Date and Time: {now}
+     """
+     message_history.append({"role": "user", "content": user_message})

      msg = cl.Message(content="")
      await msg.send()

-     response = await acompletion(
-         model=model_name,
-         messages=message_history,
-         stream=True,
-         # temperature=0.7,
-         # max_tokens=500,
-         # top_p=1
-     )
+     # Prepare the completion parameters
+     completion_params = {
+         "model": model_name,
+         "messages": message_history,
+         "stream": True,
+     }
+
+     # Only add tools and tool_choice if Tavily API key is available
+     if tavily_api_key:
+         completion_params["tools"] = tools
+         completion_params["tool_choice"] = "auto"
+
+     response = await acompletion(**completion_params)

      full_response = ""
+     tool_calls = []
+     current_tool_call = None
+
      async for part in response:
-         if token := part['choices'][0]['delta']['content']:
-             await msg.stream_token(token)
-             full_response += token
+         if 'choices' in part and len(part['choices']) > 0:
+             delta = part['choices'][0].get('delta', {})
+
+             if 'content' in delta and delta['content'] is not None:
+                 token = delta['content']
+                 await msg.stream_token(token)
+                 full_response += token
+
+             if tavily_api_key and 'tool_calls' in delta and delta['tool_calls'] is not None:
+                 for tool_call in delta['tool_calls']:
+                     if current_tool_call is None or tool_call.index != current_tool_call['index']:
+                         if current_tool_call:
+                             tool_calls.append(current_tool_call)
+                         current_tool_call = {
+                             'id': tool_call.id,
+                             'type': tool_call.type,
+                             'index': tool_call.index,
+                             'function': {
+                                 'name': tool_call.function.name if tool_call.function else None,
+                                 'arguments': ''
+                             }
+                         }
+                     if tool_call.function:
+                         if tool_call.function.name:
+                             current_tool_call['function']['name'] = tool_call.function.name
+                         if tool_call.function.arguments:
+                             current_tool_call['function']['arguments'] += tool_call.function.arguments
+
+     if current_tool_call:
+         tool_calls.append(current_tool_call)
+
      logger.debug(f"Full response: {full_response}")
+     logger.debug(f"Tool calls: {tool_calls}")
      message_history.append({"role": "assistant", "content": full_response})
      logger.debug(f"Message history: {message_history}")
      cl.user_session.set("message_history", message_history)
      await msg.update()

+     if tavily_api_key and tool_calls:
+         available_functions = {
+             "tavily_web_search": tavily_web_search,
+         }
+         messages = message_history + [{"role": "assistant", "content": None, "function_call": {
+             "name": tool_calls[0]['function']['name'],
+             "arguments": tool_calls[0]['function']['arguments']
+         }}]
+
+         for tool_call in tool_calls:
+             function_name = tool_call['function']['name']
+             if function_name in available_functions:
+                 function_to_call = available_functions[function_name]
+                 function_args = tool_call['function']['arguments']
+                 if function_args:
+                     try:
+                         function_args = json.loads(function_args)
+                         # Call the function synchronously
+                         function_response = function_to_call(
+                             query=function_args.get("query"),
+                         )
+                         messages.append(
+                             {
+                                 "role": "function",
+                                 "name": function_name,
+                                 "content": function_response,
+                             }
+                         )
+                     except json.JSONDecodeError:
+                         logger.error(f"Failed to parse function arguments: {function_args}")
+
+         second_response = await acompletion(
+             model=model_name,
+             stream=True,
+             messages=messages,
+         )
+         logger.debug(f"Second LLM response: {second_response}")
+
+         # Handle the streaming response
+         full_response = ""
+         async for part in second_response:
+             if 'choices' in part and len(part['choices']) > 0:
+                 delta = part['choices'][0].get('delta', {})
+                 if 'content' in delta and delta['content'] is not None:
+                     token = delta['content']
+                     await msg.stream_token(token)
+                     full_response += token
+
+         # Update the message content
+         msg.content = full_response
+         await msg.update()
+     else:
+         # If no tool calls or Tavily API key is not set, the full_response is already set
+         msg.content = full_response
+         await msg.update()
+
  username = os.getenv("CHAINLIT_USERNAME", "admin") # Default to "admin" if not found
  password = os.getenv("CHAINLIT_PASSWORD", "admin") # Default to "admin" if not found

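The streaming loop above rebuilds complete tool calls from partial deltas: OpenAI-style streams send the function name once and the JSON arguments in fragments, keyed by index. A small, self-contained sketch of that accumulation step, using hand-made fragments (plain dicts standing in for the delta objects litellm yields, and a helper name of our choosing; illustrative only):

    import json

    def merge_tool_call_fragments(fragments):
        """Accumulate streamed tool-call fragments into complete calls, keyed by index."""
        calls = {}
        for frag in fragments:
            call = calls.setdefault(frag["index"], {"id": None, "name": None, "arguments": ""})
            if frag.get("id"):
                call["id"] = frag["id"]
            if frag.get("name"):
                call["name"] = frag["name"]
            call["arguments"] += frag.get("arguments", "")
        return list(calls.values())

    # Hypothetical fragments as they might arrive over several stream chunks.
    fragments = [
        {"index": 0, "id": "call_1", "name": "tavily_web_search", "arguments": ""},
        {"index": 0, "arguments": '{"query": '},
        {"index": 0, "arguments": '"latest PraisonAI release"}'},
    ]

    for call in merge_tool_call_fragments(fragments):
        print(call["name"], json.loads(call["arguments"]))
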
@@ -267,7 +433,7 @@ async def send_count():
      ).send()

  @cl.on_chat_resume
- async def on_chat_resume(thread: cl_data.ThreadDict):
+ async def on_chat_resume(thread: ThreadDict): # Change the type hint here
      logger.info(f"Resuming chat: {thread['id']}")
      model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
      logger.debug(f"Model name: {model_name}")
@@ -285,8 +451,14 @@ async def on_chat_resume(thread: cl_data.ThreadDict):
      thread_id = thread["id"]
      cl.user_session.set("thread_id", thread["id"])

-     # The metadata should now already be a dictionary
+     # Ensure metadata is a dictionary
      metadata = thread.get("metadata", {})
+     if isinstance(metadata, str):
+         try:
+             metadata = json.loads(metadata)
+         except json.JSONDecodeError:
+             metadata = {}
+
      cl.user_session.set("metadata", metadata)

      message_history = cl.user_session.get("message_history", [])
@@ -298,7 +470,14 @@ async def on_chat_resume(thread: cl_data.ThreadDict):
              message_history.append({"role": "user", "content": message.get("output", "")})
          elif msg_type == "assistant_message":
              message_history.append({"role": "assistant", "content": message.get("output", "")})
+         elif msg_type == "run":
+             # Handle 'run' type messages
+             if message.get("isError"):
+                 message_history.append({"role": "system", "content": f"Error: {message.get('output', '')}"})
+             else:
+                 # You might want to handle non-error 'run' messages differently
+                 pass
          else:
-             logger.warning(f"Message without type: {message}")
+             logger.warning(f"Message without recognized type: {message}")

      cl.user_session.set("message_history", message_history)
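
The on_chat_resume changes boil down to tolerating thread metadata that was persisted either as a dict or as a JSON string. A standalone sketch of that normalization; the helper name is ours, not the package's:

    import json

    def normalize_metadata(raw):
        """Return thread metadata as a dict, whether it was stored as a dict or a JSON string."""
        if isinstance(raw, str):
            try:
                return json.loads(raw)
            except json.JSONDecodeError:
                return {}
        return raw or {}

    print(normalize_metadata('{"model_name": "gpt-4o-mini"}'))  # parsed from a JSON string
    print(normalize_metadata({"model_name": "gpt-4o-mini"}))    # already a dict, returned as-is
    print(normalize_metadata("not json"))                       # unparseable string falls back to {}
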
praisonai/ui/code.py CHANGED
@@ -1,7 +1,7 @@
  import chainlit as cl
  from chainlit.input_widget import TextInput
  from chainlit.types import ThreadDict
- from litellm import acompletion
+ from litellm import acompletion, completion
  import os
  import sqlite3
  from datetime import datetime
@@ -15,6 +15,9 @@ import logging
  import json
  from sql_alchemy import SQLAlchemyDataLayer
  from context import ContextGatherer
+ from tavily import TavilyClient
+ from datetime import datetime
+ from crawl4ai import WebCrawler

  # Set up logging
  logger = logging.getLogger(__name__)
@@ -177,7 +180,7 @@ async def start():
      initialize_db()
      model_name = load_setting("model_name")

-     if model_name:
+     if (model_name):
          cl.user_session.set("model_name", model_name)
      else:
          # If no setting found, use default or environment variable
@@ -227,12 +230,75 @@ async def setup_agent(settings):

      metadata["model_name"] = model_name

-     # Always store metadata as a JSON string
-     await cl_data._data_layer.update_thread(thread_id, metadata=json.dumps(metadata))
+     # Always store metadata as a dictionary
+     await cl_data._data_layer.update_thread(thread_id, metadata=metadata)

      # Update the user session with the new metadata
      cl.user_session.set("metadata", metadata)

+ # Set Tavily API key
+ tavily_api_key = os.getenv("TAVILY_API_KEY")
+ tavily_client = TavilyClient(api_key=tavily_api_key) if tavily_api_key else None
+
+ # Function to call Tavily Search API and crawl the results
+ def tavily_web_search(query):
+     if not tavily_client:
+         return json.dumps({
+             "query": query,
+             "error": "Tavily API key is not set. Web search is unavailable."
+         })
+
+     response = tavily_client.search(query)
+     logger.debug(f"Tavily search response: {response}")
+
+     # Create an instance of WebCrawler
+     crawler = WebCrawler()
+
+     # Warm up the crawler (load necessary models)
+     crawler.warmup()
+
+     # Prepare the results
+     results = []
+     for result in response.get('results', []):
+         url = result.get('url')
+         if url:
+             try:
+                 # Run the crawler on each URL
+                 crawl_result = crawler.run(url=url)
+                 results.append({
+                     "content": result.get('content'),
+                     "url": url,
+                     "full_content": crawl_result.markdown
+                 })
+             except Exception as e:
+                 logger.error(f"Error crawling {url}: {str(e)}")
+                 results.append({
+                     "content": result.get('content'),
+                     "url": url,
+                     "full_content": "Error: Unable to crawl this URL"
+                 })
+
+     return json.dumps({
+         "query": query,
+         "results": results
+     })
+
+ # Define the tool for function calling
+ tools = [{
+     "type": "function",
+     "function": {
+         "name": "tavily_web_search",
+         "description": "Search the web using Tavily API and crawl the resulting URLs",
+         "parameters": {
+             "type": "object",
+             "properties": {
+                 "query": {"type": "string", "description": "Search query"}
+             },
+             "required": ["query"]
+         }
+     }
+ }] if tavily_api_key else []
+
  @cl.on_message
  async def main(message: cl.Message):
      model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
@@ -240,35 +306,131 @@ async def main(message: cl.Message):
      message_history.append({"role": "user", "content": message.content})
      gatherer = ContextGatherer()
      context, token_count, context_tree = gatherer.run()
+     now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
      prompt_history = message_history
      prompt_history.append({"role": "user", "content": """
-     Answer the question:\n{question}.\n\n
+     Answer the question and use tools if needed:\n{question}.\n\n
+     Current Date and Time: {now}
      Below is the Context:\n{context}\n\n"""
-     .format(context=context, question=message.content)})
+     .format(context=context, question=message.content, now=now)})

      msg = cl.Message(content="")
      await msg.send()

-     response = await acompletion(
-         model=model_name,
-         messages=prompt_history,
-         stream=True,
-         # temperature=0.7,
-         # max_tokens=500,
-         # top_p=1
-     )
+     # Prepare the completion parameters
+     completion_params = {
+         "model": model_name,
+         "messages": prompt_history,
+         "stream": True,
+     }
+
+     # Only add tools and tool_choice if Tavily API key is available
+     if tavily_api_key:
+         completion_params["tools"] = tools
+         completion_params["tool_choice"] = "auto"
+
+     response = await acompletion(**completion_params)
+     logger.debug(f"LLM response: {response}")

      full_response = ""
+     tool_calls = []
+     current_tool_call = None
+
      async for part in response:
-         if token := part['choices'][0]['delta']['content']:
-             await msg.stream_token(token)
-             full_response += token
+         logger.debug(f"LLM part: {part}")
+         if 'choices' in part and len(part['choices']) > 0:
+             delta = part['choices'][0].get('delta', {})
+
+             if 'content' in delta and delta['content'] is not None:
+                 token = delta['content']
+                 await msg.stream_token(token)
+                 full_response += token
+
+             if tavily_api_key and 'tool_calls' in delta and delta['tool_calls'] is not None:
+                 for tool_call in delta['tool_calls']:
+                     if current_tool_call is None or tool_call.index != current_tool_call['index']:
+                         if current_tool_call:
+                             tool_calls.append(current_tool_call)
+                         current_tool_call = {
+                             'id': tool_call.id,
+                             'type': tool_call.type,
+                             'index': tool_call.index,
+                             'function': {
+                                 'name': tool_call.function.name if tool_call.function else None,
+                                 'arguments': ''
+                             }
+                         }
+                     if tool_call.function:
+                         if tool_call.function.name:
+                             current_tool_call['function']['name'] = tool_call.function.name
+                         if tool_call.function.arguments:
+                             current_tool_call['function']['arguments'] += tool_call.function.arguments
+
+     if current_tool_call:
+         tool_calls.append(current_tool_call)
+
      logger.debug(f"Full response: {full_response}")
+     logger.debug(f"Tool calls: {tool_calls}")
      message_history.append({"role": "assistant", "content": full_response})
      logger.debug(f"Message history: {message_history}")
      cl.user_session.set("message_history", message_history)
      await msg.update()

+     if tavily_api_key and tool_calls:
+         available_functions = {
+             "tavily_web_search": tavily_web_search,
+         }
+         messages = prompt_history + [{"role": "assistant", "content": None, "function_call": {
+             "name": tool_calls[0]['function']['name'],
+             "arguments": tool_calls[0]['function']['arguments']
+         }}]
+
+         for tool_call in tool_calls:
+             function_name = tool_call['function']['name']
+             if function_name in available_functions:
+                 function_to_call = available_functions[function_name]
+                 function_args = tool_call['function']['arguments']
+                 if function_args:
+                     try:
+                         function_args = json.loads(function_args)
+                         function_response = function_to_call(
+                             query=function_args.get("query"),
+                         )
+                         messages.append(
+                             {
+                                 "role": "function",
+                                 "name": function_name,
+                                 "content": function_response,
+                             }
+                         )
+                     except json.JSONDecodeError:
+                         logger.error(f"Failed to parse function arguments: {function_args}")
+
+         second_response = await acompletion(
+             model=model_name,
+             stream=True,
+             messages=messages,
+         )
+         logger.debug(f"Second LLM response: {second_response}")
+
+         # Handle the streaming response
+         full_response = ""
+         async for part in second_response:
+             if 'choices' in part and len(part['choices']) > 0:
+                 delta = part['choices'][0].get('delta', {})
+                 if 'content' in delta and delta['content'] is not None:
+                     token = delta['content']
+                     await msg.stream_token(token)
+                     full_response += token
+
+         # Update the message content
+         msg.content = full_response
+         await msg.update()
+     else:
+         # If no tool calls or Tavily API key is not set, the full_response is already set
+         msg.content = full_response
+         await msg.update()
+
  username = os.getenv("CHAINLIT_USERNAME", "admin") # Default to "admin" if not found
  password = os.getenv("CHAINLIT_PASSWORD", "admin") # Default to "admin" if not found

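Both UIs now follow the same two-step function-calling pattern: stream the first completion, run tavily_web_search locally when the model requests it, append the result as a "function" message, then ask the model again. A compact sketch of that second round trip using litellm's synchronous completion (newly imported above); the model name, query, and canned search result are placeholders, and a configured provider API key is assumed:

    import json
    from litellm import completion

    model_name = "gpt-4o-mini"  # placeholder model
    search_result = json.dumps({"query": "example", "results": []})  # stand-in for tavily_web_search(...)

    messages = [
        {"role": "user", "content": "What changed in the latest release?"},
        {"role": "assistant", "content": None,
         "function_call": {"name": "tavily_web_search", "arguments": '{"query": "example"}'}},
        {"role": "function", "name": "tavily_web_search", "content": search_result},
    ]

    # Second pass: the model answers with the tool output in context.
    response = completion(model=model_name, messages=messages)
    print(response.choices[0].message.content)
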
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: PraisonAI
- Version: 0.0.70
+ Version: 0.0.72
  Summary: PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration.
  Author: Mervin Praison
  Requires-Python: >=3.10,<3.13
@@ -19,9 +19,10 @@ Provides-Extra: gradio
  Provides-Extra: openai
  Provides-Extra: train
  Provides-Extra: ui
- Requires-Dist: agentops (>=0.2.6) ; extra == "agentops"
+ Requires-Dist: agentops (>=0.3.12) ; extra == "agentops"
  Requires-Dist: aiosqlite (>=0.20.0) ; extra == "chat" or extra == "code"
  Requires-Dist: chainlit (==1.2.0) ; extra == "ui" or extra == "chat" or extra == "code"
+ Requires-Dist: crawl4ai (==0.3.4) ; extra == "chat" or extra == "code"
  Requires-Dist: crewai (>=0.32.0)
  Requires-Dist: flask (>=3.0.0) ; extra == "api"
  Requires-Dist: gradio (>=4.26.0) ; extra == "gradio"
@@ -36,6 +37,7 @@ Requires-Dist: praisonai-tools (>=0.0.7)
  Requires-Dist: pyautogen (>=0.2.19)
  Requires-Dist: pyparsing (>=3.0.0)
  Requires-Dist: rich (>=13.7)
+ Requires-Dist: tavily-python (==0.5.0) ; extra == "chat" or extra == "code"
  Project-URL: Homepage, https://docs.praison.ai
  Project-URL: Repository, https://github.com/mervinpraison/PraisonAI
  Description-Content-Type: text/markdown
@@ -4,7 +4,7 @@ praisonai/agents_generator.py,sha256=8d1WRbubvEkBrW1HZ7_xnGyqgJi0yxmXa3MgTIqef1c
  praisonai/auto.py,sha256=9spTXqj47Hmmqv5QHRYE_RzSVHH_KoPbaZjskUj2UcE,7895
  praisonai/chainlit_ui.py,sha256=bNR7s509lp0I9JlJNvwCZRUZosC64qdvlFCt8NmFamQ,12216
  praisonai/cli.py,sha256=M23MbUUzNS7z9ZHz3cGawUrGWzXM-FzhMyiRWQPZSEk,18485
- praisonai/deploy.py,sha256=4zEREC97vaLRB5Xii388u8Wrx3hn3L-mP_pcIQsFtfM,6028
+ praisonai/deploy.py,sha256=7F24Wlty3Rs4tn5He1Rw1i24B6dR5E0SIJyLEPaCPX8,6028
  praisonai/inbuilt_tools/__init__.py,sha256=mUKnbL6Gram9c9f2m8wJwEzURBLmPEOcHzwySBH89YA,74
  praisonai/inbuilt_tools/autogen_tools.py,sha256=svYkM2N7DVFvbiwgoAS7U_MqTOD8rHf8VD3BaFUV5_Y,14907
  praisonai/inc/__init__.py,sha256=sPDlYBBwdk0VlWzaaM_lG0_LD07lS2HRGvPdxXJFiYg,62
@@ -30,8 +30,8 @@ praisonai/setup/setup_conda_env.py,sha256=4QiWrqgEObivzOMwfJgWaCPpUEpB68cQ6lFwVw
  praisonai/setup/setup_conda_env.sh,sha256=te7s0KHsTi7XM-vkNvE0dKC1HeU2tXxqE-sPUScV6fY,2718
  praisonai/test.py,sha256=OL-wesjA5JTohr8rtr6kWoaS4ImkJg2l0GXJ-dUUfRU,4090
  praisonai/train.py,sha256=DvORlrwKOD-2v4r_z84eV3LsfzpNs-WnPKb5cQB3_t4,11071
- praisonai/ui/chat.py,sha256=V4kG2jog8FK0zHbcG_gTS58JzeriG_ZTorCjEnjJz38,9383
- praisonai/ui/code.py,sha256=eRjV4aIXYbdJndNQEe5wtnD2LnllXw0-g4xSnffLG28,10874
+ praisonai/ui/chat.py,sha256=89wW2WcYpixokGDOdfdW76zb-hJSu61xv73hrd1JqJY,16244
+ praisonai/ui/code.py,sha256=YC94_BGNqM6ldiERnGTnmLaedbajeUqTbaTYIk4Df3o,17156
  praisonai/ui/context.py,sha256=oWO2I_WBZb7kZnuXItf18EJX0ZQv-1nAd8rxhwhuuDU,11871
  praisonai/ui/public/fantasy.svg,sha256=4Gs3kIOux-pjGtw6ogI_rv5_viVJxnE5gRwGilsSg0o,1553
  praisonai/ui/public/game.svg,sha256=y2QMaA01m8XzuDjTOBWzupOC3-TpnUl9ah89mIhviUw,2406
@@ -41,8 +41,8 @@ praisonai/ui/public/movie.svg,sha256=aJ2EQ8vXZusVsF2SeuAVxP4RFJzQ14T26ejrGYdBgzk
  praisonai/ui/public/thriller.svg,sha256=2dYY72EcgbEyTxS4QzjAm37Y4srtPWEW4vCMFki98ZI,3163
  praisonai/ui/sql_alchemy.py,sha256=kf025P_37C505YDDJZ-dPSmN_d62J2DCrkxbDAzXyrM,29884
  praisonai/version.py,sha256=ugyuFliEqtAwQmH4sTlc16YXKYbFWDmfyk87fErB8-8,21
- praisonai-0.0.70.dist-info/LICENSE,sha256=kqvFysVlnFxYOu0HxCe2HlmZmJtdmNGOxWRRkT9TsWc,1035
- praisonai-0.0.70.dist-info/METADATA,sha256=4XKqdOgDGcazLtA64vhExcx_ZJcNWsYjzKFCFg40ErA,11397
- praisonai-0.0.70.dist-info/WHEEL,sha256=HBsDV7Hj4OTiS1GX6ua7iQXUQTB9UHftbBxr7Q8Xm9c,110
- praisonai-0.0.70.dist-info/entry_points.txt,sha256=jB078LEGLY3Ky_indhclomRIVVpXrPSksHjJ-tcBZ-o,133
- praisonai-0.0.70.dist-info/RECORD,,
+ praisonai-0.0.72.dist-info/LICENSE,sha256=kqvFysVlnFxYOu0HxCe2HlmZmJtdmNGOxWRRkT9TsWc,1035
+ praisonai-0.0.72.dist-info/METADATA,sha256=EwHoCnCTGI3osmoSQYl5snX1h7iYoG6l8t3LWIvzW9Q,11545
+ praisonai-0.0.72.dist-info/WHEEL,sha256=HBsDV7Hj4OTiS1GX6ua7iQXUQTB9UHftbBxr7Q8Xm9c,110
+ praisonai-0.0.72.dist-info/entry_points.txt,sha256=jB078LEGLY3Ky_indhclomRIVVpXrPSksHjJ-tcBZ-o,133
+ praisonai-0.0.72.dist-info/RECORD,,