PraisonAI 2.2.83.tar.gz → 2.2.84.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (79)
  1. {praisonai-2.2.83 → praisonai-2.2.84}/PKG-INFO +2 -2
  2. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/agents_generator.py +6 -6
  3. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/auto.py +1 -1
  4. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/chainlit_ui.py +1 -1
  5. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/cli.py +2 -2
  6. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/deploy.py +3 -3
  7. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/inc/models.py +1 -1
  8. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/test.py +1 -1
  9. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/agents.py +2 -2
  10. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/chat.py +5 -5
  11. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/code.py +5 -5
  12. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/realtime.py +5 -5
  13. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/realtimeclient/__init__.py +3 -3
  14. {praisonai-2.2.83 → praisonai-2.2.84}/pyproject.toml +4 -4
  15. {praisonai-2.2.83 → praisonai-2.2.84}/README.md +0 -0
  16. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/README.md +0 -0
  17. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/__init__.py +0 -0
  18. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/__main__.py +0 -0
  19. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/api/call.py +0 -0
  20. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/inbuilt_tools/__init__.py +0 -0
  21. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/inbuilt_tools/autogen_tools.py +0 -0
  22. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/inc/__init__.py +0 -0
  23. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/inc/config.py +0 -0
  24. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/public/android-chrome-192x192.png +0 -0
  25. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/public/android-chrome-512x512.png +0 -0
  26. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/public/apple-touch-icon.png +0 -0
  27. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/public/fantasy.svg +0 -0
  28. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/public/favicon-16x16.png +0 -0
  29. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/public/favicon-32x32.png +0 -0
  30. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/public/favicon.ico +0 -0
  31. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/public/game.svg +0 -0
  32. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/public/logo_dark.png +0 -0
  33. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/public/logo_light.png +0 -0
  34. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/public/movie.svg +0 -0
  35. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/public/praison-ai-agents-architecture-dark.png +0 -0
  36. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/public/praison-ai-agents-architecture.png +0 -0
  37. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/public/thriller.svg +0 -0
  38. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/scheduler.py +0 -0
  39. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/setup/__init__.py +0 -0
  40. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/setup/build.py +0 -0
  41. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/setup/config.yaml +0 -0
  42. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/setup/post_install.py +0 -0
  43. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/setup/setup_conda_env.py +0 -0
  44. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/setup/setup_conda_env.sh +0 -0
  45. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/setup.py +0 -0
  46. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/train.py +0 -0
  47. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/train_vision.py +0 -0
  48. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/README.md +0 -0
  49. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/callbacks.py +0 -0
  50. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/colab.py +0 -0
  51. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/colab_chainlit.py +0 -0
  52. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/components/aicoder.py +0 -0
  53. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/config/chainlit.md +0 -0
  54. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/config/translations/bn.json +0 -0
  55. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/config/translations/en-US.json +0 -0
  56. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/config/translations/gu.json +0 -0
  57. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/config/translations/he-IL.json +0 -0
  58. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/config/translations/hi.json +0 -0
  59. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/config/translations/kn.json +0 -0
  60. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/config/translations/ml.json +0 -0
  61. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/config/translations/mr.json +0 -0
  62. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/config/translations/ta.json +0 -0
  63. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/config/translations/te.json +0 -0
  64. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/config/translations/zh-CN.json +0 -0
  65. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/context.py +0 -0
  66. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/database_config.py +0 -0
  67. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/db.py +0 -0
  68. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/public/fantasy.svg +0 -0
  69. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/public/game.svg +0 -0
  70. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/public/logo_dark.png +0 -0
  71. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/public/logo_light.png +0 -0
  72. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/public/movie.svg +0 -0
  73. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/public/praison.css +0 -0
  74. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/public/thriller.svg +0 -0
  75. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/realtimeclient/tools.py +0 -0
  76. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/sql_alchemy.py +0 -0
  77. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/ui/tools.md +0 -0
  78. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/upload_vision.py +0 -0
  79. {praisonai-2.2.83 → praisonai-2.2.84}/praisonai/version.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: PraisonAI
- Version: 2.2.83
+ Version: 2.2.84
  Summary: PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration.
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -70,7 +70,7 @@ Requires-Dist: plotly (>=5.24.0) ; extra == "realtime"
  Requires-Dist: praisonai-tools (>=0.0.22) ; extra == "autogen"
  Requires-Dist: praisonai-tools (>=0.0.22) ; extra == "autogen-v4"
  Requires-Dist: praisonai-tools (>=0.0.22) ; extra == "crewai"
- Requires-Dist: praisonaiagents (>=0.0.157)
+ Requires-Dist: praisonaiagents (>=0.0.158)
  Requires-Dist: pyautogen (==0.2.29) ; extra == "autogen"
  Requires-Dist: pydantic (<=2.10.1) ; extra == "chat"
  Requires-Dist: pydantic (<=2.10.1) ; extra == "code"
@@ -616,7 +616,7 @@ class AgentsGenerator:
  llm_model = details.get('llm')
  if llm_model:
  llm = PraisonAIModel(
- model=llm_model.get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-4o-mini",
+ model=llm_model.get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-5-nano",
  base_url=self.config_list[0].get('base_url') if self.config_list else None,
  api_key=self.config_list[0].get('api_key') if self.config_list else None
  ).get_model()
@@ -630,7 +630,7 @@ class AgentsGenerator:
  function_calling_llm_model = details.get('function_calling_llm')
  if function_calling_llm_model:
  function_calling_llm = PraisonAIModel(
- model=function_calling_llm_model.get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-4o-mini",
+ model=function_calling_llm_model.get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-5-nano",
  base_url=self.config_list[0].get('base_url') if self.config_list else None,
  api_key=self.config_list[0].get('api_key') if self.config_list else None
  ).get_model()
@@ -746,8 +746,8 @@ class AgentsGenerator:
  backstory=backstory_filled,
  tools=tools_list, # Pass the entire tools list to the agent
  allow_delegation=details.get('allow_delegation', False),
- llm=details.get('llm', {}).get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-4o-mini",
- function_calling_llm=details.get('function_calling_llm', {}).get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-4o-mini",
+ llm=details.get('llm', {}).get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-5-nano",
+ function_calling_llm=details.get('function_calling_llm', {}).get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-5-nano",
  max_iter=details.get('max_iter', 15),
  max_rpm=details.get('max_rpm'),
  max_execution_time=details.get('max_execution_time'),
@@ -756,7 +756,7 @@ class AgentsGenerator:
  system_template=details.get('system_template'),
  prompt_template=details.get('prompt_template'),
  response_template=details.get('response_template'),
- reflect_llm=details.get('reflect_llm', {}).get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-4o-mini",
+ reflect_llm=details.get('reflect_llm', {}).get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-5-nano",
  min_reflect=details.get('min_reflect', 1),
  max_reflect=details.get('max_reflect', 3),
  )
@@ -812,7 +812,7 @@ class AgentsGenerator:
  tasks=tasks,
  verbose=True,
  process="hierarchical",
- manager_llm=config.get('manager_llm') or os.environ.get("MODEL_NAME") or "openai/gpt-4o-mini",
+ manager_llm=config.get('manager_llm') or os.environ.get("MODEL_NAME") or "openai/gpt-5-nano",
  memory=memory
  )
  else:
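
The hunks above all change one fallback chain: an explicitly configured model wins, then the MODEL_NAME environment variable, then the new default "openai/gpt-5-nano". A minimal sketch of that resolution order, assuming a per-agent details dict as in the diff (the helper name is illustrative, not part of the package):

    import os

    def resolve_agent_model(details: dict) -> str:
        # Mirrors the fallback used above: explicit per-agent setting first,
        # then the MODEL_NAME environment variable, then the new default.
        return (
            details.get('llm', {}).get("model")
            or os.environ.get("MODEL_NAME")
            or "openai/gpt-5-nano"
        )

    # Example: no per-agent model and no MODEL_NAME set -> "openai/gpt-5-nano"
    print(resolve_agent_model({}))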
@@ -108,7 +108,7 @@ Tools are not available for {framework}. To use tools, install:

  # Support multiple environment variable patterns for better compatibility
  # Priority order: MODEL_NAME > OPENAI_MODEL_NAME for model selection
- model_name = os.environ.get("MODEL_NAME") or os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini")
+ model_name = os.environ.get("MODEL_NAME") or os.environ.get("OPENAI_MODEL_NAME", "gpt-5-nano")

  # Priority order for base_url: OPENAI_BASE_URL > OPENAI_API_BASE > OLLAMA_API_BASE
  # OPENAI_BASE_URL is the standard OpenAI SDK environment variable
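
As the comments in this hunk note, MODEL_NAME takes priority over OPENAI_MODEL_NAME, and the literal "gpt-5-nano" default only applies when neither variable is set. A small sketch of how a deployment can keep its current model across this upgrade (the pinned value below is purely illustrative):

    import os

    # Pin the model explicitly; either variable avoids the new default,
    # and MODEL_NAME wins if both are present.
    os.environ["MODEL_NAME"] = "gpt-4o-mini"  # illustrative pin
    model_name = os.environ.get("MODEL_NAME") or os.environ.get("OPENAI_MODEL_NAME", "gpt-5-nano")
    assert model_name == "gpt-4o-mini"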
@@ -16,7 +16,7 @@ logging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO').upper(), format='%(
  framework = "crewai"
  config_list = [
  {
- 'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini"),
+ 'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-5-nano"),
  'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
  'api_key': os.environ.get("OPENAI_API_KEY", "")
  }
@@ -124,7 +124,7 @@ class PraisonAI:
  # Create config_list with AutoGen compatibility
  # Support multiple environment variable patterns for better compatibility
  # Priority order: MODEL_NAME > OPENAI_MODEL_NAME for model selection
- model_name = os.environ.get("MODEL_NAME") or os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini")
+ model_name = os.environ.get("MODEL_NAME") or os.environ.get("OPENAI_MODEL_NAME", "gpt-5-nano")

  # Priority order for base_url: OPENAI_BASE_URL > OPENAI_API_BASE > OLLAMA_API_BASE
  # OPENAI_BASE_URL is the standard OpenAI SDK environment variable
@@ -910,7 +910,7 @@ class PraisonAI:

  # Use the same model configuration pattern as other CLI commands
  # Priority order: MODEL_NAME > OPENAI_MODEL_NAME for model selection
- model_name = os.environ.get("MODEL_NAME") or os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini")
+ model_name = os.environ.get("MODEL_NAME") or os.environ.get("OPENAI_MODEL_NAME", "gpt-5-nano")

  # Create ContextAgent with user's LLM configuration
  agent = ContextAgent(llm=model_name, auto_analyze=auto_analyze)
@@ -57,7 +57,7 @@ class CloudDeployer:
  file.write("FROM python:3.11-slim\n")
  file.write("WORKDIR /app\n")
  file.write("COPY . .\n")
- file.write("RUN pip install flask praisonai==2.2.83 gunicorn markdown\n")
+ file.write("RUN pip install flask praisonai==2.2.84 gunicorn markdown\n")
  file.write("EXPOSE 8080\n")
  file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')

@@ -91,7 +91,7 @@ class CloudDeployer:

  def set_environment_variables(self):
  """Sets environment variables with fallback to .env values or defaults."""
- os.environ["OPENAI_MODEL_NAME"] = os.getenv("OPENAI_MODEL_NAME", "gpt-4o-mini")
+ os.environ["OPENAI_MODEL_NAME"] = os.getenv("OPENAI_MODEL_NAME", "gpt-5-nano")
  os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "Enter your API key")
  os.environ["OPENAI_API_BASE"] = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")

@@ -110,7 +110,7 @@ class CloudDeployer:

  This method sets environment variables for the application. It uses the `os.environ` dictionary to set the following environment variables:

- - `OPENAI_MODEL_NAME`: The name of the OpenAI model to use. If not specified in the .env file, it defaults to "gpt-4o-mini".
+ - `OPENAI_MODEL_NAME`: The name of the OpenAI model to use. If not specified in the .env file, it defaults to "gpt-5-nano".
  - `OPENAI_API_KEY`: The API key for accessing the OpenAI API. If not specified in the .env file, it defaults to "Enter your API key".
  - `OPENAI_API_BASE`: The base URL for the OpenAI API. If not specified in the .env file, it defaults to "https://api.openai.com/v1".
  """
@@ -44,7 +44,7 @@ class PraisonAIModel:
  base_url (str, optional): The base URL for the OpenAI API. Defaults to None.
  api_key (str, optional): The explicit API key to use. Takes precedence over environment variables. Defaults to None.
  """
- self.model = model or os.getenv("OPENAI_MODEL_NAME", "gpt-4o-mini")
+ self.model = model or os.getenv("OPENAI_MODEL_NAME", "gpt-5-nano")
  if self.model.startswith("openai/"):
  self.api_key_var = "OPENAI_API_KEY"
  self.base_url = base_url or "https://api.openai.com/v1"
@@ -7,7 +7,7 @@ load_dotenv()
  import autogen
  config_list = [
  {
- 'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini"),
+ 'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-5-nano"),
  'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
  'api_key': os.environ.get("OPENAI_API_KEY")
  }
@@ -10,7 +10,7 @@ from praisonaiagents import Agent, Task, PraisonAIAgents, register_display_callb
  framework = "praisonai"
  config_list = [
  {
- 'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini"),
+ 'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-5-nano"),
  'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
  'api_key': os.environ.get("OPENAI_API_KEY", "")
  }
@@ -641,7 +641,7 @@ async def set_profiles(current_user: cl.User):
  @cl.on_chat_start
  async def start_chat():
  try:
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini")
+ model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-5-nano")
  cl.user_session.set("model_name", model_name)
  logger.debug(f"Model name: {model_name}")

@@ -225,7 +225,7 @@ async def send_count():

  @cl.on_chat_start
  async def start():
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini")
+ model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-5-nano")
  cl.user_session.set("model_name", model_name)
  logger.debug(f"Model name: {model_name}")
  settings = cl.ChatSettings(
@@ -233,7 +233,7 @@ async def start():
  TextInput(
  id="model_name",
  label="Enter the Model Name",
- placeholder="e.g., gpt-4o-mini",
+ placeholder="e.g., gpt-5-nano",
  initial=model_name
  )
  ]
@@ -266,7 +266,7 @@ async def setup_agent(settings):

  @cl.on_message
  async def main(message: cl.Message):
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini")
+ model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-5-nano")
  message_history = cl.user_session.get("message_history", [])
  now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

@@ -452,14 +452,14 @@ User Question: {message.content}
  @cl.on_chat_resume
  async def on_chat_resume(thread: ThreadDict):
  logger.info(f"Resuming chat: {thread['id']}")
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini")
+ model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-5-nano")
  logger.debug(f"Model name: {model_name}")
  settings = cl.ChatSettings(
  [
  TextInput(
  id="model_name",
  label="Enter the Model Name",
- placeholder="e.g., gpt-4o-mini",
+ placeholder="e.g., gpt-5-nano",
  initial=model_name
  )
  ]
@@ -232,7 +232,7 @@ async def start():
  cl.user_session.set("model_name", model_name)
  else:
  # If no setting found, use default or environment variable
- model_name = os.getenv("MODEL_NAME", "gpt-4o-mini")
+ model_name = os.getenv("MODEL_NAME", "gpt-5-nano")
  cl.user_session.set("model_name", model_name)
  logger.debug(f"Model name: {model_name}")

@@ -246,7 +246,7 @@ async def start():
  TextInput(
  id="model_name",
  label="Enter the Model Name",
- placeholder="e.g., gpt-4o-mini",
+ placeholder="e.g., gpt-5-nano",
  initial=model_name
  ),
  Switch(
@@ -361,7 +361,7 @@ tools = [{

  @cl.on_message
  async def main(message: cl.Message):
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
+ model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-5-nano"
  claude_code_enabled = cl.user_session.get("claude_code_enabled", False)
  message_history = cl.user_session.get("message_history", [])
  repo_path_to_use = os.environ.get("PRAISONAI_CODE_REPO_PATH", ".")
@@ -634,7 +634,7 @@ async def send_count():
  @cl.on_chat_resume
  async def on_chat_resume(thread: ThreadDict):
  logger.info(f"Resuming chat: {thread['id']}")
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
+ model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-5-nano"
  # Load Claude Code setting (check CLI flag first, then database setting)
  claude_code_enabled = os.getenv("PRAISONAI_CLAUDECODE_ENABLED", "false").lower() == "true"
  if not claude_code_enabled:
@@ -645,7 +645,7 @@ async def on_chat_resume(thread: ThreadDict):
  TextInput(
  id="model_name",
  label="Enter the Model Name",
- placeholder="e.g., gpt-4o-mini",
+ placeholder="e.g., gpt-5-nano",
  initial=model_name
  ),
  Switch(
@@ -229,7 +229,7 @@ except Exception as e:
  @cl.on_chat_start
  async def start():
  initialize_db()
- model_name = os.getenv("OPENAI_MODEL_NAME") or os.getenv("MODEL_NAME", "gpt-4o-mini-realtime-preview-2024-12-17")
+ model_name = os.getenv("OPENAI_MODEL_NAME") or os.getenv("MODEL_NAME", "gpt-5-nano-realtime-preview-2024-12-17")
  cl.user_session.set("model_name", model_name)
  cl.user_session.set("message_history", []) # Initialize message history
  logger.debug(f"Model name: {model_name}")
@@ -238,7 +238,7 @@ async def start():
  # TextInput(
  # id="model_name",
  # label="Enter the Model Name",
- # placeholder="e.g., gpt-4o-mini-realtime-preview-2024-12-17",
+ # placeholder="e.g., gpt-5-nano-realtime-preview-2024-12-17",
  # initial=model_name
  # )
  # ]
@@ -382,7 +382,7 @@ async def on_audio_start():
  openai_realtime = cl.user_session.get("openai_realtime")

  if not openai_realtime.is_connected():
- model_name = cl.user_session.get("model_name") or os.getenv("OPENAI_MODEL_NAME") or os.getenv("MODEL_NAME", "gpt-4o-mini-realtime-preview-2024-12-17")
+ model_name = cl.user_session.get("model_name") or os.getenv("OPENAI_MODEL_NAME") or os.getenv("MODEL_NAME", "gpt-5-nano-realtime-preview-2024-12-17")
  await openai_realtime.connect(model_name)

  logger.info("Connected to OpenAI realtime")
@@ -435,14 +435,14 @@ def auth_callback(username: str, password: str):
  @cl.on_chat_resume
  async def on_chat_resume(thread: ThreadDict):
  logger.info(f"Resuming chat: {thread['id']}")
- model_name = os.getenv("OPENAI_MODEL_NAME") or os.getenv("MODEL_NAME") or "gpt-4o-mini-realtime-preview-2024-12-17"
+ model_name = os.getenv("OPENAI_MODEL_NAME") or os.getenv("MODEL_NAME") or "gpt-5-nano-realtime-preview-2024-12-17"
  logger.debug(f"Model name: {model_name}")
  settings = cl.ChatSettings(
  [
  TextInput(
  id="model_name",
  label="Enter the Model Name",
- placeholder="e.g., gpt-4o-mini-realtime-preview-2024-12-17",
+ placeholder="e.g., gpt-5-nano-realtime-preview-2024-12-17",
  initial=model_name
  )
  ]
@@ -129,7 +129,7 @@ class RealtimeAPI(RealtimeEventHandler):
  def log(self, *args):
  logger.debug(f"[Websocket/{datetime.utcnow().isoformat()}]", *args)

- async def connect(self, model='gpt-4o-mini-realtime-preview-2024-12-17'):
+ async def connect(self, model='gpt-5-nano-realtime-preview-2024-12-17'):
  if self.is_connected():
  raise Exception("Already connected")

@@ -576,7 +576,7 @@ class RealtimeClient(RealtimeEventHandler):

  # Use provided model, OPENAI_MODEL_NAME environment variable, or default
  if model is None:
- model = os.getenv("OPENAI_MODEL_NAME", 'gpt-4o-mini-realtime-preview-2024-12-17')
+ model = os.getenv("OPENAI_MODEL_NAME", 'gpt-5-nano-realtime-preview-2024-12-17')

  await self.realtime.connect(model)
  await self.update_session()
@@ -747,7 +747,7 @@ class RealtimeClient(RealtimeEventHandler):
  if not self.is_connected():
  try:
  logger.info("Attempting to reconnect to OpenAI Realtime API...")
- model = os.getenv("OPENAI_MODEL_NAME", 'gpt-4o-mini-realtime-preview-2024-12-17')
+ model = os.getenv("OPENAI_MODEL_NAME", 'gpt-5-nano-realtime-preview-2024-12-17')
  await self.connect(model)
  return True
  except Exception as e:
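
The realtime client resolves its model the same way: an explicit argument to connect(), then OPENAI_MODEL_NAME, then the new built-in default. A standalone sketch of that resolution (it does not open a websocket; the function name is illustrative, not part of the package):

    import os

    def resolve_realtime_model(model: str | None = None) -> str:
        # Mirrors the fallback in RealtimeClient.connect(): explicit argument
        # first, then the OPENAI_MODEL_NAME environment variable, then the
        # new default.
        if model is None:
            model = os.getenv("OPENAI_MODEL_NAME", "gpt-5-nano-realtime-preview-2024-12-17")
        return model

    # With no argument and no OPENAI_MODEL_NAME set, the default is returned.
    print(resolve_realtime_model())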
@@ -1,6 +1,6 @@
  [project]
  name = "PraisonAI"
- version = "2.2.83"
+ version = "2.2.84"
  description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
  readme = "README.md"
  license = ""
@@ -12,7 +12,7 @@ dependencies = [
  "rich>=13.7",
  "markdown>=3.5",
  "pyparsing>=3.0.0",
- "praisonaiagents>=0.0.157",
+ "praisonaiagents>=0.0.158",
  "python-dotenv>=0.19.0",
  "instructor>=1.3.3",
  "PyYAML>=6.0",
@@ -102,7 +102,7 @@ autogen-v4 = [

  [tool.poetry]
  name = "PraisonAI"
- version = "2.2.83"
+ version = "2.2.84"
  description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
  authors = ["Mervin Praison"]
  license = ""
@@ -120,7 +120,7 @@ python = ">=3.10,<3.13"
  rich = ">=13.7"
  markdown = ">=3.5"
  pyparsing = ">=3.0.0"
- praisonaiagents = ">=0.0.157"
+ praisonaiagents = ">=0.0.158"
  python-dotenv = ">=0.19.0"
  instructor = ">=1.3.3"
  PyYAML = ">=6.0"