PraisonAI 2.2.32__tar.gz → 2.2.34__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of PraisonAI might be problematic. See the package registry's advisory page for more details.

Files changed (79)
  1. {praisonai-2.2.32 → praisonai-2.2.34}/PKG-INFO +2 -2
  2. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/cli.py +5 -0
  3. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/deploy.py +1 -1
  4. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/code.py +244 -12
  5. {praisonai-2.2.32 → praisonai-2.2.34}/pyproject.toml +4 -4
  6. {praisonai-2.2.32 → praisonai-2.2.34}/README.md +0 -0
  7. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/README.md +0 -0
  8. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/__init__.py +0 -0
  9. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/__main__.py +0 -0
  10. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/agents_generator.py +0 -0
  11. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/api/call.py +0 -0
  12. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/auto.py +0 -0
  13. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/chainlit_ui.py +0 -0
  14. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/inbuilt_tools/__init__.py +0 -0
  15. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/inbuilt_tools/autogen_tools.py +0 -0
  16. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/inc/__init__.py +0 -0
  17. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/inc/config.py +0 -0
  18. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/inc/models.py +0 -0
  19. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/public/android-chrome-192x192.png +0 -0
  20. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/public/android-chrome-512x512.png +0 -0
  21. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/public/apple-touch-icon.png +0 -0
  22. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/public/fantasy.svg +0 -0
  23. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/public/favicon-16x16.png +0 -0
  24. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/public/favicon-32x32.png +0 -0
  25. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/public/favicon.ico +0 -0
  26. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/public/game.svg +0 -0
  27. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/public/logo_dark.png +0 -0
  28. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/public/logo_light.png +0 -0
  29. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/public/movie.svg +0 -0
  30. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/public/praison-ai-agents-architecture-dark.png +0 -0
  31. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/public/praison-ai-agents-architecture.png +0 -0
  32. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/public/thriller.svg +0 -0
  33. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/scheduler.py +0 -0
  34. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/setup/__init__.py +0 -0
  35. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/setup/build.py +0 -0
  36. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/setup/config.yaml +0 -0
  37. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/setup/post_install.py +0 -0
  38. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/setup/setup_conda_env.py +0 -0
  39. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/setup/setup_conda_env.sh +0 -0
  40. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/setup.py +0 -0
  41. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/test.py +0 -0
  42. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/train.py +0 -0
  43. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/train_vision.py +0 -0
  44. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/README.md +0 -0
  45. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/agents.py +0 -0
  46. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/callbacks.py +0 -0
  47. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/chat.py +0 -0
  48. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/colab.py +0 -0
  49. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/colab_chainlit.py +0 -0
  50. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/components/aicoder.py +0 -0
  51. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/config/chainlit.md +0 -0
  52. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/config/translations/bn.json +0 -0
  53. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/config/translations/en-US.json +0 -0
  54. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/config/translations/gu.json +0 -0
  55. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/config/translations/he-IL.json +0 -0
  56. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/config/translations/hi.json +0 -0
  57. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/config/translations/kn.json +0 -0
  58. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/config/translations/ml.json +0 -0
  59. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/config/translations/mr.json +0 -0
  60. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/config/translations/ta.json +0 -0
  61. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/config/translations/te.json +0 -0
  62. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/config/translations/zh-CN.json +0 -0
  63. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/context.py +0 -0
  64. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/database_config.py +0 -0
  65. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/db.py +0 -0
  66. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/public/fantasy.svg +0 -0
  67. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/public/game.svg +0 -0
  68. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/public/logo_dark.png +0 -0
  69. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/public/logo_light.png +0 -0
  70. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/public/movie.svg +0 -0
  71. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/public/praison.css +0 -0
  72. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/public/thriller.svg +0 -0
  73. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/realtime.py +0 -0
  74. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/realtimeclient/__init__.py +0 -0
  75. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/realtimeclient/tools.py +0 -0
  76. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/sql_alchemy.py +0 -0
  77. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/ui/tools.md +0 -0
  78. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/upload_vision.py +0 -0
  79. {praisonai-2.2.32 → praisonai-2.2.34}/praisonai/version.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: PraisonAI
3
- Version: 2.2.32
3
+ Version: 2.2.34
4
4
  Summary: PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration.
5
5
  Author: Mervin Praison
6
6
  Requires-Python: >=3.10
@@ -64,7 +64,7 @@ Requires-Dist: playwright (>=1.47.0) ; extra == "code"
64
64
  Requires-Dist: plotly (>=5.24.0) ; extra == "realtime"
65
65
  Requires-Dist: praisonai-tools (>=0.0.15) ; extra == "autogen"
66
66
  Requires-Dist: praisonai-tools (>=0.0.15) ; extra == "crewai"
67
- Requires-Dist: praisonaiagents (>=0.0.105)
67
+ Requires-Dist: praisonaiagents (>=0.0.107)
68
68
  Requires-Dist: pyautogen (>=0.2.19) ; extra == "autogen"
69
69
  Requires-Dist: pydantic (<=2.10.1) ; extra == "chat"
70
70
  Requires-Dist: pydantic (<=2.10.1) ; extra == "code"
@@ -529,6 +529,7 @@ class PraisonAI:
529
529
  parser.add_argument("--call", action="store_true", help="Start the PraisonAI Call server")
530
530
  parser.add_argument("--public", action="store_true", help="Use ngrok to expose the server publicly (only with --call)")
531
531
  parser.add_argument("--merge", action="store_true", help="Merge existing agents.yaml with auto-generated agents instead of overwriting")
532
+ parser.add_argument("--claudecode", action="store_true", help="Enable Claude Code integration for file modifications and coding tasks")
532
533
 
533
534
  # If we're in a test environment, parse with empty args to avoid pytest interference
534
535
  if in_test_env:
@@ -549,6 +550,10 @@ class PraisonAI:
549
550
  if args.command == 'code':
550
551
  args.ui = 'chainlit'
551
552
  args.code = True
553
+
554
+ # Handle --claudecode flag for code command
555
+ if getattr(args, 'claudecode', False):
556
+ os.environ["PRAISONAI_CLAUDECODE_ENABLED"] = "true"
552
557
  if args.command == 'realtime':
553
558
  args.realtime = True
554
559
  if args.command == 'call':
@@ -56,7 +56,7 @@ class CloudDeployer:
56
56
  file.write("FROM python:3.11-slim\n")
57
57
  file.write("WORKDIR /app\n")
58
58
  file.write("COPY . .\n")
59
- file.write("RUN pip install flask praisonai==2.2.32 gunicorn markdown\n")
59
+ file.write("RUN pip install flask praisonai==2.2.34 gunicorn markdown\n")
60
60
  file.write("EXPOSE 8080\n")
61
61
  file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
62
62
 
@@ -13,16 +13,25 @@ from PIL import Image
13
13
  from context import ContextGatherer
14
14
  from tavily import TavilyClient
15
15
  from crawl4ai import AsyncWebCrawler
16
+ import subprocess
16
17
 
17
18
  # Local application/library imports
18
19
  import chainlit as cl
19
- from chainlit.input_widget import TextInput
20
+ from chainlit.input_widget import TextInput, Switch
20
21
  from chainlit.types import ThreadDict
21
22
  import chainlit.data as cl_data
22
- from litellm import acompletion
23
- import litellm
24
23
  from db import DatabaseManager
25
24
 
25
+ # PraisonAI Agents imports
26
+ try:
27
+ from praisonaiagents import Agent
28
+ PRAISONAI_AGENTS_AVAILABLE = True
29
+ except ImportError:
30
+ PRAISONAI_AGENTS_AVAILABLE = False
31
+ # Fallback to litellm for backward compatibility
32
+ from litellm import acompletion
33
+ import litellm
34
+
26
35
  # Load environment variables
27
36
  load_dotenv()
28
37
 
@@ -41,14 +50,131 @@ logger.addHandler(console_handler)
41
50
  # Set the logging level for the logger
42
51
  logger.setLevel(log_level)
43
52
 
44
- # Configure litellm same as in llm.py
45
- litellm.set_verbose = False
46
- litellm.success_callback = []
47
- litellm._async_success_callback = []
48
- litellm.callbacks = []
49
- litellm.drop_params = True
50
- litellm.modify_params = True
51
- litellm.suppress_debug_messages = True
53
+ # Configure litellm for backward compatibility (only if praisonaiagents not available)
54
+ if not PRAISONAI_AGENTS_AVAILABLE:
55
+ import litellm
56
+ litellm.set_verbose = False
57
+ litellm.success_callback = []
58
+ litellm._async_success_callback = []
59
+ litellm.callbacks = []
60
+ litellm.drop_params = True
61
+ litellm.modify_params = True
62
+ litellm.suppress_debug_messages = True
63
+
64
+ # Claude Code Tool Function
65
+ async def claude_code_tool(query: str) -> str:
66
+ """
67
+ Execute Claude Code CLI commands for file modifications and coding tasks.
68
+
69
+ Args:
70
+ query: The user's request that requires file modifications or coding assistance
71
+
72
+ Returns:
73
+ The output from Claude Code execution
74
+ """
75
+ try:
76
+ # Check if the current working directory is a git repository
77
+ repo_path = os.environ.get("PRAISONAI_CODE_REPO_PATH", ".")
78
+
79
+ # Try to detect if git is available and if we're in a git repo
80
+ git_available = False
81
+ try:
82
+ subprocess.run(["git", "status"], cwd=repo_path, capture_output=True, check=True)
83
+ git_available = True
84
+ except (subprocess.CalledProcessError, FileNotFoundError):
85
+ git_available = False
86
+
87
+ # Build Claude Code command
88
+ claude_cmd = ["claude", "--dangerously-skip-permissions", "-p", query]
89
+
90
+ # Check if it's a continuation (simple heuristic)
91
+ user_session_context = cl.user_session.get("claude_code_context", False)
92
+ if user_session_context:
93
+ claude_cmd.insert(1, "--continue")
94
+
95
+ # Execute Claude Code command
96
+ result = subprocess.run(
97
+ claude_cmd,
98
+ cwd=repo_path,
99
+ capture_output=True,
100
+ text=True,
101
+ timeout=300 # 5 minutes timeout
102
+ )
103
+
104
+ # Set context for future requests
105
+ cl.user_session.set("claude_code_context", True)
106
+
107
+ output = result.stdout
108
+ if result.stderr:
109
+ output += f"\n\nErrors:\n{result.stderr}"
110
+
111
+ # If git is available and changes were made, try to create a branch and PR
112
+ if git_available and result.returncode == 0:
113
+ try:
114
+ # Check for changes
115
+ git_status = subprocess.run(
116
+ ["git", "status", "--porcelain"],
117
+ cwd=repo_path,
118
+ capture_output=True,
119
+ text=True,
120
+ check=True
121
+ )
122
+
123
+ if git_status.stdout.strip():
124
+ # Create a branch for the changes
125
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
126
+ branch_name = f"claude-code-{timestamp}"
127
+
128
+ # Create and switch to new branch
129
+ subprocess.run(["git", "checkout", "-b", branch_name], cwd=repo_path, check=True)
130
+
131
+ # Add and commit changes
132
+ subprocess.run(["git", "add", "."], cwd=repo_path, check=True)
133
+ commit_message = f"Claude Code changes: {query[:50]}..."
134
+ subprocess.run(
135
+ ["git", "commit", "-m", commit_message],
136
+ cwd=repo_path,
137
+ check=True
138
+ )
139
+
140
+ # Push to remote (if configured)
141
+ try:
142
+ subprocess.run(
143
+ ["git", "push", "-u", "origin", branch_name],
144
+ cwd=repo_path,
145
+ check=True
146
+ )
147
+
148
+ # Generate PR URL (assuming GitHub)
149
+ remote_url = subprocess.run(
150
+ ["git", "config", "--get", "remote.origin.url"],
151
+ cwd=repo_path,
152
+ capture_output=True,
153
+ text=True
154
+ )
155
+
156
+ if remote_url.returncode == 0:
157
+ repo_url = remote_url.stdout.strip()
158
+ if repo_url.endswith(".git"):
159
+ repo_url = repo_url[:-4]
160
+ if "github.com" in repo_url:
161
+ pr_url = f"{repo_url}/compare/main...{branch_name}?quick_pull=1"
162
+ output += f"\n\n📋 **Pull Request Created:**\n{pr_url}"
163
+
164
+ except subprocess.CalledProcessError:
165
+ output += f"\n\n🌲 **Branch created:** {branch_name} (push manually if needed)"
166
+
167
+ except subprocess.CalledProcessError as e:
168
+ output += f"\n\nGit operations failed: {e}"
169
+
170
+ return output
171
+
172
+ except subprocess.TimeoutExpired:
173
+ return "Claude Code execution timed out after 5 minutes."
174
+ except subprocess.CalledProcessError as e:
175
+ return f"Claude Code execution failed: {e}\nStdout: {e.stdout}\nStderr: {e.stderr}"
176
+ except Exception as e:
177
+ return f"Error executing Claude Code: {str(e)}"
52
178
 
53
179
  CHAINLIT_AUTH_SECRET = os.getenv("CHAINLIT_AUTH_SECRET")
54
180
 
@@ -109,6 +235,12 @@ async def start():
109
235
  model_name = os.getenv("MODEL_NAME", "gpt-4o-mini")
110
236
  cl.user_session.set("model_name", model_name)
111
237
  logger.debug(f"Model name: {model_name}")
238
+
239
+ # Load Claude Code setting (check CLI flag first, then database setting)
240
+ claude_code_enabled = os.getenv("PRAISONAI_CLAUDECODE_ENABLED", "false").lower() == "true"
241
+ if not claude_code_enabled:
242
+ claude_code_enabled = (load_setting("claude_code_enabled") or "false").lower() == "true"
243
+
112
244
  settings = cl.ChatSettings(
113
245
  [
114
246
  TextInput(
@@ -116,6 +248,11 @@ async def start():
116
248
  label="Enter the Model Name",
117
249
  placeholder="e.g., gpt-4o-mini",
118
250
  initial=model_name
251
+ ),
252
+ Switch(
253
+ id="claude_code_enabled",
254
+ label="Enable Claude Code (file modifications & coding)",
255
+ initial=claude_code_enabled
119
256
  )
120
257
  ]
121
258
  )
@@ -134,10 +271,13 @@ async def setup_agent(settings):
134
271
  logger.debug(settings)
135
272
  cl.user_session.set("settings", settings)
136
273
  model_name = settings["model_name"]
274
+ claude_code_enabled = settings.get("claude_code_enabled", False)
137
275
  cl.user_session.set("model_name", model_name)
276
+ cl.user_session.set("claude_code_enabled", claude_code_enabled)
138
277
 
139
278
  # Save in settings table
140
279
  save_setting("model_name", model_name)
280
+ save_setting("claude_code_enabled", str(claude_code_enabled).lower())
141
281
 
142
282
  # Save in thread metadata
143
283
  thread_id = cl.user_session.get("thread_id")
@@ -152,6 +292,7 @@ async def setup_agent(settings):
152
292
  metadata = {}
153
293
 
154
294
  metadata["model_name"] = model_name
295
+ metadata["claude_code_enabled"] = claude_code_enabled
155
296
 
156
297
  # Always store metadata as a dictionary
157
298
  await cl_data._data_layer.update_thread(thread_id, metadata=metadata)
@@ -221,6 +362,7 @@ tools = [{
221
362
  @cl.on_message
222
363
  async def main(message: cl.Message):
223
364
  model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
365
+ claude_code_enabled = cl.user_session.get("claude_code_enabled", False)
224
366
  message_history = cl.user_session.get("message_history", [])
225
367
  repo_path_to_use = os.environ.get("PRAISONAI_CODE_REPO_PATH", ".")
226
368
  gatherer = ContextGatherer(directory=repo_path_to_use)
@@ -258,6 +400,87 @@ Context:
258
400
  msg = cl.Message(content="")
259
401
  await msg.send()
260
402
 
403
+ # Use PraisonAI Agents if available, otherwise fallback to litellm
404
+ if PRAISONAI_AGENTS_AVAILABLE:
405
+ await handle_with_praisonai_agents(message, user_message, model_name, claude_code_enabled, msg, image)
406
+ else:
407
+ await handle_with_litellm(user_message, model_name, message_history, msg, image)
408
+
409
+ async def handle_with_praisonai_agents(message, user_message, model_name, claude_code_enabled, msg, image):
410
+ """Handle message using PraisonAI Agents framework with optional Claude Code tool"""
411
+ try:
412
+ # Prepare tools list
413
+ available_tools = []
414
+
415
+ # Add Tavily search tool if API key available
416
+ if tavily_api_key:
417
+ available_tools.append(tavily_web_search)
418
+
419
+ # Add Claude Code tool if enabled
420
+ if claude_code_enabled:
421
+ available_tools.append(claude_code_tool)
422
+
423
+ # Create agent instructions
424
+ instructions = """You are a helpful AI assistant. Use the available tools when needed to provide comprehensive responses.
425
+
426
+ If Claude Code tool is available and the user's request involves:
427
+ - File modifications, code changes, or implementation tasks
428
+ - Creating, editing, or debugging code
429
+ - Project setup or development tasks
430
+ - Git operations or version control
431
+
432
+ Then use the Claude Code tool to handle those requests.
433
+
434
+ For informational questions, explanations, or general conversations, respond normally without using Claude Code."""
435
+
436
+ # Create agent
437
+ agent = Agent(
438
+ name="PraisonAI Assistant",
439
+ instructions=instructions,
440
+ llm=model_name,
441
+ tools=available_tools if available_tools else None
442
+ )
443
+
444
+ # Execute agent with streaming
445
+ full_response = ""
446
+
447
+ # Use agent's streaming capabilities if available
448
+ try:
449
+ # For now, use synchronous execution and stream the result
450
+ # TODO: Implement proper streaming when PraisonAI agents support it
451
+ result = agent.start(user_message)
452
+
453
+ # Stream the response character by character for better UX
454
+ if hasattr(result, 'raw'):
455
+ response_text = result.raw
456
+ else:
457
+ response_text = str(result)
458
+
459
+ for char in response_text:
460
+ await msg.stream_token(char)
461
+ full_response += char
462
+ # Small delay to make streaming visible
463
+ await asyncio.sleep(0.01)
464
+
465
+ except Exception as e:
466
+ error_response = f"Error executing agent: {str(e)}"
467
+ for char in error_response:
468
+ await msg.stream_token(char)
469
+ full_response += char
470
+ await asyncio.sleep(0.01)
471
+
472
+ msg.content = full_response
473
+ await msg.update()
474
+
475
+ except Exception as e:
476
+ error_msg = f"Failed to use PraisonAI Agents: {str(e)}"
477
+ logger.error(error_msg)
478
+ await msg.stream_token(error_msg)
479
+ msg.content = error_msg
480
+ await msg.update()
481
+
482
+ async def handle_with_litellm(user_message, model_name, message_history, msg, image):
483
+ """Fallback handler using litellm for backward compatibility"""
261
484
  # Prepare the completion parameters using the helper function
262
485
  completion_params = _build_completion_params(
263
486
  model_name,
@@ -279,7 +502,7 @@ Context:
279
502
  ]
280
503
  }
281
504
  # Use a vision-capable model when an image is present
282
- completion_params["model"] = "gpt-4-vision-preview" # Adjust this to your actual vision-capable model
505
+ completion_params["model"] = "gpt-4-vision-preview"
283
506
 
284
507
  # Only add tools and tool_choice if Tavily API key is available and no image is uploaded
285
508
  if tavily_api_key:
@@ -412,6 +635,10 @@ async def send_count():
412
635
  async def on_chat_resume(thread: ThreadDict):
413
636
  logger.info(f"Resuming chat: {thread['id']}")
414
637
  model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
638
+ # Load Claude Code setting (check CLI flag first, then database setting)
639
+ claude_code_enabled = os.getenv("PRAISONAI_CLAUDECODE_ENABLED", "false").lower() == "true"
640
+ if not claude_code_enabled:
641
+ claude_code_enabled = (load_setting("claude_code_enabled") or "false").lower() == "true"
415
642
  logger.debug(f"Model name: {model_name}")
416
643
  settings = cl.ChatSettings(
417
644
  [
@@ -420,6 +647,11 @@ async def on_chat_resume(thread: ThreadDict):
420
647
  label="Enter the Model Name",
421
648
  placeholder="e.g., gpt-4o-mini",
422
649
  initial=model_name
650
+ ),
651
+ Switch(
652
+ id="claude_code_enabled",
653
+ label="Enable Claude Code (file modifications & coding)",
654
+ initial=claude_code_enabled
423
655
  )
424
656
  ]
425
657
  )
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "PraisonAI"
3
- version = "2.2.32"
3
+ version = "2.2.34"
4
4
  description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
5
5
  readme = "README.md"
6
6
  license = ""
@@ -12,7 +12,7 @@ dependencies = [
12
12
  "rich>=13.7",
13
13
  "markdown>=3.5",
14
14
  "pyparsing>=3.0.0",
15
- "praisonaiagents>=0.0.105",
15
+ "praisonaiagents>=0.0.107",
16
16
  "python-dotenv>=0.19.0",
17
17
  "instructor>=1.3.3",
18
18
  "PyYAML>=6.0",
@@ -95,7 +95,7 @@ autogen = ["pyautogen>=0.2.19", "praisonai-tools>=0.0.15", "crewai"]
95
95
 
96
96
  [tool.poetry]
97
97
  name = "PraisonAI"
98
- version = "2.2.32"
98
+ version = "2.2.34"
99
99
  description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
100
100
  authors = ["Mervin Praison"]
101
101
  license = ""
@@ -113,7 +113,7 @@ python = ">=3.10,<3.13"
113
113
  rich = ">=13.7"
114
114
  markdown = ">=3.5"
115
115
  pyparsing = ">=3.0.0"
116
- praisonaiagents = ">=0.0.105"
116
+ praisonaiagents = ">=0.0.107"
117
117
  python-dotenv = ">=0.19.0"
118
118
  instructor = ">=1.3.3"
119
119
  PyYAML = ">=6.0"
File without changes
File without changes
File without changes