PraisonAI 2.0.12__cp311-cp311-macosx_15_0_arm64.whl → 2.2.16__cp311-cp311-macosx_15_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of PraisonAI might be problematic.

Files changed (40)
  1. praisonai/README.md +5 -0
  2. praisonai/agents_generator.py +83 -44
  3. praisonai/api/call.py +3 -3
  4. praisonai/auto.py +1 -1
  5. praisonai/cli.py +151 -16
  6. praisonai/deploy.py +1 -1
  7. praisonai/inbuilt_tools/__init__.py +1 -1
  8. praisonai/public/praison-ai-agents-architecture-dark.png +0 -0
  9. praisonai/public/praison-ai-agents-architecture.png +0 -0
  10. praisonai/setup/setup_conda_env.sh +55 -22
  11. praisonai/train.py +442 -156
  12. praisonai/train_vision.py +306 -0
  13. praisonai/ui/agents.py +822 -0
  14. praisonai/ui/callbacks.py +57 -0
  15. praisonai/ui/code.py +4 -2
  16. praisonai/ui/colab.py +474 -0
  17. praisonai/ui/colab_chainlit.py +81 -0
  18. praisonai/ui/config/chainlit.md +1 -1
  19. praisonai/ui/realtime.py +65 -10
  20. praisonai/ui/sql_alchemy.py +6 -5
  21. praisonai/ui/tools.md +133 -0
  22. praisonai/upload_vision.py +140 -0
  23. praisonai-2.2.16.dist-info/METADATA +103 -0
  24. {praisonai-2.0.12.dist-info → praisonai-2.2.16.dist-info}/RECORD +26 -29
  25. {praisonai-2.0.12.dist-info → praisonai-2.2.16.dist-info}/WHEEL +1 -1
  26. praisonai/ui/config/.chainlit/config.toml +0 -120
  27. praisonai/ui/config/.chainlit/translations/bn.json +0 -231
  28. praisonai/ui/config/.chainlit/translations/en-US.json +0 -229
  29. praisonai/ui/config/.chainlit/translations/gu.json +0 -231
  30. praisonai/ui/config/.chainlit/translations/he-IL.json +0 -231
  31. praisonai/ui/config/.chainlit/translations/hi.json +0 -231
  32. praisonai/ui/config/.chainlit/translations/kn.json +0 -231
  33. praisonai/ui/config/.chainlit/translations/ml.json +0 -231
  34. praisonai/ui/config/.chainlit/translations/mr.json +0 -231
  35. praisonai/ui/config/.chainlit/translations/ta.json +0 -231
  36. praisonai/ui/config/.chainlit/translations/te.json +0 -231
  37. praisonai/ui/config/.chainlit/translations/zh-CN.json +0 -229
  38. praisonai-2.0.12.dist-info/LICENSE +0 -20
  39. praisonai-2.0.12.dist-info/METADATA +0 -498
  40. {praisonai-2.0.12.dist-info → praisonai-2.2.16.dist-info}/entry_points.txt +0 -0
praisonai/README.md ADDED
@@ -0,0 +1,5 @@
+# PraisonAI Package
+
+This is the PraisonAI package, which serves as a wrapper for PraisonAIAgents.
+
+It provides a simple and intuitive interface for working with AI agents and their capabilities.
praisonai/agents_generator.py CHANGED
@@ -200,6 +200,47 @@ class AgentsGenerator:
             tools_dict[name] = obj
         return tools_dict
 
+    def load_tools_from_tools_py(self):
+        """
+        Imports and returns all contents from tools.py file.
+        Also adds the tools to the global namespace.
+
+        Returns:
+            list: A list of callable functions with proper formatting
+        """
+        tools_list = []
+        try:
+            # Try to import tools.py from current directory
+            spec = importlib.util.spec_from_file_location("tools", "tools.py")
+            self.logger.debug(f"Spec: {spec}")
+            if spec is None:
+                self.logger.debug("tools.py not found in current directory")
+                return tools_list
+
+            module = importlib.util.module_from_spec(spec)
+            spec.loader.exec_module(module)
+
+            # Get all module attributes except private ones and classes
+            for name, obj in inspect.getmembers(module):
+                if (not name.startswith('_') and
+                    callable(obj) and
+                    not inspect.isclass(obj)):
+                    # Add the function to global namespace
+                    globals()[name] = obj
+                    # Add to tools list
+                    tools_list.append(obj)
+                    self.logger.debug(f"Loaded and globalized tool function: {name}")
+
+            self.logger.debug(f"Loaded {len(tools_list)} tool functions from tools.py")
+            self.logger.debug(f"Tools list: {tools_list}")
+
+        except FileNotFoundError:
+            self.logger.debug("tools.py not found in current directory")
+        except Exception as e:
+            self.logger.warning(f"Error loading tools from tools.py: {e}")
+
+        return tools_list
+
     def generate_crew_and_kickoff(self):
         """
         Generates a crew of agents and initiates tasks based on the provided configuration.
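
Note: the new load_tools_from_tools_py() method discovers any top-level, non-underscore callable defined in a tools.py in the working directory. A minimal sketch of such a file (the function name and body are illustrative, not part of the package) might look like this:

```python
# tools.py (hypothetical example)
# Any module-level callable that is not a class and does not start with "_"
# would be picked up by load_tools_from_tools_py() and handed to agents.

def internet_search_tool(query: str) -> str:
    """Illustrative stand-in for a real search tool."""
    # A real tool would call a search API here; this sketch just echoes the query.
    return f"Search results for: {query}"
```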
@@ -271,7 +312,7 @@ class AgentsGenerator:
         elif tools_dir_path.is_dir():
             tools_dict.update(self.load_tools_from_module_class(tools_dir_path))
             self.logger.debug("tools folder exists in the root directory")
-
+
         framework = self.framework or config.get('framework')
 
         if framework == "autogen":
@@ -396,20 +437,26 @@ class AgentsGenerator:
                 llm_model = details.get('llm')
                 if llm_model:
                     llm = PraisonAIModel(
-                        model=llm_model.get("model", os.environ.get("MODEL_NAME", "openai/gpt-4o")),
+                        model=llm_model.get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-4o",
+                        base_url=self.config_list[0].get('base_url') if self.config_list else None
                     ).get_model()
                 else:
-                    llm = PraisonAIModel().get_model()
+                    llm = PraisonAIModel(
+                        base_url=self.config_list[0].get('base_url') if self.config_list else None
+                    ).get_model()
 
                 # Configure function calling LLM
                 function_calling_llm_model = details.get('function_calling_llm')
                 if function_calling_llm_model:
                     function_calling_llm = PraisonAIModel(
-                        model=function_calling_llm_model.get("model", os.environ.get("MODEL_NAME", "openai/gpt-4o")),
+                        model=function_calling_llm_model.get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-4o",
+                        base_url=self.config_list[0].get('base_url') if self.config_list else None
                     ).get_model()
                 else:
-                    function_calling_llm = PraisonAIModel().get_model()
-
+                    function_calling_llm = PraisonAIModel(
+                        base_url=self.config_list[0].get('base_url') if self.config_list else None
+                    ).get_model()
+
                 # Create CrewAI agent
                 agent = Agent(
                     role=role_filled,
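
Note: several changes in this release replace dict.get(key, default) with an or-chain of fallbacks. The two are not equivalent: get() only falls back when the key is missing, while `or` also falls back when the stored value is None or an empty string, which is common for blank fields in YAML configs. A quick standalone illustration (values are made up):

```python
# Illustrative only: why `d.get(key) or default` differs from `d.get(key, default)`.
details = {"model": ""}  # hypothetical agent entry with a blank model field

print(details.get("model", "openai/gpt-4o"))    # prints "" - the default is not applied
print(details.get("model") or "openai/gpt-4o")  # prints "openai/gpt-4o" - falls back
```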
@@ -419,14 +466,14 @@ class AgentsGenerator:
                     allow_delegation=details.get('allow_delegation', False),
                     llm=llm,
                     function_calling_llm=function_calling_llm,
-                    max_iter=details.get('max_iter', 15),
-                    max_rpm=details.get('max_rpm'),
-                    max_execution_time=details.get('max_execution_time'),
+                    max_iter=details.get('max_iter') or 15,
+                    max_rpm=details.get('max_rpm') or None,
+                    max_execution_time=details.get('max_execution_time') or None,
                     verbose=details.get('verbose', True),
                     cache=details.get('cache', True),
-                    system_template=details.get('system_template'),
-                    prompt_template=details.get('prompt_template'),
-                    response_template=details.get('response_template'),
+                    system_template=details.get('system_template') or None,
+                    prompt_template=details.get('prompt_template') or None,
+                    response_template=details.get('response_template') or None,
                 )
 
                 # Set agent callback if provided
@@ -475,7 +522,7 @@ class AgentsGenerator:
             crew = Crew(
                 agents=list(agents.values()),
                 tasks=tasks,
-                verbose=2
+                verbose=True
             )
 
             self.logger.debug("Final Crew Configuration:")
@@ -498,40 +545,26 @@ class AgentsGenerator:
         tasks = []
         tasks_dict = {}
 
+        # Load tools once at the beginning
+        tools_list = self.load_tools_from_tools_py()
+        self.logger.debug(f"Loaded tools: {tools_list}")
+
         # Create agents from config
         for role, details in config['roles'].items():
             role_filled = details['role'].format(topic=topic)
             goal_filled = details['goal'].format(topic=topic)
             backstory_filled = details['backstory'].format(topic=topic)
 
-            # Get agent tools
-            agent_tools = [tools_dict[tool] for tool in details.get('tools', [])
-                           if tool in tools_dict]
-
-            # Configure LLM
-            llm_model = details.get('llm')
-            if llm_model:
-                llm = llm_model.get("model", os.environ.get("MODEL_NAME", "gpt-4o"))
-            else:
-                llm = os.environ.get("MODEL_NAME", "gpt-4o")
-
-            # Configure function calling LLM
-            function_calling_llm_model = details.get('function_calling_llm')
-            if function_calling_llm_model:
-                function_calling_llm = function_calling_llm_model.get("model", os.environ.get("MODEL_NAME", "openai/gpt-4o"))
-            else:
-                function_calling_llm = os.environ.get("MODEL_NAME", "gpt-4o")
-
-            # Create PraisonAI agent
+            # Pass all loaded tools to the agent
             agent = PraisonAgent(
                 name=role_filled,
                 role=role_filled,
                 goal=goal_filled,
                 backstory=backstory_filled,
-                tools=agent_tools,
+                tools=tools_list,  # Pass the entire tools list to the agent
                 allow_delegation=details.get('allow_delegation', False),
-                llm=llm,
-                function_calling_llm=function_calling_llm,
+                llm=details.get('llm', {}).get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-4o",
+                function_calling_llm=details.get('function_calling_llm', {}).get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-4o",
                 max_iter=details.get('max_iter', 15),
                 max_rpm=details.get('max_rpm'),
                 max_execution_time=details.get('max_execution_time'),
@@ -540,25 +573,27 @@ class AgentsGenerator:
                 system_template=details.get('system_template'),
                 prompt_template=details.get('prompt_template'),
                 response_template=details.get('response_template'),
+                reflect_llm=details.get('reflect_llm', {}).get("model") or os.environ.get("MODEL_NAME") or "openai/gpt-4o",
+                min_reflect=details.get('min_reflect', 1),
+                max_reflect=details.get('max_reflect', 3),
             )
 
-            # Set agent callback if provided
            if self.agent_callback:
                agent.step_callback = self.agent_callback
 
            agents[role] = agent
+            self.logger.debug(f"Created agent {role_filled} with tools: {agent.tools}")
 
            # Create tasks for the agent
            for task_name, task_details in details.get('tasks', {}).items():
                description_filled = task_details['description'].format(topic=topic)
                expected_output_filled = task_details['expected_output'].format(topic=topic)
 
-                # Create task using PraisonAI Task class
                task = PraisonTask(
                    description=description_filled,
                    expected_output=expected_output_filled,
                    agent=agent,
-                    tools=task_details.get('tools', []),
+                    tools=tools_list,  # Pass the same tools list to the task
                    async_execution=task_details.get('async_execution', False),
                    context=[],
                    config=task_details.get('config', {}),
@@ -568,8 +603,9 @@ class AgentsGenerator:
                    callback=task_details.get('callback'),
                    create_directory=task_details.get('create_directory', False)
                )
+
+                self.logger.debug(f"Created task {task_name} with tools: {task.tools}")
 
-                # Set task callback if provided
                if self.task_callback:
                    task.callback = self.task_callback
 
@@ -581,31 +617,34 @@ class AgentsGenerator:
            for task_name, task_details in details.get('tasks', {}).items():
                task = tasks_dict[task_name]
                context_tasks = [tasks_dict[ctx] for ctx in task_details.get('context', [])
-                               if ctx in tasks_dict]
+                                 if ctx in tasks_dict]
                task.context = context_tasks
 
        # Create and run the PraisonAI agents
+        memory = config.get('memory', False)
+        self.logger.debug(f"Memory: {memory}")
        if config.get('process') == 'hierarchical':
            agents = PraisonAIAgents(
                agents=list(agents.values()),
                tasks=tasks,
                verbose=True,
                process="hierarchical",
-                manager_llm=config.get('manager_llm', 'gpt-4o'),
+                manager_llm=config.get('manager_llm') or os.environ.get("MODEL_NAME") or "openai/gpt-4o",
+                memory=memory
            )
        else:
            agents = PraisonAIAgents(
                agents=list(agents.values()),
                tasks=tasks,
-                verbose=2
+                verbose=True,
+                memory=memory
            )
-
+
        self.logger.debug("Final Configuration:")
        self.logger.debug(f"Agents: {agents.agents}")
        self.logger.debug(f"Tasks: {agents.tasks}")
 
        response = agents.start()
-        # result = f"### Task Output ###\n{response}"
        self.logger.debug(f"Result: {response}")
        result = ""
 
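Note: taken together, the changes above mean the YAML-driven configuration can now carry memory, process, and manager_llm settings in addition to roles and their tasks. A rough sketch of the structure implied by the keys read in this diff, written as the equivalent Python dict (the field names come from the code above; the values are illustrative, not shipped defaults):

```python
# Rough sketch only - keys mirror what generate_crew_and_kickoff() reads above.
config = {
    "framework": "praisonai",
    "memory": True,                  # forwarded to PraisonAIAgents(memory=...)
    "process": "hierarchical",       # selects the manager_llm branch
    "manager_llm": "openai/gpt-4o",  # falls back to MODEL_NAME, then openai/gpt-4o
    "roles": {
        "researcher": {
            "role": "Researcher on {topic}",
            "goal": "Investigate {topic}",
            "backstory": "An expert in {topic}",
            "tasks": {
                "research_task": {
                    "description": "Research {topic} and summarise the findings",
                    "expected_output": "A short report on {topic}",
                },
            },
        },
    },
}
```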
praisonai/api/call.py CHANGED
@@ -50,7 +50,7 @@ logger.handlers = []
 # Try to import tools from the root directory
 tools = []
 tools_path = os.path.join(os.getcwd(), 'tools.py')
-logger.info(f"Tools path: {tools_path}")
+logger.debug(f"Tools path: {tools_path}")
 
 def import_tools_from_file(file_path):
     spec = importlib.util.spec_from_file_location("custom_tools", file_path)
@@ -63,9 +63,9 @@ try:
     if os.path.exists(tools_path):
         # tools.py exists in the root directory, import from file
         custom_tools_module = import_tools_from_file(tools_path)
-        logger.info("Successfully imported custom tools from root tools.py")
+        logger.debug("Successfully imported custom tools from root tools.py")
     else:
-        logger.info("No custom tools.py file found in the root directory")
+        logger.debug("No custom tools.py file found in the root directory")
         custom_tools_module = None
 
     if custom_tools_module:
praisonai/auto.py CHANGED
@@ -104,7 +104,7 @@ Tools are not available for {framework}. To use tools, install:
        self.client = instructor.patch(
            OpenAI(
                base_url=self.config_list[0]['base_url'],
-                api_key=os.getenv("OPENAI_API_KEY"),
+                api_key=self.config_list[0]['api_key'],
            ),
            mode=instructor.Mode.JSON,
        )
praisonai/cli.py CHANGED
@@ -25,7 +25,6 @@ CALL_MODULE_AVAILABLE = False
 CREWAI_AVAILABLE = False
 AUTOGEN_AVAILABLE = False
 PRAISONAI_AVAILABLE = False
-
 try:
     # Create necessary directories and set CHAINLIT_APP_ROOT
     if "CHAINLIT_APP_ROOT" not in os.environ:
@@ -73,6 +72,12 @@ except ImportError:
     pass
 
 logging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO'), format='%(asctime)s - %(levelname)s - %(message)s')
+logging.getLogger('alembic').setLevel(logging.ERROR)
+logging.getLogger('gradio').setLevel(logging.ERROR)
+logging.getLogger('gradio').setLevel(os.environ.get('GRADIO_LOGLEVEL', 'WARNING'))
+logging.getLogger('rust_logger').setLevel(logging.WARNING)
+logging.getLogger('duckduckgo').setLevel(logging.ERROR)
+logging.getLogger('_client').setLevel(logging.ERROR)
 
 def stream_subprocess(command, env=None):
     """
@@ -108,11 +113,14 @@ class PraisonAI:
        Initialize the PraisonAI object with default parameters.
        """
        self.agent_yaml = agent_yaml
+        # Create config_list with AutoGen compatibility
+        api_key = os.environ.get("OPENAI_API_KEY")
        self.config_list = [
            {
                'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o"),
                'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
-                'api_key': os.environ.get("OPENAI_API_KEY")
+                'api_key': api_key,
+                'api_type': 'openai'  # AutoGen expects this field
            }
        ]
        self.agent_file = agent_file
@@ -125,7 +133,7 @@
        """
        Run the PraisonAI application.
        """
-        self.main()
+        return self.main()
 
    def main(self):
        """
@@ -133,17 +141,33 @@
        initializes the necessary attributes, and then calls the appropriate methods based on the
        provided arguments.
        """
+        # Store the original agent_file from constructor
+        original_agent_file = self.agent_file
+
        args = self.parse_args()
+        # Store args for use in handle_direct_prompt
+        self.args = args
        invocation_cmd = "praisonai"
        version_string = f"PraisonAI version {__version__}"
 
        self.framework = args.framework or self.framework
 
        if args.command:
-            if args.command.startswith("tests.test"):  # Argument used for testing purposes
+            if args.command.startswith("tests.test") or args.command.startswith("tests/test"):  # Argument used for testing purposes
                print("test")
+                return "test"
            else:
                self.agent_file = args.command
+        elif hasattr(args, 'direct_prompt') and args.direct_prompt:
+            # Only handle direct prompt if agent_file wasn't explicitly set in constructor
+            if original_agent_file == "agents.yaml":  # Default value, so safe to use direct prompt
+                result = self.handle_direct_prompt(args.direct_prompt)
+                print(result)
+                return result
+            else:
+                # Agent file was explicitly set, ignore direct prompt and use the file
+                pass
+        # If no command or direct_prompt, preserve agent_file from constructor (don't overwrite)
 
        if args.deploy:
            from .deploy import CloudDeployer
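
Note: with run() now returning main()'s result, the programmatic entry point can capture the crew output directly instead of only printing it. A minimal usage sketch, assuming the package exposes PraisonAI at the top level (as its README shows), an agents.yaml in the working directory, and an OPENAI_API_KEY in the environment:

```python
# Minimal sketch; constructor parameter name taken from the diff above.
from praisonai import PraisonAI

praisonai = PraisonAI(agent_file="agents.yaml")
result = praisonai.run()  # as of this release, run() returns main()'s result
print(result)
```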
@@ -174,8 +198,18 @@
            package_root = os.path.dirname(os.path.abspath(__file__))
            config_yaml_destination = os.path.join(os.getcwd(), 'config.yaml')
 
-            # Create config.yaml only if it doesn't exist or --model or --dataset is provided
-            if not os.path.exists(config_yaml_destination) or args.model or args.dataset:
+            if not os.path.exists(config_yaml_destination):
+                config = generate_config(
+                    model_name=args.model,
+                    hf_model_name=args.hf,
+                    ollama_model_name=args.ollama,
+                    dataset=[{
+                        "name": args.dataset
+                    }]
+                )
+                with open('config.yaml', 'w') as f:
+                    yaml.dump(config, f, default_flow_style=False, indent=2)
+            elif args.model or args.hf or args.ollama or (args.dataset and args.dataset != "yahma/alpaca-cleaned"):
                config = generate_config(
                    model_name=args.model,
                    hf_model_name=args.hf,
@@ -186,6 +220,9 @@
                )
                with open('config.yaml', 'w') as f:
                    yaml.dump(config, f, default_flow_style=False, indent=2)
+            else:
+                with open(config_yaml_destination, 'r') as f:
+                    config = yaml.safe_load(f)
 
            # Overwrite huggingface_save and ollama_save if --hf or --ollama are provided
            if args.hf:
@@ -212,7 +249,18 @@
            print("All packages installed.")
 
            train_args = sys.argv[2:]  # Get all arguments after 'train'
-            train_script_path = os.path.join(package_root, 'train.py')
+
+            # Check if this is a vision model - handle all case variations
+            model_name = config.get("model_name", "").lower()
+            is_vision_model = any(x in model_name for x in ["-vl-", "-vl", "vl-", "-vision-", "-vision", "vision-", "visionmodel"])
+
+            # Choose appropriate training script
+            if is_vision_model:
+                train_script_path = os.path.join(package_root, 'train_vision.py')
+                print("Using vision training script for VL model...")
+            else:
+                train_script_path = os.path.join(package_root, 'train.py')
+                print("Using standard training script...")
 
            # Set environment variables
            env = os.environ.copy()
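
Note: the vision/text routing above is a plain substring check on the configured model name. A standalone illustration of the same logic (the model names below are examples only, not values from the package):

```python
# Isolated illustration of the routing check above; not the packaged code.
VISION_MARKERS = ["-vl-", "-vl", "vl-", "-vision-", "-vision", "vision-", "visionmodel"]

def uses_vision_training(model_name: str) -> bool:
    """Return True when any vision marker appears in the lowercased model name."""
    name = model_name.lower()
    return any(marker in name for marker in VISION_MARKERS)

print(uses_vision_training("unsloth/Qwen2-VL-7B-Instruct"))   # True  -> train_vision.py
print(uses_vision_training("unsloth/Llama-3.2-3B-Instruct"))  # False -> train.py
```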
@@ -279,22 +327,41 @@
        """
        Parse the command-line arguments for the PraisonAI CLI.
        """
+        # Check if we're running in a test environment
+        in_test_env = (
+            'pytest' in sys.argv[0] or
+            'unittest' in sys.argv[0] or
+            any('test' in arg for arg in sys.argv[1:3]) or  # Check first few args for test indicators
+            'pytest' in sys.modules or
+            'unittest' in sys.modules
+        )
+
+        # Define special commands
+        special_commands = ['chat', 'code', 'call', 'realtime', 'train', 'ui']
+
        parser = argparse.ArgumentParser(prog="praisonai", description="praisonAI command-line interface")
        parser.add_argument("--framework", choices=["crewai", "autogen", "praisonai"], help="Specify the framework")
        parser.add_argument("--ui", choices=["chainlit", "gradio"], help="Specify the UI framework (gradio or chainlit).")
        parser.add_argument("--auto", nargs=argparse.REMAINDER, help="Enable auto mode and pass arguments for it")
        parser.add_argument("--init", nargs=argparse.REMAINDER, help="Initialize agents with optional topic")
-        parser.add_argument("command", nargs="?", help="Command to run")
+        parser.add_argument("command", nargs="?", help="Command to run or direct prompt")
        parser.add_argument("--deploy", action="store_true", help="Deploy the application")
        parser.add_argument("--model", type=str, help="Model name")
+        parser.add_argument("--llm", type=str, help="LLM model to use for direct prompts")
        parser.add_argument("--hf", type=str, help="Hugging Face model name")
        parser.add_argument("--ollama", type=str, help="Ollama model name")
        parser.add_argument("--dataset", type=str, help="Dataset name for training", default="yahma/alpaca-cleaned")
        parser.add_argument("--realtime", action="store_true", help="Start the realtime voice interaction interface")
        parser.add_argument("--call", action="store_true", help="Start the PraisonAI Call server")
        parser.add_argument("--public", action="store_true", help="Use ngrok to expose the server publicly (only with --call)")
-        args, unknown_args = parser.parse_known_args()
+
+        # If we're in a test environment, parse with empty args to avoid pytest interference
+        if in_test_env:
+            args, unknown_args = parser.parse_known_args([])
+        else:
+            args, unknown_args = parser.parse_known_args()
 
+        # Handle special cases first
        if unknown_args and unknown_args[0] == '-b' and unknown_args[1] == 'api:app':
            args.command = 'agents.yaml'
        if args.command == 'api:app' or args.command == '/app/api:app':
@@ -325,9 +392,7 @@
            call_module.main(call_args)
            sys.exit(0)
 
-        # Handle special commands first
-        special_commands = ['chat', 'code', 'call', 'realtime', 'train', 'ui']
-
+        # Handle special commands
        if args.command in special_commands:
            if args.command == 'chat':
                if not CHAINLIT_AVAILABLE:
@@ -374,9 +439,8 @@
                sys.exit(0)
 
            elif args.command == 'train':
-                print("[red]ERROR: Train feature is not installed. Install with:[/red]")
-                print("\npip install \"praisonai[train]\"\n")
-                sys.exit(1)
+                package_root = os.path.dirname(os.path.abspath(__file__))
+                config_yaml_destination = os.path.join(os.getcwd(), 'config.yaml')
 
            elif args.command == 'ui':
                if not CHAINLIT_AVAILABLE:
@@ -396,8 +460,79 @@
                print("pip install praisonaiagents # For PraisonAIAgents\n")
                sys.exit(1)
 
+        # Handle direct prompt if command is not a special command or file
+        # Skip this during testing to avoid pytest arguments interfering
+        if not in_test_env and args.command and not args.command.endswith('.yaml') and args.command not in special_commands:
+            args.direct_prompt = args.command
+            args.command = None
+
        return args
 
+    def handle_direct_prompt(self, prompt):
+        """
+        Handle direct prompt by creating a single agent and running it.
+        """
+        if PRAISONAI_AVAILABLE:
+            agent_config = {
+                "name": "DirectAgent",
+                "role": "Assistant",
+                "goal": "Complete the given task",
+                "backstory": "You are a helpful AI assistant"
+            }
+
+            # Add llm if specified
+            if hasattr(self, 'args') and self.args.llm:
+                agent_config["llm"] = self.args.llm
+
+            agent = PraisonAgent(**agent_config)
+            result = agent.start(prompt)
+            return result
+        elif CREWAI_AVAILABLE:
+            agent_config = {
+                "name": "DirectAgent",
+                "role": "Assistant",
+                "goal": "Complete the given task",
+                "backstory": "You are a helpful AI assistant"
+            }
+
+            # Add llm if specified
+            if hasattr(self, 'args') and self.args.llm:
+                agent_config["llm"] = self.args.llm
+
+            agent = Agent(**agent_config)
+            task = Task(
+                description=prompt,
+                agent=agent
+            )
+            crew = Crew(
+                agents=[agent],
+                tasks=[task]
+            )
+            return crew.kickoff()
+        elif AUTOGEN_AVAILABLE:
+            config_list = self.config_list
+            # Add llm if specified
+            if hasattr(self, 'args') and self.args.llm:
+                config_list[0]['model'] = self.args.llm
+
+            assistant = autogen.AssistantAgent(
+                name="DirectAgent",
+                llm_config={"config_list": config_list}
+            )
+            user_proxy = autogen.UserProxyAgent(
+                name="UserProxy",
+                code_execution_config={"work_dir": "coding"}
+            )
+            user_proxy.initiate_chat(assistant, message=prompt)
+            return "Task completed"
+        else:
+            print("[red]ERROR: No framework is installed. Please install at least one framework:[/red]")
+            print("\npip install \"praisonai\\[crewai]\" # For CrewAI")
+            print("pip install \"praisonai\\[autogen]\" # For AutoGen")
+            print("pip install \"praisonai\\[crewai,autogen]\" # For both frameworks\n")
+            print("pip install praisonaiagents # For PraisonAIAgents\n")
+            sys.exit(1)
+
    def create_chainlit_chat_interface(self):
        """
        Create a Chainlit interface for the chat application.
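
Note: the PRAISONAI_AVAILABLE branch above is roughly equivalent to the standalone sketch below. The import path is assumed from the praisonaiagents package, and the prompt and model name are illustrative; the constructor fields mirror the agent_config built in the diff.

```python
# Sketch of what a direct prompt boils down to when praisonaiagents is installed.
# Assumes `from praisonaiagents import Agent` is the relevant import.
from praisonaiagents import Agent

agent = Agent(
    name="DirectAgent",
    role="Assistant",
    goal="Complete the given task",
    backstory="You are a helpful AI assistant",
    llm="gpt-4o-mini",  # mirrors the new --llm flag; optional
)
print(agent.start("Summarise what changed in this release"))
```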
@@ -478,7 +613,7 @@
                logging.info("Public folder not found in the package.")
            else:
                logging.info("Public folder already exists.")
-            chainlit_ui_path = os.path.join(os.path.dirname(praisonai.__file__), 'chainlit_ui.py')
+            chainlit_ui_path = os.path.join(os.path.dirname(praisonai.__file__), 'ui', 'agents.py')
            chainlit_run([chainlit_ui_path])
        else:
            print("ERROR: Chainlit is not installed. Please install it with 'pip install \"praisonai[ui]\"' to use the UI.")
praisonai/deploy.py CHANGED
@@ -56,7 +56,7 @@ class CloudDeployer:
        file.write("FROM python:3.11-slim\n")
        file.write("WORKDIR /app\n")
        file.write("COPY . .\n")
-        file.write("RUN pip install flask praisonai==2.0.10 gunicorn markdown\n")
+        file.write("RUN pip install flask praisonai==2.2.16 gunicorn markdown\n")
        file.write("EXPOSE 8080\n")
        file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
 
praisonai/inbuilt_tools/__init__.py CHANGED
@@ -1,4 +1,4 @@
-# Only try to import autogen_tools if either CrewAI or AutoGen is available
+# Only try to import autogen_tools if either CrewAI or AG2 is available
 CREWAI_AVAILABLE = False
 AUTOGEN_AVAILABLE = False
 PRAISONAI_TOOLS_AVAILABLE = False