PraisonAI 0.0.57__tar.gz → 0.0.59rc2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of PraisonAI might be problematic.

Files changed (41)
  1. {praisonai-0.0.57 → praisonai-0.0.59rc2}/PKG-INFO +2 -1
  2. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/cli.py +6 -0
  3. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/deploy.py +1 -1
  4. praisonai-0.0.59rc2/praisonai/train.py +232 -0
  5. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/ui/context.py +87 -51
  6. {praisonai-0.0.57 → praisonai-0.0.59rc2}/pyproject.toml +15 -3
  7. praisonai-0.0.59rc2/setup/post_install.py +20 -0
  8. {praisonai-0.0.57 → praisonai-0.0.59rc2}/LICENSE +0 -0
  9. {praisonai-0.0.57 → praisonai-0.0.59rc2}/README.md +0 -0
  10. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/__init__.py +0 -0
  11. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/__main__.py +0 -0
  12. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/agents_generator.py +0 -0
  13. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/auto.py +0 -0
  14. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/chainlit_ui.py +0 -0
  15. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/inbuilt_tools/__init__.py +0 -0
  16. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/inbuilt_tools/autogen_tools.py +0 -0
  17. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/inc/__init__.py +0 -0
  18. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/inc/models.py +0 -0
  19. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/public/android-chrome-192x192.png +0 -0
  20. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/public/android-chrome-512x512.png +0 -0
  21. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/public/apple-touch-icon.png +0 -0
  22. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/public/fantasy.svg +0 -0
  23. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/public/favicon-16x16.png +0 -0
  24. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/public/favicon-32x32.png +0 -0
  25. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/public/favicon.ico +0 -0
  26. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/public/game.svg +0 -0
  27. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/public/logo_dark.png +0 -0
  28. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/public/logo_light.png +0 -0
  29. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/public/movie.svg +0 -0
  30. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/public/thriller.svg +0 -0
  31. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/test.py +0 -0
  32. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/ui/chat.py +0 -0
  33. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/ui/code.py +0 -0
  34. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/ui/public/fantasy.svg +0 -0
  35. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/ui/public/game.svg +0 -0
  36. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/ui/public/logo_dark.png +0 -0
  37. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/ui/public/logo_light.png +0 -0
  38. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/ui/public/movie.svg +0 -0
  39. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/ui/public/thriller.svg +0 -0
  40. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/ui/sql_alchemy.py +0 -0
  41. {praisonai-0.0.57 → praisonai-0.0.59rc2}/praisonai/version.py +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: PraisonAI
-Version: 0.0.57
+Version: 0.0.59rc2
 Summary: PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration.
 Author: Mervin Praison
 Requires-Python: >=3.10,<3.13
@@ -17,6 +17,7 @@ Provides-Extra: cohere
 Provides-Extra: google
 Provides-Extra: gradio
 Provides-Extra: openai
+Provides-Extra: train
 Provides-Extra: ui
 Requires-Dist: agentops (>=0.2.6) ; extra == "agentops"
 Requires-Dist: aiosqlite (>=0.20.0) ; extra == "chat" or extra == "code"

praisonai/cli.py
@@ -98,6 +98,12 @@ class PraisonAI:
         if getattr(args, 'code', False):
             self.create_code_interface()
             return
+
+        if args.agent_file == 'train':
+            from .train import main as train_main
+            train_args = sys.argv[2:]  # Get all arguments after 'train'
+            train_main(train_args)  # Pass the arguments to train.py's main function
+            return
 
         invocation_cmd = "praisonai"
         version_string = f"PraisonAI version {__version__}"
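
With this dispatch in place, training runs through the regular CLI entry point, and everything after the train keyword is forwarded to praisonai/train.py. A hypothetical invocation, using the --config flag defined in train.py's argument parser further down:

    praisonai train --config config.yaml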

praisonai/deploy.py
@@ -56,7 +56,7 @@ class CloudDeployer:
         file.write("FROM python:3.11-slim\n")
         file.write("WORKDIR /app\n")
         file.write("COPY . .\n")
-        file.write("RUN pip install flask praisonai==0.0.57 gunicorn markdown\n")
+        file.write("RUN pip install flask praisonai==0.0.59rc2 gunicorn markdown\n")
         file.write("EXPOSE 8080\n")
         file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
 
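For reference, the Dockerfile this method writes out now pins the new release. Assembled verbatim from the file.write calls above, the generated file is:

    FROM python:3.11-slim
    WORKDIR /app
    COPY . .
    RUN pip install flask praisonai==0.0.59rc2 gunicorn markdown
    EXPOSE 8080
    CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]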

praisonai-0.0.59rc2/praisonai/train.py (new file)
@@ -0,0 +1,232 @@
+import subprocess
+import os
+import shutil
+import sys
+import yaml
+import torch
+from transformers import TextStreamer
+from unsloth import FastLanguageModel, is_bfloat16_supported
+from trl import SFTTrainer
+from transformers import TrainingArguments
+from datasets import load_dataset, concatenate_datasets, Dataset
+from psutil import virtual_memory
+
+class train:
+    def __init__(self, config_path="config.yaml"):
+        self.load_config(config_path)
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.model, self.tokenizer = None, None
+
+    def load_config(self, path):
+        with open(path, "r") as file:
+            self.config = yaml.safe_load(file)
+
+    def print_system_info(self):
+        print(f"PyTorch version: {torch.__version__}")
+        print(f"CUDA version: {torch.version.cuda}")
+        if torch.cuda.is_available():
+            device_capability = torch.cuda.get_device_capability()
+            print(f"CUDA Device Capability: {device_capability}")
+        else:
+            print("CUDA is not available")
+
+        python_version = sys.version
+        pip_version = subprocess.check_output(['pip', '--version']).decode().strip()
+        python_path = sys.executable
+        pip_path = subprocess.check_output(['which', 'pip']).decode().strip()
+        print(f"Python Version: {python_version}")
+        print(f"Pip Version: {pip_version}")
+        print(f"Python Path: {python_path}")
+        print(f"Pip Path: {pip_path}")
+
+    def check_gpu(self):
+        gpu_stats = torch.cuda.get_device_properties(0)
+        print(f"GPU = {gpu_stats.name}. Max memory = {round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)} GB.")
+
+    def check_ram(self):
+        ram_gb = virtual_memory().total / 1e9
+        print('Your runtime has {:.1f} gigabytes of available RAM\n'.format(ram_gb))
+        if ram_gb < 20:
+            print('Not using a high-RAM runtime')
+        else:
+            print('You are using a high-RAM runtime!')
+
+    # def install_packages(self):
+    #     subprocess.run(["pip", "install", "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git@4e570be9ae4ced8cdc64e498125708e34942befc"])
+    #     subprocess.run(["pip", "install", "--no-deps", "trl<0.9.0", "peft==0.12.0", "accelerate==0.33.0", "bitsandbytes==0.43.3"])
+
+    def prepare_model(self):
+        self.model, self.tokenizer = FastLanguageModel.from_pretrained(
+            model_name=self.config["model_name"],
+            max_seq_length=self.config["max_seq_length"],
+            dtype=None,
+            load_in_4bit=self.config["load_in_4bit"]
+        )
+        self.model = FastLanguageModel.get_peft_model(
+            self.model,
+            r=self.config["lora_r"],
+            target_modules=self.config["lora_target_modules"],
+            lora_alpha=self.config["lora_alpha"],
+            lora_dropout=self.config["lora_dropout"],
+            bias=self.config["lora_bias"],
+            use_gradient_checkpointing=self.config["use_gradient_checkpointing"],
+            random_state=self.config["random_state"],
+            use_rslora=self.config["use_rslora"],
+            loftq_config=self.config["loftq_config"],
+        )
+
+    def process_dataset(self, dataset_info):
+        dataset_name = dataset_info["name"]
+        split_type = dataset_info.get("split_type", "train")
+        processing_func = getattr(self, dataset_info.get("processing_func", "format_prompts"))
+        rename = dataset_info.get("rename", {})
+        filter_data = dataset_info.get("filter_data", False)
+        filter_column_value = dataset_info.get("filter_column_value", "id")
+        filter_value = dataset_info.get("filter_value", "alpaca")
+        num_samples = dataset_info.get("num_samples", 20000)
+
+        dataset = load_dataset(dataset_name, split=split_type)
+
+        if rename:
+            dataset = dataset.rename_columns(rename)
+        if filter_data:
+            dataset = dataset.filter(lambda example: filter_value in example[filter_column_value]).shuffle(seed=42).select(range(num_samples))
+        dataset = dataset.map(processing_func, batched=True)
+        return dataset
+
+    def format_prompts(self, examples):
+        alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
+
+### Instruction:
+{}
+
+### Input:
+{}
+
+### Response:
+{}"""
+        texts = [alpaca_prompt.format(ins, inp, out) + self.tokenizer.eos_token for ins, inp, out in zip(examples["instruction"], examples["input"], examples["output"])]
+        return {"text": texts}
+
+    def load_datasets(self):
+        datasets = []
+        for dataset_info in self.config["dataset"]:
+            datasets.append(self.process_dataset(dataset_info))
+        return concatenate_datasets(datasets)
+
+    def train_model(self):
+        dataset = self.load_datasets()
+        trainer = SFTTrainer(
+            model=self.model,
+            tokenizer=self.tokenizer,
+            train_dataset=dataset,
+            dataset_text_field=self.config["dataset_text_field"],
+            max_seq_length=self.config["max_seq_length"],
+            dataset_num_proc=self.config["dataset_num_proc"],
+            packing=self.config["packing"],
+            args=TrainingArguments(
+                per_device_train_batch_size=self.config["per_device_train_batch_size"],
+                gradient_accumulation_steps=self.config["gradient_accumulation_steps"],
+                warmup_steps=self.config["warmup_steps"],
+                num_train_epochs=self.config["num_train_epochs"],
+                max_steps=self.config["max_steps"],
+                learning_rate=self.config["learning_rate"],
+                fp16=not is_bfloat16_supported(),
+                bf16=is_bfloat16_supported(),
+                logging_steps=self.config["logging_steps"],
+                optim=self.config["optim"],
+                weight_decay=self.config["weight_decay"],
+                lr_scheduler_type=self.config["lr_scheduler_type"],
+                seed=self.config["seed"],
+                output_dir=self.config["output_dir"],
+            ),
+        )
+        trainer.train()
+
+    def inference(self, instruction, input_text):
+        FastLanguageModel.for_inference(self.model)
+        alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
+
+### Instruction:
+{}
+
+### Input:
+{}
+
+### Response:
+{}"""
+        inputs = self.tokenizer([alpaca_prompt.format(instruction, input_text, "")], return_tensors="pt").to("cuda")
+        outputs = self.model.generate(**inputs, max_new_tokens=64, use_cache=True)
+        print(self.tokenizer.batch_decode(outputs))
+
+    def save_model_merged(self):
+        if os.path.exists(self.config["hf_model_name"]):
+            shutil.rmtree(self.config["hf_model_name"])
+        self.model.push_to_hub_merged(
+            self.config["hf_model_name"],
+            self.tokenizer,
+            save_method="merged_16bit",
+            token=os.getenv('HF_TOKEN')
+        )
+
+    def push_model_gguf(self):
+        self.model.push_to_hub_gguf(
+            self.config["hf_model_name"],
+            self.tokenizer,
+            quantization_method=self.config["quantization_method"],
+            token=os.getenv('HF_TOKEN')
+        )
+
+    def prepare_modelfile_content(self):
+        output_model = self.config["hf_model_name"]
+        return f"""FROM {output_model}/unsloth.Q5_K_M.gguf
+
+TEMPLATE \"\"\"Below are some instructions that describe some tasks. Write responses that appropriately complete each request.{{{{ if .Prompt }}}}
+
+### Instruction:
+{{{{ .Prompt }}}}
+
+{{{{ end }}}}### Response:
+{{{{ .Response }}}}\"\"\"
+
+PARAMETER stop ""
+PARAMETER stop ""
+PARAMETER stop ""
+PARAMETER stop ""
+PARAMETER stop "<|reserved_special_token_"
+"""
+
+    def create_and_push_ollama_model(self):
+        modelfile_content = self.prepare_modelfile_content()
+        with open('Modelfile', 'w') as file:
+            file.write(modelfile_content)
+
+        subprocess.run(["ollama", "serve"])
+        subprocess.run(["ollama", "create", f"{self.config['ollama_model']}:{self.config['model_parameters']}", "-f", "Modelfile"])
+        subprocess.run(["ollama", "push", f"{self.config['ollama_model']}:{self.config['model_parameters']}"])
+
+    def run(self):
+        self.print_system_info()
+        self.check_gpu()
+        self.check_ram()
+        # self.install_packages()
+        self.prepare_model()
+        self.train_model()
+        self.save_model_merged()
+        self.push_model_gguf()
+        self.create_and_push_ollama_model()
+
+
+def main(args=None):
+    import argparse
+    parser = argparse.ArgumentParser(description='PraisonAI Training Script')
+    parser.add_argument('command', nargs='?', choices=['train'], default='train', help='Command to execute')
+    parser.add_argument('--config', default='config.yaml', help='Path to configuration file')
+    args = parser.parse_args(args)
+
+    if args.command == 'train':
+        ai = train(config_path=args.config)
+        ai.run()
+
+
+if __name__ == '__main__':
+    main()
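
Everything in train.py is driven by config.yaml. This diff does not ship a reference config, so the sketch below is an assumption: every key is one the class actually reads, but all values (model, dataset, and repo names included) are illustrative placeholders only:

    model_name: "unsloth/llama-3-8b-bnb-4bit"    # hypothetical base model
    hf_model_name: "your-hf-user/your-model"     # hypothetical Hugging Face repo
    ollama_model: "your-ollama-user/your-model"  # hypothetical Ollama name
    model_parameters: "8b"
    max_seq_length: 2048
    load_in_4bit: true
    lora_r: 16
    lora_target_modules: ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
    lora_alpha: 16
    lora_dropout: 0
    lora_bias: "none"
    use_gradient_checkpointing: "unsloth"
    random_state: 3407
    use_rslora: false
    loftq_config: null
    dataset:
      - name: "yahma/alpaca-cleaned"             # hypothetical dataset
        split_type: "train"
        processing_func: "format_prompts"
    dataset_text_field: "text"
    dataset_num_proc: 2
    packing: false
    per_device_train_batch_size: 2
    gradient_accumulation_steps: 4
    warmup_steps: 5
    num_train_epochs: 1
    max_steps: 60
    learning_rate: 0.0002
    logging_steps: 1
    optim: "adamw_8bit"
    weight_decay: 0.01
    lr_scheduler_type: "linear"
    seed: 3407
    output_dir: "outputs"
    quantization_method: "q5_k_m"

The quantization_method value here follows the unsloth.Q5_K_M.gguf filename hard-coded in prepare_modelfile_content; if a different method were configured, the FROM line of the generated Modelfile would no longer match the uploaded GGUF file. Once a config is in place, the class can also be driven directly:

    from praisonai.train import train
    ai = train(config_path="config.yaml")
    ai.prepare_model()
    ai.inference("Summarise the following text", "PraisonAI is a multi-agent framework.")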

praisonai/ui/context.py
@@ -97,17 +97,36 @@ class ContextGatherer:
         return modified_ignore_patterns
 
     def get_include_paths(self):
+        """
+        Loads include paths from:
+        1. .praisoncontext (if present, ONLY the files/directories it lists are included)
+        2. .praisoninclude (used when .praisoncontext doesn't exist: the listed paths
+           are included along with all other relevant files, excluding ignore patterns)
+        """
         include_paths = []
-
-        # 1. Load from .praisoninclude
-        include_file = os.path.join(self.directory, '.praisoninclude')
+        include_all = False  # Flag to indicate if we need to include all files
+
+        include_file = os.path.join(self.directory, '.praisoncontext')
         if os.path.exists(include_file):
             with open(include_file, 'r') as f:
                 include_paths.extend(
                     line.strip() for line in f
                     if line.strip() and not line.startswith('#')
                 )
-        return include_paths
+
+        # If .praisoncontext doesn't exist, fall back to .praisoninclude
+        # for including all relevant files
+        if not include_paths:
+            include_file = os.path.join(self.directory, '.praisoninclude')
+            if os.path.exists(include_file):
+                with open(include_file, 'r') as f:
+                    include_paths.extend(
+                        line.strip() for line in f
+                        if line.strip() and not line.startswith('#')
+                    )
+                include_all = True  # Include all files along with specified paths
+
+        return include_paths, include_all
 
     def should_ignore(self, file_path):
         """
@@ -130,61 +149,78 @@ class ContextGatherer:
             any(file_path.endswith(ext) for ext in self.relevant_extensions)
 
     def gather_context(self):
-        """Gather context from relevant files, respecting ignore patterns and include paths."""
+        """
+        Gather context from relevant files, respecting ignore patterns
+        and include options from .praisoninclude and .praisoncontext.
+        """
         context = []
         total_files = 0
         processed_files = 0
+        self.include_paths, include_all = self.get_include_paths()
 
-        if not self.include_paths:
-            # No include paths specified, process the entire directory
-            for root, dirs, files in os.walk(self.directory):
-                total_files += len(files)
-                dirs[:] = [d for d in dirs if not self.should_ignore(os.path.join(root, d))]
-                for file in files:
-                    file_path = os.path.join(root, file)
-                    if not self.should_ignore(file_path) and self.is_relevant_file(file_path):
-                        try:
-                            with open(file_path, 'r', encoding='utf-8') as f:
-                                content = f.read()
-                            context.append(f"File: {file_path}\n\n{content}\n\n{'='*50}\n")
-                            self.included_files.append(Path(file_path).relative_to(self.directory))
-                        except Exception as e:
-                            logger.error(f"Error reading {file_path}: {e}")
-                    processed_files += 1
-                    print(f"\rProcessed {processed_files}/{total_files} files", end="", flush=True)
-        else:
-            # Process specified include paths
+        def add_file_content(file_path):
+            """Helper function to add file content to context."""
+            try:
+                with open(file_path, 'r', encoding='utf-8') as f:
+                    content = f.read()
+                context.append(
+                    f"File: {file_path}\n\n{content}\n\n{'=' * 50}\n"
+                )
+                self.included_files.append(
+                    Path(file_path).relative_to(self.directory)
+                )
+            except Exception as e:
+                logger.error(f"Error reading {file_path}: {e}")
+
+        def process_path(path):
+            """Helper function to process a single path (file or directory)."""
+            nonlocal total_files, processed_files
+            if os.path.isdir(path):
+                for root, dirs, files in os.walk(path):
+                    total_files += len(files)
+                    dirs[:] = [
+                        d
+                        for d in dirs
+                        if not self.should_ignore(os.path.join(root, d))
+                    ]
+                    for file in files:
+                        file_path = os.path.join(root, file)
+                        if not self.should_ignore(file_path) and self.is_relevant_file(file_path):
+                            add_file_content(file_path)
+                        processed_files += 1
+                        print(
+                            f"\rProcessed {processed_files}/{total_files} files",
+                            end="",
+                            flush=True,
+                        )
+            elif os.path.isfile(path) and self.is_relevant_file(path):
+                add_file_content(path)
+                processed_files += 1
+                print(
+                    f"\rProcessed {processed_files}/1 files",
+                    end="",
+                    flush=True,
+                )
+
+        if include_all:
+            # Include ALL relevant files from the entire directory
+            process_path(self.directory)
+
+            # Include files from .praisoninclude specifically
+            for include_path in self.include_paths:
+                full_path = os.path.join(self.directory, include_path)
+                process_path(full_path)
+        elif self.include_paths:
+            # Include only files specified in .praisoncontext
             for include_path in self.include_paths:
                 full_path = os.path.join(self.directory, include_path)
-                if os.path.isdir(full_path):
-                    for root, dirs, files in os.walk(full_path):
-                        total_files += len(files)
-                        dirs[:] = [d for d in dirs if not self.should_ignore(os.path.join(root, d))]
-                        for file in files:
-                            file_path = os.path.join(root, file)
-                            if not self.should_ignore(file_path) and self.is_relevant_file(file_path):
-                                try:
-                                    with open(file_path, 'r', encoding='utf-8') as f:
-                                        content = f.read()
-                                    context.append(f"File: {file_path}\n\n{content}\n\n{'='*50}\n")
-                                    self.included_files.append(Path(file_path).relative_to(self.directory))
-                                except Exception as e:
-                                    logger.error(f"Error reading {file_path}: {e}")
-                            processed_files += 1
-                            print(f"\rProcessed {processed_files}/{total_files} files", end="", flush=True)
-                elif os.path.isfile(full_path) and self.is_relevant_file(full_path):
-                    try:
-                        with open(full_path, 'r', encoding='utf-8') as f:
-                            content = f.read()
-                        context.append(f"File: {full_path}\n\n{content}\n\n{'='*50}\n")
-                        self.included_files.append(Path(full_path).relative_to(self.directory))
-                    except Exception as e:
-                        logger.error(f"Error reading {full_path}: {e}")
-                    processed_files += 1
-                    print(f"\rProcessed {processed_files}/{total_files} files", end="", flush=True)
+                process_path(full_path)
+        else:
+            # No include options, process the entire directory
+            process_path(self.directory)
 
         print()  # New line after progress indicator
-        return '\n'.join(context)
+        return "\n".join(context)
 
     def count_tokens(self, text):
         """Count tokens using a simple whitespace-based tokenizer."""

pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "PraisonAI"
-version = "0.0.57"
+version = "0.0.59rc2"
 description = "PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration."
 authors = ["Mervin Praison"]
 license = ""
@@ -20,7 +20,7 @@ pyautogen = ">=0.2.19"
 crewai = ">=0.32.0"
 markdown = ">=3.5"
 praisonai-tools = ">=0.0.7"
-pyparsing = ">=3.0.0" # Added to fix a colab issue. Temp Fix.
+pyparsing = ">=3.0.0"
 chainlit = {version = "^1.1.301", optional = true}
 gradio = {version = ">=4.26.0", optional = true}
 flask = {version = ">=3.0.0", optional = true}
@@ -89,6 +89,8 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry.scripts]
 praisonai = "praisonai.__main__:main"
+setup-conda-env = "setup.setup_conda_env:main"
+post-install = "setup.post_install:main"
 
 [tool.poetry.extras]
 ui = ["chainlit"]
@@ -100,4 +102,14 @@ openai = ["langchain-openai"]
 anthropic = ["langchain-anthropic"]
 cohere = ["langchain-cohere"]
 chat = ["chainlit", "litellm", "aiosqlite", "greenlet"]
-code = ["chainlit", "litellm", "aiosqlite", "greenlet"]
+code = ["chainlit", "litellm", "aiosqlite", "greenlet"]
+train = ["setup-conda-env"]
+
+[tool.poetry-dynamic-versioning]
+enable = true
+vcs = "git"
+style = "semver"
+
+[tool.poetry.build]
+generate-setup-file = false
+script = "setup/post_install.py"
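
Assuming the new extra and console scripts resolve as declared, installation and the setup hook would look like this (version pin illustrative):

    pip install "praisonai[train]==0.0.59rc2"
    post-install    # console script mapped to setup.post_install:main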

praisonai-0.0.59rc2/setup/post_install.py (new file)
@@ -0,0 +1,20 @@
+import subprocess
+import sys
+import os
+
+def main():
+    try:
+        # Get the absolute path of the current file
+        current_file = os.path.abspath(__file__)
+
+        # Get the directory of the current file
+        script_dir = os.path.dirname(current_file)
+
+        # Construct the path to setup_conda_env.py
+        setup_script = os.path.join(script_dir, 'setup_conda_env.py')
+
+        # Run the setup script; check_call raises CalledProcessError on failure
+        subprocess.check_call([sys.executable, setup_script])
+    except subprocess.CalledProcessError as e:
+        print(f"Error occurred while running the setup script: {e}")
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()