PraisonAI 2.0.61__cp313-cp313-manylinux_2_39_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of PraisonAI might be problematic.
- praisonai/__init__.py +6 -0
- praisonai/__main__.py +10 -0
- praisonai/agents_generator.py +648 -0
- praisonai/api/call.py +292 -0
- praisonai/auto.py +238 -0
- praisonai/chainlit_ui.py +304 -0
- praisonai/cli.py +518 -0
- praisonai/deploy.py +138 -0
- praisonai/inbuilt_tools/__init__.py +24 -0
- praisonai/inbuilt_tools/autogen_tools.py +117 -0
- praisonai/inc/__init__.py +2 -0
- praisonai/inc/config.py +96 -0
- praisonai/inc/models.py +128 -0
- praisonai/public/android-chrome-192x192.png +0 -0
- praisonai/public/android-chrome-512x512.png +0 -0
- praisonai/public/apple-touch-icon.png +0 -0
- praisonai/public/fantasy.svg +3 -0
- praisonai/public/favicon-16x16.png +0 -0
- praisonai/public/favicon-32x32.png +0 -0
- praisonai/public/favicon.ico +0 -0
- praisonai/public/game.svg +3 -0
- praisonai/public/logo_dark.png +0 -0
- praisonai/public/logo_light.png +0 -0
- praisonai/public/movie.svg +3 -0
- praisonai/public/praison-ai-agents-architecture-dark.png +0 -0
- praisonai/public/praison-ai-agents-architecture.png +0 -0
- praisonai/public/thriller.svg +3 -0
- praisonai/setup/__init__.py +1 -0
- praisonai/setup/build.py +21 -0
- praisonai/setup/config.yaml +60 -0
- praisonai/setup/post_install.py +23 -0
- praisonai/setup/setup_conda_env.py +25 -0
- praisonai/setup/setup_conda_env.sh +72 -0
- praisonai/setup.py +16 -0
- praisonai/test.py +105 -0
- praisonai/train.py +276 -0
- praisonai/ui/README.md +21 -0
- praisonai/ui/agents.py +822 -0
- praisonai/ui/callbacks.py +57 -0
- praisonai/ui/chat.py +387 -0
- praisonai/ui/code.py +440 -0
- praisonai/ui/colab.py +474 -0
- praisonai/ui/colab_chainlit.py +81 -0
- praisonai/ui/components/aicoder.py +269 -0
- praisonai/ui/config/.chainlit/config.toml +120 -0
- praisonai/ui/config/.chainlit/translations/bn.json +231 -0
- praisonai/ui/config/.chainlit/translations/en-US.json +229 -0
- praisonai/ui/config/.chainlit/translations/gu.json +231 -0
- praisonai/ui/config/.chainlit/translations/he-IL.json +231 -0
- praisonai/ui/config/.chainlit/translations/hi.json +231 -0
- praisonai/ui/config/.chainlit/translations/kn.json +231 -0
- praisonai/ui/config/.chainlit/translations/ml.json +231 -0
- praisonai/ui/config/.chainlit/translations/mr.json +231 -0
- praisonai/ui/config/.chainlit/translations/ta.json +231 -0
- praisonai/ui/config/.chainlit/translations/te.json +231 -0
- praisonai/ui/config/.chainlit/translations/zh-CN.json +229 -0
- praisonai/ui/config/chainlit.md +1 -0
- praisonai/ui/config/translations/bn.json +231 -0
- praisonai/ui/config/translations/en-US.json +229 -0
- praisonai/ui/config/translations/gu.json +231 -0
- praisonai/ui/config/translations/he-IL.json +231 -0
- praisonai/ui/config/translations/hi.json +231 -0
- praisonai/ui/config/translations/kn.json +231 -0
- praisonai/ui/config/translations/ml.json +231 -0
- praisonai/ui/config/translations/mr.json +231 -0
- praisonai/ui/config/translations/ta.json +231 -0
- praisonai/ui/config/translations/te.json +231 -0
- praisonai/ui/config/translations/zh-CN.json +229 -0
- praisonai/ui/context.py +283 -0
- praisonai/ui/db.py +291 -0
- praisonai/ui/public/fantasy.svg +3 -0
- praisonai/ui/public/game.svg +3 -0
- praisonai/ui/public/logo_dark.png +0 -0
- praisonai/ui/public/logo_light.png +0 -0
- praisonai/ui/public/movie.svg +3 -0
- praisonai/ui/public/praison.css +3 -0
- praisonai/ui/public/thriller.svg +3 -0
- praisonai/ui/realtime.py +476 -0
- praisonai/ui/realtimeclient/__init__.py +653 -0
- praisonai/ui/realtimeclient/realtimedocs.txt +1484 -0
- praisonai/ui/realtimeclient/tools.py +236 -0
- praisonai/ui/sql_alchemy.py +707 -0
- praisonai/ui/tools.md +133 -0
- praisonai/version.py +1 -0
- praisonai-2.0.61.dist-info/LICENSE +20 -0
- praisonai-2.0.61.dist-info/METADATA +679 -0
- praisonai-2.0.61.dist-info/RECORD +89 -0
- praisonai-2.0.61.dist-info/WHEEL +4 -0
- praisonai-2.0.61.dist-info/entry_points.txt +5 -0
praisonai/test.py
ADDED
@@ -0,0 +1,105 @@
import yaml
import os
from rich import print
from dotenv import load_dotenv
from crewai import Agent, Task, Crew
load_dotenv()
import autogen
config_list = [
    {
        'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini"),
        'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
        'api_key': os.environ.get("OPENAI_API_KEY")
    }
]

def generate_crew_and_kickoff(agent_file):
    """
    This function generates a crew of agents and kicks off tasks based on the configuration provided in a YAML file.

    Parameters:
    agent_file (str): The path to the YAML file containing the configuration for the agents and tasks.

    Returns:
    str: The result of the last task executed by the crew.
    """

    with open(agent_file, 'r') as f:
        config = yaml.safe_load(f)

    topic = config['topic']
    framework = config['framework']

    agents = {}
    tasks = []
    if framework == "autogen":
        # Load the LLM configuration dynamically
        print(config_list)
        llm_config = {"config_list": config_list}

        for role, details in config['roles'].items():
            agent_name = details['role'].format(topic=topic).replace("{topic}", topic)
            agent_goal = details['goal'].format(topic=topic)
            # Creating an AssistantAgent for each role dynamically
            agents[role] = autogen.AssistantAgent(
                name=agent_name,
                llm_config=llm_config,
                system_message=details['backstory'].format(topic=topic)+". Reply \"TERMINATE\" in the end when everything is done.",
            )

            # Preparing tasks for initiate_chats
            for task_name, task_details in details.get('tasks', {}).items():
                description_filled = task_details['description'].format(topic=topic)
                expected_output_filled = task_details['expected_output'].format(topic=topic)

                chat_task = {
                    "recipient": agents[role],
                    "message": description_filled,
                    "summary_method": "last_msg",  # Customize as needed
                    # Additional fields like carryover can be added based on dependencies
                }
                tasks.append(chat_task)

        # Assuming the user proxy agent is set up as per your requirements
        user = autogen.UserProxyAgent(
            name="User",
            human_input_mode="NEVER",
            is_termination_msg=lambda x: (x.get("content") or "").rstrip().endswith("TERMINATE"),
            code_execution_config={
                "work_dir": "coding",
                "use_docker": False,
            },
            # additional setup for the user proxy agent
        )
        response = user.initiate_chats(tasks)
        result = "### Output ###\n"+response[-1].summary if hasattr(response[-1], 'summary') else ""
    else:
        for role, details in config['roles'].items():
            role_filled = details['role'].format(topic=topic)
            goal_filled = details['goal'].format(topic=topic)
            backstory_filled = details['backstory'].format(topic=topic)

            # Assume tools are loaded and handled here as per your requirements
            agent = Agent(role=role_filled, goal=goal_filled, backstory=backstory_filled)
            agents[role] = agent

            for task_name, task_details in details.get('tasks', {}).items():
                description_filled = task_details['description'].format(topic=topic)
                expected_output_filled = task_details['expected_output'].format(topic=topic)

                task = Task(description=description_filled, expected_output=expected_output_filled, agent=agent)
                tasks.append(task)

        crew = Crew(
            agents=list(agents.values()),
            tasks=tasks,
            verbose=2
        )

        result = crew.kickoff()
    return result

if __name__ == "__main__":
    agent_file = "agents.yaml"
    result = generate_crew_and_kickoff(agent_file)
    print(result)
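For reference, generate_crew_and_kickoff expects an agents.yaml with a topic, a framework, and a roles map whose entries carry role, goal, backstory, and a tasks map of description/expected_output pairs. The sketch below writes a minimal, hypothetical config (the topic, role name, and task text are invented for illustration; they are not shipped with the package) and shows how the function would be invoked:

# Hypothetical agents.yaml matching the keys test.py reads; all values are placeholders.
import yaml

example_config = {
    "topic": "AI in healthcare",
    "framework": "crewai",  # or "autogen"
    "roles": {
        "researcher": {
            "role": "{topic} Researcher",
            "goal": "Collect recent developments about {topic}",
            "backstory": "You are an analyst focused on {topic}",
            "tasks": {
                "research_task": {
                    "description": "Summarise the current state of {topic}",
                    "expected_output": "A short report about {topic}",
                },
            },
        },
    },
}

with open("agents.yaml", "w") as f:
    yaml.safe_dump(example_config, f, sort_keys=False)

# result = generate_crew_and_kickoff("agents.yaml")  # needs OPENAI_API_KEY and the chosen framework installed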
praisonai/train.py
ADDED
@@ -0,0 +1,276 @@
import subprocess
import os
import sys
import yaml
import torch
import shutil
from transformers import TextStreamer
from unsloth import FastLanguageModel, is_bfloat16_supported
from trl import SFTTrainer
from transformers import TrainingArguments
from datasets import load_dataset, concatenate_datasets, Dataset
from psutil import virtual_memory

class train:
    def __init__(self, config_path="config.yaml"):
        self.load_config(config_path)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model, self.tokenizer = None, None

    def load_config(self, path):
        with open(path, "r") as file:
            self.config = yaml.safe_load(file)

    def print_system_info(self):
        print(f"PyTorch version: {torch.__version__}")
        print(f"CUDA version: {torch.version.cuda}")
        if torch.cuda.is_available():
            device_capability = torch.cuda.get_device_capability()
            print(f"CUDA Device Capability: {device_capability}")
        else:
            print("CUDA is not available")

        python_version = sys.version
        pip_version = subprocess.check_output(['pip', '--version']).decode().strip()
        python_path = sys.executable
        pip_path = subprocess.check_output(['which', 'pip']).decode().strip()
        print(f"Python Version: {python_version}")
        print(f"Pip Version: {pip_version}")
        print(f"Python Path: {python_path}")
        print(f"Pip Path: {pip_path}")

    def check_gpu(self):
        gpu_stats = torch.cuda.get_device_properties(0)
        print(f"GPU = {gpu_stats.name}. Max memory = {round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)} GB.")

    def check_ram(self):
        ram_gb = virtual_memory().total / 1e9
        print('Your runtime has {:.1f} gigabytes of available RAM\n'.format(ram_gb))
        if ram_gb < 20:
            print('Not using a high-RAM runtime')
        else:
            print('You are using a high-RAM runtime!')

    # def install_packages(self):
    #     subprocess.run(["pip", "install", "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git@4e570be9ae4ced8cdc64e498125708e34942befc"])
    #     subprocess.run(["pip", "install", "--no-deps", "trl<0.9.0", "peft==0.12.0", "accelerate==0.33.0", "bitsandbytes==0.43.3"])

    def prepare_model(self):
        self.model, self.tokenizer = FastLanguageModel.from_pretrained(
            model_name=self.config["model_name"],
            max_seq_length=self.config["max_seq_length"],
            dtype=None,
            load_in_4bit=self.config["load_in_4bit"]
        )
        self.model = FastLanguageModel.get_peft_model(
            self.model,
            r=self.config["lora_r"],
            target_modules=self.config["lora_target_modules"],
            lora_alpha=self.config["lora_alpha"],
            lora_dropout=self.config["lora_dropout"],
            bias=self.config["lora_bias"],
            use_gradient_checkpointing=self.config["use_gradient_checkpointing"],
            random_state=self.config["random_state"],
            use_rslora=self.config["use_rslora"],
            loftq_config=self.config["loftq_config"],
        )

    def process_dataset(self, dataset_info):
        dataset_name = dataset_info["name"]
        split_type = dataset_info.get("split_type", "train")
        processing_func = getattr(self, dataset_info.get("processing_func", "format_prompts"))
        rename = dataset_info.get("rename", {})
        filter_data = dataset_info.get("filter_data", False)
        filter_column_value = dataset_info.get("filter_column_value", "id")
        filter_value = dataset_info.get("filter_value", "alpaca")
        num_samples = dataset_info.get("num_samples", 20000)

        dataset = load_dataset(dataset_name, split=split_type)

        if rename:
            dataset = dataset.rename_columns(rename)
        if filter_data:
            dataset = dataset.filter(lambda example: filter_value in example[filter_column_value]).shuffle(seed=42).select(range(num_samples))
        dataset = dataset.map(processing_func, batched=True)
        return dataset

    def format_prompts(self, examples):
        alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""
        texts = [alpaca_prompt.format(ins, inp, out) + self.tokenizer.eos_token for ins, inp, out in zip(examples["instruction"], examples["input"], examples["output"])]
        return {"text": texts}

    def load_datasets(self):
        datasets = []
        for dataset_info in self.config["dataset"]:
            datasets.append(self.process_dataset(dataset_info))
        return concatenate_datasets(datasets)

    def train_model(self):
        dataset = self.load_datasets()
        trainer = SFTTrainer(
            model=self.model,
            tokenizer=self.tokenizer,
            train_dataset=dataset,
            dataset_text_field=self.config["dataset_text_field"],
            max_seq_length=self.config["max_seq_length"],
            dataset_num_proc=self.config["dataset_num_proc"],
            packing=self.config["packing"],
            args=TrainingArguments(
                per_device_train_batch_size=self.config["per_device_train_batch_size"],
                gradient_accumulation_steps=self.config["gradient_accumulation_steps"],
                warmup_steps=self.config["warmup_steps"],
                num_train_epochs=self.config["num_train_epochs"],
                max_steps=self.config["max_steps"],
                learning_rate=self.config["learning_rate"],
                fp16=not is_bfloat16_supported(),
                bf16=is_bfloat16_supported(),
                logging_steps=self.config["logging_steps"],
                optim=self.config["optim"],
                weight_decay=self.config["weight_decay"],
                lr_scheduler_type=self.config["lr_scheduler_type"],
                seed=self.config["seed"],
                output_dir=self.config["output_dir"],
            ),
        )
        trainer.train()
        self.model.save_pretrained("lora_model")  # Local saving
        self.tokenizer.save_pretrained("lora_model")

    def inference(self, instruction, input_text):
        FastLanguageModel.for_inference(self.model)
        alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""
        inputs = self.tokenizer([alpaca_prompt.format(instruction, input_text, "")], return_tensors="pt").to("cuda")
        outputs = self.model.generate(**inputs, max_new_tokens=64, use_cache=True)
        print(self.tokenizer.batch_decode(outputs))

    def load_model(self):
        """Loads the model and tokenizer using the FastLanguageModel library."""
        from unsloth import FastLanguageModel
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=self.config["output_dir"],
            max_seq_length=2048,
            dtype=None,
            load_in_4bit=self.config["load_in_4bit"],
        )
        return model, tokenizer

    def save_model_merged(self):
        if os.path.exists(self.config["hf_model_name"]):
            shutil.rmtree(self.config["hf_model_name"])
        self.model.push_to_hub_merged(
            self.config["hf_model_name"],
            self.tokenizer,
            save_method="merged_16bit",
            token=os.getenv('HF_TOKEN')
        )

    def push_model_gguf(self):
        self.model.push_to_hub_gguf(
            self.config["hf_model_name"],
            self.tokenizer,
            quantization_method=self.config["quantization_method"],
            token=os.getenv('HF_TOKEN')
        )

    def save_model_gguf(self):
        self.model.save_pretrained_gguf(
            self.config["hf_model_name"],
            self.tokenizer,
            quantization_method="q4_k_m"
        )

    def prepare_modelfile_content(self):
        output_model = self.config["hf_model_name"]
        gguf_path = f"{output_model}/unsloth.Q4_K_M.gguf"

        # Check if the GGUF file exists. If not, generate it ## TODO Multiple Quantisation other than Q4_K_M.gguf
        if not os.path.exists(gguf_path):
            self.model, self.tokenizer = self.load_model()
            self.save_model_gguf()
        return f"""FROM {output_model}/unsloth.Q4_K_M.gguf

TEMPLATE \"\"\"Below are some instructions that describe some tasks. Write responses that appropriately complete each request.{{{{ if .Prompt }}}}

### Instruction:
{{{{ .Prompt }}}}

{{{{ end }}}}### Response:
{{{{ .Response }}}}\"\"\"

PARAMETER stop ""
PARAMETER stop ""
PARAMETER stop ""
PARAMETER stop ""
PARAMETER stop "<|reserved_special_token_"
"""

    def create_and_push_ollama_model(self):
        modelfile_content = self.prepare_modelfile_content()
        with open('Modelfile', 'w') as file:
            file.write(modelfile_content)

        subprocess.run(["ollama", "serve"])
        subprocess.run(["ollama", "create", f"{self.config['ollama_model']}:{self.config['model_parameters']}", "-f", "Modelfile"])
        subprocess.run(["ollama", "push", f"{self.config['ollama_model']}:{self.config['model_parameters']}"])

    def run(self):
        self.print_system_info()
        self.check_gpu()
        self.check_ram()
        # self.install_packages()
        if self.config.get("train", "true").lower() == "true":
            self.prepare_model()
            self.train_model()

        if self.config.get("huggingface_save", "true").lower() == "true":
            # self.model, self.tokenizer = self.load_model()
            self.save_model_merged()

        if self.config.get("huggingface_save_gguf", "true").lower() == "true":
            # self.model, self.tokenizer = self.load_model()
            self.push_model_gguf()

        # if self.config.get("save_gguf", "true").lower() == "true": ## TODO
        #     self.model, self.tokenizer = self.load_model()
        #     self.save_model_gguf()

        # if self.config.get("save_merged", "true").lower() == "true": ## TODO
        #     self.model, self.tokenizer = self.load_model()
        #     self.save_model_merged()

        if self.config.get("ollama_save", "true").lower() == "true":
            self.create_and_push_ollama_model()


def main():
    import argparse
    parser = argparse.ArgumentParser(description='PraisonAI Training Script')
    parser.add_argument('command', choices=['train'], help='Command to execute')
    parser.add_argument('--config', default='config.yaml', help='Path to configuration file')
    args = parser.parse_args()

    if args.command == 'train':
        ai = train(config_path=args.config)
        ai.run()


if __name__ == '__main__':
    main()
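The train class pulls every hyperparameter from a YAML file via load_config, and run() additionally reads the string flags train, huggingface_save, huggingface_save_gguf, and ollama_save. The package ships its own praisonai/setup/config.yaml (not reproduced here); the sketch below is only a hypothetical stand-in that enumerates the keys this script accesses, with placeholder model, dataset, and repository names:

# Hypothetical config.yaml for train.py; every value is a placeholder, not a recommendation.
import yaml

example_train_config = {
    "model_name": "unsloth/llama-3-8b-bnb-4bit",   # assumed example base model
    "max_seq_length": 2048,
    "load_in_4bit": True,
    "lora_r": 16,
    "lora_target_modules": ["q_proj", "k_proj", "v_proj", "o_proj"],
    "lora_alpha": 16,
    "lora_dropout": 0.0,
    "lora_bias": "none",
    "use_gradient_checkpointing": "unsloth",
    "random_state": 3407,
    "use_rslora": False,
    "loftq_config": None,
    "dataset": [
        {"name": "yahma/alpaca-cleaned", "split_type": "train"},  # assumed example dataset
    ],
    "dataset_text_field": "text",
    "dataset_num_proc": 2,
    "packing": False,
    "per_device_train_batch_size": 2,
    "gradient_accumulation_steps": 4,
    "warmup_steps": 5,
    "num_train_epochs": 1,
    "max_steps": 60,
    "learning_rate": 2e-4,
    "logging_steps": 1,
    "optim": "adamw_8bit",
    "weight_decay": 0.01,
    "lr_scheduler_type": "linear",
    "seed": 3407,
    "output_dir": "outputs",
    "hf_model_name": "your-username/your-model",   # placeholder Hugging Face repo
    "quantization_method": "q4_k_m",
    "ollama_model": "your-model",                   # placeholder Ollama model name
    "model_parameters": "8b",
    "train": "true",
    "huggingface_save": "false",
    "huggingface_save_gguf": "false",
    "ollama_save": "false",
}

with open("config.yaml", "w") as f:
    yaml.safe_dump(example_train_config, f, sort_keys=False)

Based on main(), a file like this would be picked up by running the script as `python train.py train --config config.yaml`, which instantiates train and calls run().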
praisonai/ui/README.md
ADDED
@@ -0,0 +1,21 @@
# Changes to DB

The following columns are renamed or modified between the first and second versions of the code:

| Table Name | Original Column Name | New Column Name |
|------------|----------------------|-----------------|
| `users`    | `metadata`           | `meta`          |
| `users`    | `created_at`         | `createdAt`     |
| `threads`  | `metadata`           | `meta`          |
| `threads`  | `created_at`         | `createdAt`     |
| `steps`    | `metadata`           | `meta`          |
| `steps`    | `start_time`         | `startTime`     |
| `steps`    | `end_time`           | `endTime`       |
| `elements` | `metadata`           | (Removed)       |

Key changes:
1. The `metadata` column in several tables is renamed to `meta`.
2. Timestamps (`created_at`, `start_time`, and `end_time`) are renamed to camelCase (`createdAt`, `startTime`, and `endTime`).
3. Some columns are removed (e.g., `metadata` in `elements`).

These changes make the column names consistent with a single camelCase naming convention.
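As a purely illustrative sketch of what the renamed schema might look like as SQLAlchemy models (the package's actual mapping lives in praisonai/ui/sql_alchemy.py; the column types and primary keys below are assumptions, not taken from that file):

# Illustrative only: column names follow the renamed schema above; types and keys are assumed.
from sqlalchemy import Column, DateTime, String
from sqlalchemy.types import JSON
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(String, primary_key=True)       # assumed primary key
    meta = Column("meta", JSON)                  # formerly "metadata"
    createdAt = Column("createdAt", DateTime)    # formerly "created_at"

class Step(Base):
    __tablename__ = "steps"
    id = Column(String, primary_key=True)        # assumed primary key
    meta = Column("meta", JSON)                   # formerly "metadata"
    startTime = Column("startTime", DateTime)     # formerly "start_time"
    endTime = Column("endTime", DateTime)         # formerly "end_time"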
|