PraisonAI 2.0.63.tar.gz → 2.0.65.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of PraisonAI might be problematic.
- {praisonai-2.0.63 → praisonai-2.0.65}/PKG-INFO +3 -1
- {praisonai-2.0.63 → praisonai-2.0.65}/README.md +2 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/cli.py +14 -4
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/deploy.py +1 -1
- praisonai-2.0.65/praisonai/setup/setup_conda_env.sh +105 -0
- praisonai-2.0.65/praisonai/train.py +374 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/pyproject.toml +2 -2
- praisonai-2.0.63/praisonai/setup/setup_conda_env.sh +0 -72
- praisonai-2.0.63/praisonai/train.py +0 -276
- {praisonai-2.0.63 → praisonai-2.0.65}/LICENSE +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/__init__.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/__main__.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/agents_generator.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/api/call.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/auto.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/chainlit_ui.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/inbuilt_tools/__init__.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/inbuilt_tools/autogen_tools.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/inc/__init__.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/inc/config.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/inc/models.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/public/android-chrome-192x192.png +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/public/android-chrome-512x512.png +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/public/apple-touch-icon.png +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/public/fantasy.svg +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/public/favicon-16x16.png +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/public/favicon-32x32.png +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/public/favicon.ico +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/public/game.svg +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/public/logo_dark.png +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/public/logo_light.png +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/public/movie.svg +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/public/praison-ai-agents-architecture-dark.png +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/public/praison-ai-agents-architecture.png +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/public/thriller.svg +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/setup/__init__.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/setup/build.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/setup/config.yaml +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/setup/post_install.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/setup/setup_conda_env.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/setup.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/test.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/README.md +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/agents.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/callbacks.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/chat.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/code.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/colab.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/colab_chainlit.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/components/aicoder.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/.chainlit/config.toml +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/.chainlit/translations/bn.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/.chainlit/translations/en-US.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/.chainlit/translations/gu.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/.chainlit/translations/he-IL.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/.chainlit/translations/hi.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/.chainlit/translations/kn.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/.chainlit/translations/ml.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/.chainlit/translations/mr.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/.chainlit/translations/ta.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/.chainlit/translations/te.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/.chainlit/translations/zh-CN.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/chainlit.md +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/translations/bn.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/translations/en-US.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/translations/gu.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/translations/he-IL.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/translations/hi.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/translations/kn.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/translations/ml.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/translations/mr.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/translations/ta.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/translations/te.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/config/translations/zh-CN.json +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/context.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/db.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/public/fantasy.svg +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/public/game.svg +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/public/logo_dark.png +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/public/logo_light.png +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/public/movie.svg +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/public/praison.css +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/public/thriller.svg +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/realtime.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/realtimeclient/__init__.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/realtimeclient/realtimedocs.txt +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/realtimeclient/tools.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/sql_alchemy.py +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/ui/tools.md +0 -0
- {praisonai-2.0.63 → praisonai-2.0.65}/praisonai/version.py +0 -0
{praisonai-2.0.63 → praisonai-2.0.65}/PKG-INFO

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: PraisonAI
-Version: 2.0.63
+Version: 2.0.65
 Summary: PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration.
 Author: Mervin Praison
 Requires-Python: >=3.10,<3.13
@@ -208,6 +208,8 @@ const agent = new Agent({ instructions: 'You are a helpful AI assistant' });
 agent.start('Write a movie script about a robot in Mars');
 ```
 
+
+
 ## AI Agents Flow
 
 ```mermaid
````
{praisonai-2.0.63 → praisonai-2.0.65}/README.md

````diff
@@ -118,6 +118,8 @@ const agent = new Agent({ instructions: 'You are a helpful AI assistant' });
 agent.start('Write a movie script about a robot in Mars');
 ```
 
+
+
 ## AI Agents Flow
 
 ```mermaid
````
{praisonai-2.0.63 → praisonai-2.0.65}/praisonai/cli.py

```diff
@@ -25,7 +25,7 @@ CALL_MODULE_AVAILABLE = False
 CREWAI_AVAILABLE = False
 AUTOGEN_AVAILABLE = False
 PRAISONAI_AVAILABLE = False
-
+TRAIN_AVAILABLE = False
 try:
     # Create necessary directories and set CHAINLIT_APP_ROOT
     if "CHAINLIT_APP_ROOT" not in os.environ:
@@ -72,6 +72,12 @@ try:
 except ImportError:
     pass
 
+try:
+    import accelerate
+    TRAIN_AVAILABLE = True
+except ImportError:
+    pass
+
 logging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO'), format='%(asctime)s - %(levelname)s - %(message)s')
 logging.getLogger('alembic').setLevel(logging.ERROR)
 logging.getLogger('gradio').setLevel(logging.ERROR)
@@ -393,9 +399,13 @@ class PraisonAI:
             sys.exit(0)
 
         elif args.command == 'train':
-
-
-
+            if not TRAIN_AVAILABLE:
+                print("[red]ERROR: Train feature is not installed. Install with:[/red]")
+                print("\npip install \"praisonai[train]\"\n")
+                sys.exit(1)
+            package_root = os.path.dirname(os.path.abspath(__file__))
+            config_yaml_destination = os.path.join(os.getcwd(), 'config.yaml')
+
 
         elif args.command == 'ui':
             if not CHAINLIT_AVAILABLE:
```
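The three hunks above add an optional-feature gate: `TRAIN_AVAILABLE` is set only when the training stack can be imported, and the `train` subcommand exits with an install hint instead of a traceback when it cannot. A minimal, self-contained sketch of that guard pattern (a standalone illustration, not the actual cli.py module):

```python
# Minimal sketch of the optional-dependency guard introduced above.
# Standalone illustration only; cli.py wires this into its argparse handling.
import sys

TRAIN_AVAILABLE = False
try:
    # `accelerate` ships with the training extra, so its presence is used as the probe.
    import accelerate  # noqa: F401
    TRAIN_AVAILABLE = True
except ImportError:
    pass

def require_train_feature():
    """Exit with an install hint when the [train] extra is missing."""
    if not TRAIN_AVAILABLE:
        print('ERROR: Train feature is not installed. Install with:')
        print('\npip install "praisonai[train]"\n')
        sys.exit(1)
```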
{praisonai-2.0.63 → praisonai-2.0.65}/praisonai/deploy.py

```diff
@@ -56,7 +56,7 @@ class CloudDeployer:
         file.write("FROM python:3.11-slim\n")
         file.write("WORKDIR /app\n")
         file.write("COPY . .\n")
-        file.write("RUN pip install flask praisonai==2.0.63 gunicorn markdown\n")
+        file.write("RUN pip install flask praisonai==2.0.65 gunicorn markdown\n")
         file.write("EXPOSE 8080\n")
         file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
 
```
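For reference, the complete Dockerfile text that `CloudDeployer` emits after this bump, assembled in one place (the surrounding deploy logic is unchanged in this diff):

```python
# The exact lines CloudDeployer writes to its Dockerfile after the version bump.
dockerfile = (
    "FROM python:3.11-slim\n"
    "WORKDIR /app\n"
    "COPY . .\n"
    "RUN pip install flask praisonai==2.0.65 gunicorn markdown\n"
    "EXPOSE 8080\n"
    'CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n'
)
print(dockerfile)
```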
praisonai-2.0.65/praisonai/setup/setup_conda_env.sh (new file, 105 lines added)

```bash
#!/bin/bash

# Detect OS and architecture
if [[ "$OSTYPE" == "darwin"* ]]; then
    # macOS
    if [[ $(uname -m) == 'arm64' ]]; then
        MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh"
    else
        MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh"
    fi
elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
    # Linux
    MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh"

    # Install libcurl development package if not present (Debian based)
    if command -v dpkg &> /dev/null; then
        if ! dpkg -s libcurl4-openssl-dev &> /dev/null; then
            echo "libcurl4-openssl-dev is not installed. Installing..."
            sudo apt-get update
            sudo apt-get install -y libcurl4-openssl-dev
        else
            echo "libcurl4-openssl-dev is already installed."
        fi
    else
        echo "Non-Debian based Linux detected. Please ensure libcurl development libraries are installed."
    fi

    # Check if ollama is installed and executable; if not, install it
    if ! command -v ollama &> /dev/null; then
        echo "Ollama is not installed. Installing Ollama..."
        curl -fsSL https://ollama.com/install.sh | sh

        # Generate SSH key non-interactively only if it doesn't already exist
        if [ ! -f ~/.ssh/id_ed25519 ]; then
            echo "Generating SSH key for Ollama..."
            ssh-keygen -t ed25519 -N "" -f ~/.ssh/id_ed25519 -q
        else
            echo "SSH key ~/.ssh/id_ed25519 already exists. Skipping generation."
        fi
        echo "Copying SSH key to /usr/share/ollama/.ollama..."
        sudo cp ~/.ssh/id_ed25519 /usr/share/ollama/.ollama
    else
        echo "Ollama is already installed."
    fi

elif [[ "$OSTYPE" == "msys" || "$OSTYPE" == "win32" ]]; then
    # Windows
    MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe"
    echo "Windows detected. Please run this script in Git Bash or WSL."
    exit 1
else
    echo "Unsupported operating system: $OSTYPE"
    exit 1
fi

# Check if conda is installed
if ! command -v conda &> /dev/null; then
    echo "Conda is not installed. Installing Miniconda..."
    wget "$MINICONDA_URL" -O ~/miniconda.sh
    bash ~/miniconda.sh -b -p "$HOME/miniconda"
    source "$HOME/miniconda/bin/activate"
    conda init
else
    echo "Conda is already installed."
fi

# Create and activate the Conda environment
ENV_NAME="praison_env"
if conda info --envs | grep -q "$ENV_NAME"; then
    echo "Environment $ENV_NAME already exists. Recreating..."
    conda env remove -y -n "$ENV_NAME"
    if [[ "$OSTYPE" == "darwin"* ]]; then
        conda create --name "$ENV_NAME" python=3.10 pytorch=2.3.0 -c pytorch -y
    elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
        conda create --name "$ENV_NAME" python=3.10 pytorch=2.3.0 cudatoolkit=11.8 -c pytorch -c nvidia -y
    fi
else
    echo "Creating new environment $ENV_NAME..."
    if [[ "$OSTYPE" == "darwin"* ]]; then
        conda create --name "$ENV_NAME" python=3.10 pytorch=2.3.0 -c pytorch -y
    elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
        conda create --name "$ENV_NAME" python=3.10 pytorch=2.3.0 cudatoolkit=11.8 -c pytorch -c nvidia -y
    fi
fi

# Activate the environment
source "$HOME/miniconda/bin/activate" "$ENV_NAME"

# Install cmake via conda
echo "Installing cmake..."
conda install -y cmake

# Get full path of pip within the activated environment
PIP_FULL_PATH=$(conda run -n "$ENV_NAME" which pip)

# Install other packages using pip
$PIP_FULL_PATH install --upgrade pip
$PIP_FULL_PATH install "xformers==0.0.26.post1"
$PIP_FULL_PATH install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git@038e6d4c8d40207a87297ab3aaf787c19b1006d1"
$PIP_FULL_PATH install --no-deps "trl<0.9.0" peft accelerate bitsandbytes
$PIP_FULL_PATH install unsloth_zoo
$PIP_FULL_PATH install cut_cross_entropy
$PIP_FULL_PATH install sentencepiece protobuf datasets huggingface_hub hf_transfer

echo "Setup completed successfully!"
```
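A hypothetical way to drive this script from Python, for instance from a setup helper; the package's real wrapper (`praisonai/setup/setup_conda_env.py`) is unchanged and not shown in this diff, so the snippet below is an assumption rather than its actual code:

```python
# Hypothetical invocation sketch; the real wrapper (setup_conda_env.py) is not part of this diff.
import subprocess
from pathlib import Path

# Assumes this file sits next to the setup/ directory inside the installed package.
script = Path(__file__).resolve().parent / "setup" / "setup_conda_env.sh"
subprocess.run(["bash", str(script)], check=True)
```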
praisonai-2.0.65/praisonai/train.py (new file, 374 lines added)

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script finetunes a model using Unsloth's fast training framework.
It supports both ShareGPT and Alpaca-style datasets by converting raw conversation
data into plain-text prompts using a chat template, then pre-tokenizing the prompts.
Extra debug logging is added to help trace the root cause of errors.
"""

import os
import sys
import yaml
import torch
import shutil
import subprocess
from transformers import TextStreamer
from unsloth import FastLanguageModel, is_bfloat16_supported
from trl import SFTTrainer
from transformers import TrainingArguments
from datasets import load_dataset, concatenate_datasets
from psutil import virtual_memory
from unsloth.chat_templates import standardize_sharegpt, get_chat_template
from functools import partial

#####################################
# Step 1: Formatting Raw Conversations
#####################################
def formatting_prompts_func(examples, tokenizer):
    """
    Converts each example's conversation into a single plain-text prompt.
    If the example has a "conversations" field, process it as ShareGPT-style.
    Otherwise, assume Alpaca-style data with "instruction", "input", and "output" fields.
    """
    print("DEBUG: formatting_prompts_func() received batch with keys:", list(examples.keys()))
    texts = []
    # Check if the example has a "conversations" field.
    if "conversations" in examples:
        for convo in examples["conversations"]:
            try:
                formatted = tokenizer.apply_chat_template(
                    convo,
                    tokenize=False,  # Return a plain string
                    add_generation_prompt=False
                )
            except Exception as e:
                print(f"ERROR in apply_chat_template (conversations): {e}")
                formatted = ""
            # Flatten list if necessary
            if isinstance(formatted, list):
                formatted = formatted[0] if len(formatted) == 1 else "\n".join(formatted)
            texts.append(formatted)
    else:
        # Assume Alpaca format: use "instruction", "input", and "output" keys.
        instructions = examples.get("instruction", [])
        inputs_list = examples.get("input", [])
        outputs_list = examples.get("output", [])
        # If any field is missing, replace with empty string.
        for ins, inp, out in zip(instructions, inputs_list, outputs_list):
            # Create a conversation-like structure.
            convo = [
                {"role": "user", "content": ins + (f"\nInput: {inp}" if inp.strip() != "" else "")},
                {"role": "assistant", "content": out}
            ]
            try:
                formatted = tokenizer.apply_chat_template(
                    convo,
                    tokenize=False,
                    add_generation_prompt=False
                )
            except Exception as e:
                print(f"ERROR in apply_chat_template (alpaca): {e}")
                formatted = ""
            if isinstance(formatted, list):
                formatted = formatted[0] if len(formatted) == 1 else "\n".join(formatted)
            texts.append(formatted)
    if texts:
        print("DEBUG: Raw texts sample (first 200 chars):", texts[0][:200])
    return {"text": texts}

#####################################
# Step 2: Tokenizing the Prompts
#####################################
def tokenize_function(examples, hf_tokenizer, max_length):
    """
    Tokenizes a batch of text prompts with padding and truncation enabled.
    """
    flat_texts = []
    for t in examples["text"]:
        if isinstance(t, list):
            t = t[0] if len(t) == 1 else " ".join(t)
        flat_texts.append(t)
    print("DEBUG: Tokenizing a batch of size:", len(flat_texts))
    tokenized = hf_tokenizer(
        flat_texts,
        padding="max_length",
        truncation=True,
        max_length=max_length,
        return_tensors="pt",
    )
    tokenized = {key: value.tolist() for key, value in tokenized.items()}
    sample_key = list(tokenized.keys())[0]
    print("DEBUG: Tokenized sample (first 10 tokens of", sample_key, "):", tokenized[sample_key][0][:10])
    return tokenized

#####################################
# Main Training Class
#####################################
class TrainModel:
    def __init__(self, config_path="config.yaml"):
        self.load_config(config_path)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = None
        self.hf_tokenizer = None    # The underlying HF tokenizer
        self.chat_tokenizer = None  # Chat wrapper for formatting

    def load_config(self, path):
        with open(path, "r") as file:
            self.config = yaml.safe_load(file)
        print("DEBUG: Loaded config:", self.config)

    def print_system_info(self):
        print("DEBUG: PyTorch version:", torch.__version__)
        print("DEBUG: CUDA version:", torch.version.cuda)
        if torch.cuda.is_available():
            print("DEBUG: CUDA Device Capability:", torch.cuda.get_device_capability())
        else:
            print("DEBUG: CUDA is not available")
        print("DEBUG: Python Version:", sys.version)
        print("DEBUG: Python Path:", sys.executable)

    def check_gpu(self):
        gpu_stats = torch.cuda.get_device_properties(0)
        print(f"DEBUG: GPU = {gpu_stats.name}. Max memory = {round(gpu_stats.total_memory/(1024**3),3)} GB.")

    def check_ram(self):
        ram_gb = virtual_memory().total / 1e9
        print(f"DEBUG: Your runtime has {ram_gb:.1f} gigabytes of available RAM")
        if ram_gb < 20:
            print("DEBUG: Not using a high-RAM runtime")
        else:
            print("DEBUG: You are using a high-RAM runtime!")

    def prepare_model(self):
        print("DEBUG: Preparing model and tokenizer...")
        self.model, original_tokenizer = FastLanguageModel.from_pretrained(
            model_name=self.config["model_name"],
            max_seq_length=self.config["max_seq_length"],
            dtype=None,
            load_in_4bit=self.config["load_in_4bit"],
        )
        print("DEBUG: Model and original tokenizer loaded.")
        if original_tokenizer.pad_token is None:
            original_tokenizer.pad_token = original_tokenizer.eos_token
        original_tokenizer.model_max_length = self.config["max_seq_length"]
        self.chat_tokenizer = get_chat_template(original_tokenizer, chat_template="llama-3.1")
        self.hf_tokenizer = original_tokenizer
        print("DEBUG: Chat tokenizer created; HF tokenizer saved.")
        self.model = FastLanguageModel.get_peft_model(
            self.model,
            r=16,
            target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
            lora_alpha=16,
            lora_dropout=0,
            bias="none",
            use_gradient_checkpointing="unsloth",
            random_state=3407,
            use_rslora=False,
            loftq_config=None,
        )
        print("DEBUG: LoRA adapters added.")

    def process_dataset(self, dataset_info):
        dataset_name = dataset_info["name"]
        split_type = dataset_info.get("split_type", "train")
        print(f"DEBUG: Loading dataset '{dataset_name}' split '{split_type}'...")
        dataset = load_dataset(dataset_name, split=split_type)
        print("DEBUG: Dataset columns:", dataset.column_names)
        if "conversations" in dataset.column_names:
            print("DEBUG: Standardizing dataset (ShareGPT style)...")
            dataset = standardize_sharegpt(dataset)
        else:
            print("DEBUG: Dataset does not have 'conversations'; assuming Alpaca format.")
        print("DEBUG: Applying formatting function to dataset...")
        format_func = partial(formatting_prompts_func, tokenizer=self.chat_tokenizer)
        dataset = dataset.map(format_func, batched=True, remove_columns=dataset.column_names)
        sample = dataset[0]
        print("DEBUG: Sample processed example keys:", list(sample.keys()))
        if "text" in sample:
            print("DEBUG: Sample processed 'text' type:", type(sample["text"]))
            print("DEBUG: Sample processed 'text' content (first 200 chars):", sample["text"][:200])
        else:
            print("DEBUG: Processed sample does not contain 'text'.")
        return dataset

    def tokenize_dataset(self, dataset):
        print("DEBUG: Tokenizing the entire dataset...")
        tokenized_dataset = dataset.map(
            lambda examples: tokenize_function(examples, self.hf_tokenizer, self.config["max_seq_length"]),
            batched=True
        )
        tokenized_dataset = tokenized_dataset.remove_columns(["text"])
        print("DEBUG: Tokenized dataset sample keys:", tokenized_dataset[0].keys())
        return tokenized_dataset

    def load_datasets(self):
        datasets = []
        for dataset_info in self.config["dataset"]:
            print("DEBUG: Processing dataset info:", dataset_info)
            datasets.append(self.process_dataset(dataset_info))
        combined = concatenate_datasets(datasets)
        print("DEBUG: Combined dataset has", len(combined), "examples.")
        return combined

    def train_model(self):
        print("DEBUG: Starting training...")
        raw_dataset = self.load_datasets()
        tokenized_dataset = self.tokenize_dataset(raw_dataset)
        print("DEBUG: Dataset tokenization complete.")
        training_args = TrainingArguments(
            per_device_train_batch_size=2,
            gradient_accumulation_steps=4,
            warmup_steps=5,
            max_steps=60,
            learning_rate=2e-4,
            fp16=not is_bfloat16_supported(),
            bf16=is_bfloat16_supported(),
            logging_steps=1,
            optim="adamw_8bit",
            weight_decay=0.01,
            lr_scheduler_type="linear",
            seed=3407,
            output_dir="outputs",
            report_to="none",
            remove_unused_columns=False,
        )
        # Since the dataset is pre-tokenized, we supply a dummy dataset_text_field.
        trainer = SFTTrainer(
            model=self.model,
            tokenizer=self.hf_tokenizer,
            train_dataset=tokenized_dataset,
            dataset_text_field="input_ids",  # Dummy field since data is numeric
            max_seq_length=self.config["max_seq_length"],
            dataset_num_proc=1,  # Use a single process to avoid pickling issues
            packing=False,
            args=training_args,
        )
        from unsloth.chat_templates import train_on_responses_only
        trainer = train_on_responses_only(
            trainer,
            instruction_part="<|start_header_id|>user<|end_header_id|>\n\n",
            response_part="<|start_header_id|>assistant<|end_header_id|>\n\n",
        )
        print("DEBUG: Beginning trainer.train() ...")
        trainer.train()
        print("DEBUG: Training complete. Saving model and tokenizer locally...")
        self.model.save_pretrained("lora_model")
        self.hf_tokenizer.save_pretrained("lora_model")
        print("DEBUG: Saved model and tokenizer to 'lora_model'.")

    def inference(self, instruction, input_text):
        FastLanguageModel.for_inference(self.model)
        messages = [{"role": "user", "content": f"{instruction}\n\nInput: {input_text}"}]
        inputs = self.hf_tokenizer.apply_chat_template(
            messages,
            tokenize=True,
            add_generation_prompt=True,
            return_tensors="pt"
        ).to("cuda")
        outputs = self.model.generate(
            input_ids=inputs,
            max_new_tokens=64,
            use_cache=True,
            temperature=1.5,
            min_p=0.1
        )
        print("DEBUG: Inference output:", self.hf_tokenizer.batch_decode(outputs))

    def load_model(self):
        from unsloth import FastLanguageModel
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=self.config["output_dir"],
            max_seq_length=2048,
            dtype=None,
            load_in_4bit=self.config["load_in_4bit"],
        )
        return model, tokenizer

    def save_model_merged(self):
        if os.path.exists(self.config["hf_model_name"]):
            shutil.rmtree(self.config["hf_model_name"])
        self.model.push_to_hub_merged(
            self.config["hf_model_name"],
            self.hf_tokenizer,
            save_method="merged_16bit",
            token=os.getenv("HF_TOKEN")
        )

    def push_model_gguf(self):
        self.model.push_to_hub_gguf(
            self.config["hf_model_name"],
            self.hf_tokenizer,
            quantization_method=self.config["quantization_method"],
            token=os.getenv("HF_TOKEN")
        )

    def save_model_gguf(self):
        self.model.save_pretrained_gguf(
            self.config["hf_model_name"],
            self.hf_tokenizer,
            quantization_method="q4_k_m"
        )

    def prepare_modelfile_content(self):
        output_model = self.config["hf_model_name"]
        gguf_path = f"{output_model}/unsloth.Q4_K_M.gguf"
        if not os.path.exists(gguf_path):
            self.model, self.hf_tokenizer = self.load_model()
            self.save_model_gguf()
        return f"""FROM {output_model}/unsloth.Q4_K_M.gguf

TEMPLATE \"\"\"Below are some instructions that describe some tasks. Write responses that appropriately complete each request.{{{{ if .Prompt }}}}

### Instruction:
{{{{ .Prompt }}}}

{{{{ end }}}}### Response:
{{{{ .Response }}}}\"\"\"

PARAMETER stop ""
PARAMETER stop ""
PARAMETER stop ""
PARAMETER stop ""
PARAMETER stop "<|reserved_special_token_"
"""

    def create_and_push_ollama_model(self):
        modelfile_content = self.prepare_modelfile_content()
        with open("Modelfile", "w") as file:
            file.write(modelfile_content)
        subprocess.run(["ollama", "serve"])
        subprocess.run(["ollama", "create", f"{self.config['ollama_model']}:{self.config['model_parameters']}", "-f", "Modelfile"])
        subprocess.run(["ollama", "push", f"{self.config['ollama_model']}:{self.config['model_parameters']}"])

    def run(self):
        self.print_system_info()
        self.check_gpu()
        self.check_ram()
        if self.config.get("train", "true").lower() == "true":
            self.prepare_model()
            self.train_model()
        if self.config.get("huggingface_save", "true").lower() == "true":
            self.save_model_merged()
        if self.config.get("huggingface_save_gguf", "true").lower() == "true":
            self.push_model_gguf()
        if self.config.get("ollama_save", "true").lower() == "true":
            self.create_and_push_ollama_model()

def main():
    import argparse
    parser = argparse.ArgumentParser(description="PraisonAI Training Script")
    parser.add_argument("command", choices=["train"], help="Command to execute")
    parser.add_argument("--config", default="config.yaml", help="Path to configuration file")
    parser.add_argument("--model", type=str, help="Model name")
    parser.add_argument("--hf", type=str, help="Hugging Face model name")
    parser.add_argument("--ollama", type=str, help="Ollama model name")
    parser.add_argument("--dataset", type=str, help="Dataset name for training")
    args = parser.parse_args()

    if args.command == "train":
        trainer_obj = TrainModel(config_path=args.config)
        trainer_obj.run()

if __name__ == "__main__":
    main()
```
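The new `train.py` drives everything from `config.yaml`. Below is a sketch that writes a config with exactly the keys `TrainModel` reads above (`model_name`, `max_seq_length`, `load_in_4bit`, `dataset`, `output_dir`, `hf_model_name`, `quantization_method`, `ollama_model`, `model_parameters`, plus the `train`/`huggingface_save`/`huggingface_save_gguf`/`ollama_save` switches); the values are illustrative placeholders, not the defaults shipped in `praisonai/setup/config.yaml`:

```python
# Illustrative config only: keys mirror what TrainModel reads above,
# values are placeholders rather than PraisonAI's shipped defaults.
import yaml

config = {
    "model_name": "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit",  # any Unsloth-loadable model (assumption)
    "max_seq_length": 2048,
    "load_in_4bit": True,
    "dataset": [
        {"name": "yahma/alpaca-cleaned", "split_type": "train"},  # Alpaca-style example dataset
    ],
    "output_dir": "outputs",
    "hf_model_name": "your-hf-username/your-finetuned-model",      # placeholder
    "quantization_method": "q4_k_m",
    "ollama_model": "your-ollama-namespace/your-finetuned-model",  # placeholder
    "model_parameters": "8b",
    "train": "true",
    "huggingface_save": "false",
    "huggingface_save_gguf": "false",
    "ollama_save": "false",
}

with open("config.yaml", "w") as f:
    yaml.safe_dump(config, f, sort_keys=False)
```

With such a file in the working directory, `python train.py train --config config.yaml` exercises the pipeline through the `main()` entry point shown above.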
{praisonai-2.0.63 → praisonai-2.0.65}/pyproject.toml

```diff
@@ -1,6 +1,6 @@
 [project]
 name = "PraisonAI"
-version = "2.0.63"
+version = "2.0.65"
 description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
 readme = "README.md"
 license = ""
@@ -84,7 +84,7 @@ autogen = ["pyautogen>=0.2.19", "praisonai-tools>=0.0.7", "crewai"]
 
 [tool.poetry]
 name = "PraisonAI"
-version = "2.0.63"
+version = "2.0.65"
 description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
 authors = ["Mervin Praison"]
 license = ""
```
praisonai-2.0.63/praisonai/setup/setup_conda_env.sh (removed, 72 lines deleted)

```bash
#!/bin/bash

# Detect OS and architecture
if [[ "$OSTYPE" == "darwin"* ]]; then
    # macOS
    if [[ $(uname -m) == 'arm64' ]]; then
        MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh"
    else
        MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh"
    fi
elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
    # Linux
    MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh"
elif [[ "$OSTYPE" == "msys" || "$OSTYPE" == "win32" ]]; then
    # Windows
    MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe"
    echo "Windows detected. Please run this script in Git Bash or WSL."
    exit 1
else
    echo "Unsupported operating system: $OSTYPE"
    exit 1
fi

# Check if conda is already installed
if ! command -v conda &> /dev/null; then
    echo "Conda is not installed. Installing Miniconda..."
    wget $MINICONDA_URL -O ~/miniconda.sh
    bash ~/miniconda.sh -b -p $HOME/miniconda
    source $HOME/miniconda/bin/activate
    conda init
else
    echo "Conda is already installed."
fi

# Create and activate the Conda environment
ENV_NAME="praison_env"
if conda info --envs | grep -q $ENV_NAME; then
    echo "Environment $ENV_NAME already exists. Recreating..."
    conda env remove -y -n $ENV_NAME  # Remove existing environment
    if [[ "$OSTYPE" == "darwin"* ]]; then
        # macOS (both Intel and M1/M2)
        conda create --name $ENV_NAME python=3.10 pytorch=2.3.0 -c pytorch -y
    elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
        # Linux
        conda create --name $ENV_NAME python=3.10 pytorch=2.3.0 cudatoolkit=11.8 -c pytorch -c nvidia -y
    fi
    # conda activate $ENV_NAME
else
    echo "Creating new environment $ENV_NAME..."
    if [[ "$OSTYPE" == "darwin"* ]]; then
        # macOS (both Intel and M1/M2)
        conda create --name $ENV_NAME python=3.10 pytorch=2.3.0 -c pytorch -y
    elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
        # Linux
        conda create --name $ENV_NAME python=3.10 pytorch=2.3.0 cudatoolkit=11.8 -c pytorch -c nvidia -y
    fi
    # conda activate $ENV_NAME
fi

# source $HOME/miniconda/bin/activate $ENV_NAME

# Get full path of pip
PIP_FULL_PATH=$(conda run -n $ENV_NAME which pip)

# Install other packages within the activated environment
# Use PIP_FULL_PATH to run pip commands
$PIP_FULL_PATH install --upgrade pip
$PIP_FULL_PATH install "xformers==0.0.26.post1"
$PIP_FULL_PATH install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git@4e570be9ae4ced8cdc64e498125708e34942befc"
$PIP_FULL_PATH install --no-deps "trl<0.9.0" peft accelerate bitsandbytes

echo "Setup completed successfully!"
```
praisonai-2.0.63/praisonai/train.py (removed, 276 lines deleted)

```python
import subprocess
import os
import sys
import yaml
import torch
import shutil
from transformers import TextStreamer
from unsloth import FastLanguageModel, is_bfloat16_supported
from trl import SFTTrainer
from transformers import TrainingArguments
from datasets import load_dataset, concatenate_datasets, Dataset
from psutil import virtual_memory

class train:
    def __init__(self, config_path="config.yaml"):
        self.load_config(config_path)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model, self.tokenizer = None, None

    def load_config(self, path):
        with open(path, "r") as file:
            self.config = yaml.safe_load(file)

    def print_system_info(self):
        print(f"PyTorch version: {torch.__version__}")
        print(f"CUDA version: {torch.version.cuda}")
        if torch.cuda.is_available():
            device_capability = torch.cuda.get_device_capability()
            print(f"CUDA Device Capability: {device_capability}")
        else:
            print("CUDA is not available")

        python_version = sys.version
        pip_version = subprocess.check_output(['pip', '--version']).decode().strip()
        python_path = sys.executable
        pip_path = subprocess.check_output(['which', 'pip']).decode().strip()
        print(f"Python Version: {python_version}")
        print(f"Pip Version: {pip_version}")
        print(f"Python Path: {python_path}")
        print(f"Pip Path: {pip_path}")

    def check_gpu(self):
        gpu_stats = torch.cuda.get_device_properties(0)
        print(f"GPU = {gpu_stats.name}. Max memory = {round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)} GB.")

    def check_ram(self):
        ram_gb = virtual_memory().total / 1e9
        print('Your runtime has {:.1f} gigabytes of available RAM\n'.format(ram_gb))
        if ram_gb < 20:
            print('Not using a high-RAM runtime')
        else:
            print('You are using a high-RAM runtime!')

    # def install_packages(self):
    #     subprocess.run(["pip", "install", "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git@4e570be9ae4ced8cdc64e498125708e34942befc"])
    #     subprocess.run(["pip", "install", "--no-deps", "trl<0.9.0", "peft==0.12.0", "accelerate==0.33.0", "bitsandbytes==0.43.3"])

    def prepare_model(self):
        self.model, self.tokenizer = FastLanguageModel.from_pretrained(
            model_name=self.config["model_name"],
            max_seq_length=self.config["max_seq_length"],
            dtype=None,
            load_in_4bit=self.config["load_in_4bit"]
        )
        self.model = FastLanguageModel.get_peft_model(
            self.model,
            r=self.config["lora_r"],
            target_modules=self.config["lora_target_modules"],
            lora_alpha=self.config["lora_alpha"],
            lora_dropout=self.config["lora_dropout"],
            bias=self.config["lora_bias"],
            use_gradient_checkpointing=self.config["use_gradient_checkpointing"],
            random_state=self.config["random_state"],
            use_rslora=self.config["use_rslora"],
            loftq_config=self.config["loftq_config"],
        )

    def process_dataset(self, dataset_info):
        dataset_name = dataset_info["name"]
        split_type = dataset_info.get("split_type", "train")
        processing_func = getattr(self, dataset_info.get("processing_func", "format_prompts"))
        rename = dataset_info.get("rename", {})
        filter_data = dataset_info.get("filter_data", False)
        filter_column_value = dataset_info.get("filter_column_value", "id")
        filter_value = dataset_info.get("filter_value", "alpaca")
        num_samples = dataset_info.get("num_samples", 20000)

        dataset = load_dataset(dataset_name, split=split_type)

        if rename:
            dataset = dataset.rename_columns(rename)
        if filter_data:
            dataset = dataset.filter(lambda example: filter_value in example[filter_column_value]).shuffle(seed=42).select(range(num_samples))
        dataset = dataset.map(processing_func, batched=True)
        return dataset

    def format_prompts(self, examples):
        alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""
        texts = [alpaca_prompt.format(ins, inp, out) + self.tokenizer.eos_token for ins, inp, out in zip(examples["instruction"], examples["input"], examples["output"])]
        return {"text": texts}

    def load_datasets(self):
        datasets = []
        for dataset_info in self.config["dataset"]:
            datasets.append(self.process_dataset(dataset_info))
        return concatenate_datasets(datasets)

    def train_model(self):
        dataset = self.load_datasets()
        trainer = SFTTrainer(
            model=self.model,
            tokenizer=self.tokenizer,
            train_dataset=dataset,
            dataset_text_field=self.config["dataset_text_field"],
            max_seq_length=self.config["max_seq_length"],
            dataset_num_proc=self.config["dataset_num_proc"],
            packing=self.config["packing"],
            args=TrainingArguments(
                per_device_train_batch_size=self.config["per_device_train_batch_size"],
                gradient_accumulation_steps=self.config["gradient_accumulation_steps"],
                warmup_steps=self.config["warmup_steps"],
                num_train_epochs=self.config["num_train_epochs"],
                max_steps=self.config["max_steps"],
                learning_rate=self.config["learning_rate"],
                fp16=not is_bfloat16_supported(),
                bf16=is_bfloat16_supported(),
                logging_steps=self.config["logging_steps"],
                optim=self.config["optim"],
                weight_decay=self.config["weight_decay"],
                lr_scheduler_type=self.config["lr_scheduler_type"],
                seed=self.config["seed"],
                output_dir=self.config["output_dir"],
            ),
        )
        trainer.train()
        self.model.save_pretrained("lora_model")  # Local saving
        self.tokenizer.save_pretrained("lora_model")

    def inference(self, instruction, input_text):
        FastLanguageModel.for_inference(self.model)
        alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""
        inputs = self.tokenizer([alpaca_prompt.format(instruction, input_text, "")], return_tensors="pt").to("cuda")
        outputs = self.model.generate(**inputs, max_new_tokens=64, use_cache=True)
        print(self.tokenizer.batch_decode(outputs))

    def load_model(self):
        """Loads the model and tokenizer using the FastLanguageModel library."""
        from unsloth import FastLanguageModel
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=self.config["output_dir"],
            max_seq_length=2048,
            dtype=None,
            load_in_4bit=self.config["load_in_4bit"],
        )
        return model, tokenizer

    def save_model_merged(self):
        if os.path.exists(self.config["hf_model_name"]):
            shutil.rmtree(self.config["hf_model_name"])
        self.model.push_to_hub_merged(
            self.config["hf_model_name"],
            self.tokenizer,
            save_method="merged_16bit",
            token=os.getenv('HF_TOKEN')
        )

    def push_model_gguf(self):
        self.model.push_to_hub_gguf(
            self.config["hf_model_name"],
            self.tokenizer,
            quantization_method=self.config["quantization_method"],
            token=os.getenv('HF_TOKEN')
        )

    def save_model_gguf(self):
        self.model.save_pretrained_gguf(
            self.config["hf_model_name"],
            self.tokenizer,
            quantization_method="q4_k_m"
        )

    def prepare_modelfile_content(self):
        output_model = self.config["hf_model_name"]
        gguf_path = f"{output_model}/unsloth.Q4_K_M.gguf"

        # Check if the GGUF file exists. If not, generate it ## TODO Multiple Quantisation other than Q4_K_M.gguf
        if not os.path.exists(gguf_path):
            self.model, self.tokenizer = self.load_model()
            self.save_model_gguf()
        return f"""FROM {output_model}/unsloth.Q4_K_M.gguf

TEMPLATE \"\"\"Below are some instructions that describe some tasks. Write responses that appropriately complete each request.{{{{ if .Prompt }}}}

### Instruction:
{{{{ .Prompt }}}}

{{{{ end }}}}### Response:
{{{{ .Response }}}}\"\"\"

PARAMETER stop ""
PARAMETER stop ""
PARAMETER stop ""
PARAMETER stop ""
PARAMETER stop "<|reserved_special_token_"
"""

    def create_and_push_ollama_model(self):
        modelfile_content = self.prepare_modelfile_content()
        with open('Modelfile', 'w') as file:
            file.write(modelfile_content)

        subprocess.run(["ollama", "serve"])
        subprocess.run(["ollama", "create", f"{self.config['ollama_model']}:{self.config['model_parameters']}", "-f", "Modelfile"])
        subprocess.run(["ollama", "push", f"{self.config['ollama_model']}:{self.config['model_parameters']}"])

    def run(self):
        self.print_system_info()
        self.check_gpu()
        self.check_ram()
        # self.install_packages()
        if self.config.get("train", "true").lower() == "true":
            self.prepare_model()
            self.train_model()

        if self.config.get("huggingface_save", "true").lower() == "true":
            # self.model, self.tokenizer = self.load_model()
            self.save_model_merged()

        if self.config.get("huggingface_save_gguf", "true").lower() == "true":
            # self.model, self.tokenizer = self.load_model()
            self.push_model_gguf()

        # if self.config.get("save_gguf", "true").lower() == "true": ## TODO
        #     self.model, self.tokenizer = self.load_model()
        #     self.save_model_gguf()

        # if self.config.get("save_merged", "true").lower() == "true": ## TODO
        #     self.model, self.tokenizer = self.load_model()
        #     self.save_model_merged()

        if self.config.get("ollama_save", "true").lower() == "true":
            self.create_and_push_ollama_model()


def main():
    import argparse
    parser = argparse.ArgumentParser(description='PraisonAI Training Script')
    parser.add_argument('command', choices=['train'], help='Command to execute')
    parser.add_argument('--config', default='config.yaml', help='Path to configuration file')
    args = parser.parse_args()

    if args.command == 'train':
        ai = train(config_path=args.config)
        ai.run()


if __name__ == '__main__':
    main()
```