PraisonAI 0.0.59rc11__tar.gz → 0.0.64__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of PraisonAI might be problematic; consult the package registry's advisory page for more details.

Files changed (47):
  1. {praisonai-0.0.59rc11 → praisonai-0.0.64}/PKG-INFO +12 -4
  2. {praisonai-0.0.59rc11 → praisonai-0.0.64}/README.md +11 -3
  3. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/cli.py +19 -13
  4. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/deploy.py +1 -1
  5. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/inc/config.py +3 -3
  6. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/setup/config.yaml +1 -1
  7. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/train.py +36 -0
  8. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/ui/chat.py +5 -5
  9. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/ui/code.py +5 -5
  10. {praisonai-0.0.59rc11 → praisonai-0.0.64}/pyproject.toml +1 -1
  11. {praisonai-0.0.59rc11 → praisonai-0.0.64}/LICENSE +0 -0
  12. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/__init__.py +0 -0
  13. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/__main__.py +0 -0
  14. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/agents_generator.py +0 -0
  15. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/auto.py +0 -0
  16. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/chainlit_ui.py +0 -0
  17. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/inbuilt_tools/__init__.py +0 -0
  18. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/inbuilt_tools/autogen_tools.py +0 -0
  19. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/inc/__init__.py +0 -0
  20. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/inc/models.py +0 -0
  21. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/public/android-chrome-192x192.png +0 -0
  22. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/public/android-chrome-512x512.png +0 -0
  23. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/public/apple-touch-icon.png +0 -0
  24. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/public/fantasy.svg +0 -0
  25. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/public/favicon-16x16.png +0 -0
  26. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/public/favicon-32x32.png +0 -0
  27. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/public/favicon.ico +0 -0
  28. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/public/game.svg +0 -0
  29. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/public/logo_dark.png +0 -0
  30. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/public/logo_light.png +0 -0
  31. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/public/movie.svg +0 -0
  32. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/public/thriller.svg +0 -0
  33. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/setup/__init__.py +0 -0
  34. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/setup/build.py +0 -0
  35. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/setup/post_install.py +0 -0
  36. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/setup/setup_conda_env.py +0 -0
  37. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/setup/setup_conda_env.sh +0 -0
  38. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/test.py +0 -0
  39. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/ui/context.py +0 -0
  40. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/ui/public/fantasy.svg +0 -0
  41. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/ui/public/game.svg +0 -0
  42. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/ui/public/logo_dark.png +0 -0
  43. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/ui/public/logo_light.png +0 -0
  44. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/ui/public/movie.svg +0 -0
  45. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/ui/public/thriller.svg +0 -0
  46. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/ui/sql_alchemy.py +0 -0
  47. {praisonai-0.0.59rc11 → praisonai-0.0.64}/praisonai/version.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: PraisonAI
3
- Version: 0.0.59rc11
3
+ Version: 0.0.64
4
4
  Summary: PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration.
5
5
  Author: Mervin Praison
6
6
  Requires-Python: >=3.10,<3.13
@@ -78,6 +78,11 @@ Praison AI, leveraging both AutoGen and CrewAI or any other agent framework, rep
78
78
  | **Chat** | Chat with 100+ LLMs, single AI Agent | [https://docs.praison.ai/ui/chat](https://docs.praison.ai/ui/chat) |
79
79
  | **Code** | Chat with entire Codebase, single AI Agent | [https://docs.praison.ai/ui/code](https://docs.praison.ai/ui/code) |
80
80
 
81
+ | Other Features | Description | Docs |
82
+ |---|---|---|
83
+ | **Train** | Fine-tune LLMs using your custom data | [https://docs.praison.ai/train](https://docs.praison.ai/train) |
84
+
85
+
81
86
  ## Google Colab Multi Agents
82
87
 
83
88
  | | Cookbook | Open in Colab |
@@ -87,9 +92,12 @@ Praison AI, leveraging both AutoGen and CrewAI or any other agent framework, rep
87
92
 
88
93
  ## Install
89
94
 
90
- | PraisonAI | PraisonAI Code | PraisonAI Chat |
91
- | --- | --- | --- |
92
- | `pip install praisonai` | `pip install "praisonai[code]"` | `pip install "praisonai[chat]"` |
95
+ | | Installation |
96
+ |---|---|
97
+ | **PraisonAI** | `pip install praisonai` |
98
+ | **PraisonAI Code** | `pip install "praisonai[code]"` |
99
+ | **PraisonAI Chat** | `pip install "praisonai[chat]"` |
100
+ | **PraisonAI Train** | `pip install "praisonai[train]"` |
93
101
 
94
102
  ## Key Features
95
103
 
@@ -36,6 +36,11 @@ Praison AI, leveraging both AutoGen and CrewAI or any other agent framework, rep
36
36
  | **Chat** | Chat with 100+ LLMs, single AI Agent | [https://docs.praison.ai/ui/chat](https://docs.praison.ai/ui/chat) |
37
37
  | **Code** | Chat with entire Codebase, single AI Agent | [https://docs.praison.ai/ui/code](https://docs.praison.ai/ui/code) |
38
38
 
39
+ | Other Features | Description | Docs |
40
+ |---|---|---|
41
+ | **Train** | Fine-tune LLMs using your custom data | [https://docs.praison.ai/train](https://docs.praison.ai/train) |
42
+
43
+
39
44
  ## Google Colab Multi Agents
40
45
 
41
46
  | | Cookbook | Open in Colab |
@@ -45,9 +50,12 @@ Praison AI, leveraging both AutoGen and CrewAI or any other agent framework, rep
45
50
 
46
51
  ## Install
47
52
 
48
- | PraisonAI | PraisonAI Code | PraisonAI Chat |
49
- | --- | --- | --- |
50
- | `pip install praisonai` | `pip install "praisonai[code]"` | `pip install "praisonai[chat]"` |
53
+ | | Installation |
54
+ |---|---|
55
+ | **PraisonAI** | `pip install praisonai` |
56
+ | **PraisonAI Code** | `pip install "praisonai[code]"` |
57
+ | **PraisonAI Chat** | `pip install "praisonai[chat]"` |
58
+ | **PraisonAI Train** | `pip install "praisonai[train]"` |
51
59
 
52
60
  ## Key Features
53
61
 
@@ -133,18 +133,24 @@ class PraisonAI:
133
133
  package_root = os.path.dirname(os.path.abspath(__file__))
134
134
  config_yaml_destination = os.path.join(os.getcwd(), 'config.yaml')
135
135
 
136
- # Generate config.yaml using the function
137
- config = generate_config(
138
- model_name=args.model,
139
- hf_model_name=args.hf,
140
- ollama_model_name=args.ollama,
141
- dataset=[{
142
- "name": args.dataset
143
- }]
144
- )
145
- with open('config.yaml', 'w') as f:
146
- yaml.dump(config, f, default_flow_style=False, indent=2)
147
-
136
+ # Create config.yaml only if it doesn't exist or --model or --dataset is provided
137
+ if not os.path.exists(config_yaml_destination) or args.model or args.dataset:
138
+ config = generate_config(
139
+ model_name=args.model,
140
+ hf_model_name=args.hf,
141
+ ollama_model_name=args.ollama,
142
+ dataset=[{
143
+ "name": args.dataset
144
+ }]
145
+ )
146
+ with open('config.yaml', 'w') as f:
147
+ yaml.dump(config, f, default_flow_style=False, indent=2)
148
+
149
+ # Overwrite huggingface_save and ollama_save if --hf or --ollama are provided
150
+ if args.hf:
151
+ config["huggingface_save"] = "true"
152
+ if args.ollama:
153
+ config["ollama_save"] = "true"
148
154
 
149
155
  if 'init' in sys.argv:
150
156
  from praisonai.setup.setup_conda_env import main as setup_conda_main
@@ -171,7 +177,7 @@ class PraisonAI:
171
177
  env = os.environ.copy()
172
178
  env['PYTHONUNBUFFERED'] = '1'
173
179
 
174
- stream_subprocess(['conda', 'run', '--no-capture-output', '--name', 'praison_env', 'python', '-u', train_script_path, 'train'] + train_args, env=env)
180
+ stream_subprocess(['conda', 'run', '--no-capture-output', '--name', 'praison_env', 'python', '-u', train_script_path, 'train'], env=env)
175
181
  return
176
182
 
177
183
  invocation_cmd = "praisonai"
@@ -56,7 +56,7 @@ class CloudDeployer:
56
56
  file.write("FROM python:3.11-slim\n")
57
57
  file.write("WORKDIR /app\n")
58
58
  file.write("COPY . .\n")
59
- file.write("RUN pip install flask praisonai==0.0.59rc11 gunicorn markdown\n")
59
+ file.write("RUN pip install flask praisonai==0.0.64 gunicorn markdown\n")
60
60
  file.write("EXPOSE 8080\n")
61
61
  file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
62
62
 
@@ -38,9 +38,9 @@ def generate_config(
38
38
  """Generates the configuration for PraisonAI with dynamic overrides."""
39
39
 
40
40
  config = {
41
- "ollama_save": ollama_save or "false",
42
- "huggingface_save": huggingface_save or "false",
43
- "train": train or "false",
41
+ "ollama_save": ollama_save or "true",
42
+ "huggingface_save": huggingface_save or "true",
43
+ "train": train or "true",
44
44
 
45
45
  "model_name": model_name or "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit",
46
46
  "hf_model_name": hf_model_name or "mervinpraison/llama-3.1-tamilan-8B-test",
@@ -10,7 +10,7 @@ model_parameters: "8b"
10
10
  max_seq_length: 2048
11
11
  load_in_4bit: true
12
12
  lora_r: 16
13
- lora_target_modules:
13
+ lora_target_modules:
14
14
  - "q_proj"
15
15
  - "k_proj"
16
16
  - "v_proj"
@@ -142,6 +142,8 @@ class train:
142
142
  ),
143
143
  )
144
144
  trainer.train()
145
+ self.model.save_pretrained("lora_model") # Local saving
146
+ self.tokenizer.save_pretrained("lora_model")
145
147
 
146
148
  def inference(self, instruction, input_text):
147
149
  FastLanguageModel.for_inference(self.model)
@@ -158,6 +160,17 @@ class train:
158
160
  inputs = self.tokenizer([alpaca_prompt.format(instruction, input_text, "")], return_tensors="pt").to("cuda")
159
161
  outputs = self.model.generate(**inputs, max_new_tokens=64, use_cache=True)
160
162
  print(self.tokenizer.batch_decode(outputs))
163
+
164
+ def load_model(self):
165
+ """Loads the model and tokenizer using the FastLanguageModel library."""
166
+ from unsloth import FastLanguageModel
167
+ model, tokenizer = FastLanguageModel.from_pretrained(
168
+ model_name=self.config["output_dir"],
169
+ max_seq_length=2048,
170
+ dtype=None,
171
+ load_in_4bit=self.config["load_in_4bit"],
172
+ )
173
+ return model, tokenizer
161
174
 
162
175
  def save_model_merged(self):
163
176
  if os.path.exists(self.config["hf_model_name"]):
@@ -176,9 +189,22 @@ class train:
176
189
  quantization_method=self.config["quantization_method"],
177
190
  token=os.getenv('HF_TOKEN')
178
191
  )
192
+
193
+ def save_model_gguf(self):
194
+ self.model.save_pretrained_gguf(
195
+ self.config["hf_model_name"],
196
+ self.tokenizer,
197
+ quantization_method="q4_k_m"
198
+ )
179
199
 
180
200
  def prepare_modelfile_content(self):
181
201
  output_model = self.config["hf_model_name"]
202
+ gguf_path = f"{output_model}/unsloth.Q4_K_M.gguf"
203
+
204
+ # Check if the GGUF file exists. If not, generate it ## TODO Multiple Quantisation other than Q4_K_M.gguf
205
+ if not os.path.exists(gguf_path):
206
+ self.model, self.tokenizer = self.load_model()
207
+ self.save_model_gguf()
182
208
  return f"""FROM {output_model}/unsloth.Q4_K_M.gguf
183
209
 
184
210
  TEMPLATE \"\"\"Below are some instructions that describe some tasks. Write responses that appropriately complete each request.{{{{ if .Prompt }}}}
@@ -215,10 +241,20 @@ PARAMETER stop "<|reserved_special_token_"
215
241
  self.train_model()
216
242
 
217
243
  if self.config.get("huggingface_save", "true").lower() == "true":
244
+ # self.model, self.tokenizer = self.load_model()
218
245
  self.save_model_merged()
219
246
 
220
247
  if self.config.get("huggingface_save_gguf", "true").lower() == "true":
248
+ # self.model, self.tokenizer = self.load_model()
221
249
  self.push_model_gguf()
250
+
251
+ # if self.config.get("save_gguf", "true").lower() == "true": ## TODO
252
+ # self.model, self.tokenizer = self.load_model()
253
+ # self.save_model_gguf()
254
+
255
+ # if self.config.get("save_merged", "true").lower() == "true": ## TODO
256
+ # self.model, self.tokenizer = self.load_model()
257
+ # self.save_model_merged()
222
258
 
223
259
  if self.config.get("ollama_save", "true").lower() == "true":
224
260
  self.create_and_push_ollama_model()
@@ -73,11 +73,11 @@ def initialize_db():
73
73
  type TEXT NOT NULL,
74
74
  threadId UUID NOT NULL,
75
75
  parentId UUID,
76
- disableFeedback BOOLEAN NOT NULL,
77
- streaming BOOLEAN NOT NULL,
78
- waitForAnswer BOOLEAN,
79
- isError BOOLEAN,
80
- metadata JSONB,
76
+ disableFeedback BOOLEAN NOT NULL DEFAULT 0,
77
+ streaming BOOLEAN NOT NULL DEFAULT 0,
78
+ waitForAnswer BOOLEAN DEFAULT 0,
79
+ isError BOOLEAN NOT NULL DEFAULT 0,
80
+ metadata JSONB DEFAULT '{}',
81
81
  tags TEXT[],
82
82
  input TEXT,
83
83
  output TEXT,
@@ -74,11 +74,11 @@ def initialize_db():
74
74
  type TEXT NOT NULL,
75
75
  threadId UUID NOT NULL,
76
76
  parentId UUID,
77
- disableFeedback BOOLEAN NOT NULL,
78
- streaming BOOLEAN NOT NULL,
79
- waitForAnswer BOOLEAN,
80
- isError BOOLEAN,
81
- metadata JSONB,
77
+ disableFeedback BOOLEAN NOT NULL DEFAULT 0,
78
+ streaming BOOLEAN NOT NULL DEFAULT 0,
79
+ waitForAnswer BOOLEAN DEFAULT 0,
80
+ isError BOOLEAN NOT NULL DEFAULT 0,
81
+ metadata JSONB DEFAULT '{}',
82
82
  tags TEXT[],
83
83
  input TEXT,
84
84
  output TEXT,
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "PraisonAI"
3
- version = "0.0.59rc11"
3
+ version = "0.0.64"
4
4
  description = "PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration."
5
5
  authors = ["Mervin Praison"]
6
6
  license = ""
File without changes