PraisonAI 0.0.59rc11__cp312-cp312-manylinux_2_35_x86_64.whl → 0.0.61__cp312-cp312-manylinux_2_35_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of PraisonAI might be problematic.

praisonai/cli.py CHANGED
@@ -133,18 +133,24 @@ class PraisonAI:
         package_root = os.path.dirname(os.path.abspath(__file__))
         config_yaml_destination = os.path.join(os.getcwd(), 'config.yaml')
 
-        # Generate config.yaml using the function
-        config = generate_config(
-            model_name=args.model,
-            hf_model_name=args.hf,
-            ollama_model_name=args.ollama,
-            dataset=[{
-                "name": args.dataset
-            }]
-        )
-        with open('config.yaml', 'w') as f:
-            yaml.dump(config, f, default_flow_style=False, indent=2)
-
+        # Create config.yaml only if it doesn't exist or --model or --dataset is provided
+        if not os.path.exists(config_yaml_destination) or args.model or args.dataset:
+            config = generate_config(
+                model_name=args.model,
+                hf_model_name=args.hf,
+                ollama_model_name=args.ollama,
+                dataset=[{
+                    "name": args.dataset
+                }]
+            )
+            with open('config.yaml', 'w') as f:
+                yaml.dump(config, f, default_flow_style=False, indent=2)
+
+            # Overwrite huggingface_save and ollama_save if --hf or --ollama are provided
+            if args.hf:
+                config["huggingface_save"] = "true"
+            if args.ollama:
+                config["ollama_save"] = "true"
 
         if 'init' in sys.argv:
             from praisonai.setup.setup_conda_env import main as setup_conda_main
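The effect is that an existing config.yaml is no longer overwritten on every run: it is regenerated only when the file is missing or the user explicitly passes --model or --dataset, and --hf / --ollama then force the corresponding save flags on in the in-memory config. A minimal sketch of that gate, assuming an argparse namespace with the model, hf, ollama and dataset attributes used in the diff (the helper name maybe_generate_config is hypothetical):

import os
import yaml
from praisonai.inc.config import generate_config

def maybe_generate_config(args, path="config.yaml"):
    """Regenerate config.yaml only when it is missing or overrides were requested."""
    if not os.path.exists(path) or args.model or args.dataset:
        config = generate_config(
            model_name=args.model,
            hf_model_name=args.hf,
            ollama_model_name=args.ollama,
            dataset=[{"name": args.dataset}],
        )
        with open(path, "w") as f:
            yaml.dump(config, f, default_flow_style=False, indent=2)
        # As in the diff, --hf / --ollama adjust the in-memory dict after the dump.
        if args.hf:
            config["huggingface_save"] = "true"
        if args.ollama:
            config["ollama_save"] = "true"
        return config
    return None  # existing config.yaml is left untouched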
@@ -171,7 +177,7 @@ class PraisonAI:
             env = os.environ.copy()
             env['PYTHONUNBUFFERED'] = '1'
 
-            stream_subprocess(['conda', 'run', '--no-capture-output', '--name', 'praison_env', 'python', '-u', train_script_path, 'train'] + train_args, env=env)
+            stream_subprocess(['conda', 'run', '--no-capture-output', '--name', 'praison_env', 'python', '-u', train_script_path, 'train'], env=env)
             return
 
         invocation_cmd = "praisonai"
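With `+ train_args` dropped, the previously appended extra arguments are no longer forwarded to the training script; only the fixed conda-run command is streamed. stream_subprocess itself is not shown in this diff; a minimal stand-in that streams a child process's output line by line under the same unbuffered setup could look like this (illustration only, not the package's implementation):

import os
import subprocess

def stream_subprocess(cmd, env=None):
    # Hypothetical stand-in for praisonai's stream_subprocess, which this diff does not show.
    # Echoes the child's combined stdout/stderr as it arrives instead of buffering it.
    with subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT, text=True, bufsize=1) as proc:
        for line in proc.stdout:
            print(line, end="", flush=True)
    return proc.returncode

env = os.environ.copy()
env['PYTHONUNBUFFERED'] = '1'
# e.g. stream_subprocess(['conda', 'run', '--no-capture-output', '--name', 'praison_env',
#                         'python', '-u', '/path/to/train.py', 'train'], env=env)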
praisonai/deploy.py CHANGED
@@ -56,7 +56,7 @@ class CloudDeployer:
             file.write("FROM python:3.11-slim\n")
             file.write("WORKDIR /app\n")
             file.write("COPY . .\n")
-            file.write("RUN pip install flask praisonai==0.0.59rc11 gunicorn markdown\n")
+            file.write("RUN pip install flask praisonai==0.0.61 gunicorn markdown\n")
             file.write("EXPOSE 8080\n")
            file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
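For reference, the write calls visible in this hunk assemble a Dockerfile that now pins the released version (any lines written outside this hunk are not shown):

FROM python:3.11-slim
WORKDIR /app
COPY . .
RUN pip install flask praisonai==0.0.61 gunicorn markdown
EXPOSE 8080
CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]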
praisonai/inc/config.py CHANGED
@@ -38,9 +38,9 @@ def generate_config(
     """Generates the configuration for PraisonAI with dynamic overrides."""
 
     config = {
-        "ollama_save": ollama_save or "false",
-        "huggingface_save": huggingface_save or "false",
-        "train": train or "false",
+        "ollama_save": ollama_save or "true",
+        "huggingface_save": huggingface_save or "true",
+        "train": train or "true",
 
         "model_name": model_name or "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit",
         "hf_model_name": hf_model_name or "mervinpraison/llama-3.1-tamilan-8B-test",
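Because these values use the `or` fallback pattern, the flip only affects calls where the argument is omitted or falsy; an explicitly passed string such as "false" still wins. A small sketch of the behaviour (simplified signature, not the full generate_config):

def generate_config(ollama_save=None, huggingface_save=None, train=None):
    # Simplified stand-in showing only the flipped defaults.
    return {
        "ollama_save": ollama_save or "true",
        "huggingface_save": huggingface_save or "true",
        "train": train or "true",
    }

print(generate_config())
# {'ollama_save': 'true', 'huggingface_save': 'true', 'train': 'true'}
print(generate_config(ollama_save="false"))
# {'ollama_save': 'false', ...}  -- the string "false" is truthy, so it is kept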
praisonai/setup/config.yaml CHANGED
@@ -10,7 +10,7 @@ model_parameters: "8b"
 max_seq_length: 2048
 load_in_4bit: true
 lora_r: 16
-lora_target_modules: 
+lora_target_modules:
 - "q_proj"
 - "k_proj"
 - "v_proj"
praisonai/train.py CHANGED
@@ -142,6 +142,8 @@ class train:
             ),
         )
         trainer.train()
+        self.model.save_pretrained("lora_model") # Local saving
+        self.tokenizer.save_pretrained("lora_model")
 
     def inference(self, instruction, input_text):
         FastLanguageModel.for_inference(self.model)
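The new lines persist the trained LoRA adapters to a local lora_model directory alongside whatever is pushed to the Hub. A hedged sketch of reloading those adapters later, reusing the same Unsloth call pattern as the load_model() helper added further down in this diff (parameter values mirror the diff; adjust as needed):

from unsloth import FastLanguageModel

# Reload the locally saved adapters from the "lora_model" directory written above.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="lora_model",
    max_seq_length=2048,
    dtype=None,
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # switch to inference mode, as train.inference() does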
@@ -158,6 +160,17 @@ class train:
         inputs = self.tokenizer([alpaca_prompt.format(instruction, input_text, "")], return_tensors="pt").to("cuda")
         outputs = self.model.generate(**inputs, max_new_tokens=64, use_cache=True)
         print(self.tokenizer.batch_decode(outputs))
+
+    def load_model(self):
+        """Loads the model and tokenizer using the FastLanguageModel library."""
+        from unsloth import FastLanguageModel
+        model, tokenizer = FastLanguageModel.from_pretrained(
+            model_name=self.config["output_dir"],
+            max_seq_length=2048,
+            dtype=None,
+            load_in_4bit=self.config["load_in_4bit"],
+        )
+        return model, tokenizer
 
     def save_model_merged(self):
         if os.path.exists(self.config["hf_model_name"]):
@@ -176,9 +189,22 @@ class train:
             quantization_method=self.config["quantization_method"],
             token=os.getenv('HF_TOKEN')
         )
+
+    def save_model_gguf(self):
+        self.model.save_pretrained_gguf(
+            self.config["hf_model_name"],
+            self.tokenizer,
+            quantization_method="q4_k_m"
+        )
 
     def prepare_modelfile_content(self):
         output_model = self.config["hf_model_name"]
+        gguf_path = f"{output_model}/unsloth.Q4_K_M.gguf"
+
+        # Check if the GGUF file exists. If not, generate it ## TODO Multiple Quantisation other than Q4_K_M.gguf
+        if not os.path.exists(gguf_path):
+            self.model, self.tokenizer = self.load_model()
+            self.save_model_gguf()
         return f"""FROM {output_model}/unsloth.Q4_K_M.gguf
 
 TEMPLATE \"\"\"Below are some instructions that describe some tasks. Write responses that appropriately complete each request.{{{{ if .Prompt }}}}
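prepare_modelfile_content() now lazily produces the GGUF export if it is not already on disk before emitting the Ollama Modelfile. The diff does not show create_and_push_ollama_model(), but a Modelfile string like the one returned here is typically registered with the Ollama CLI roughly as follows (the helper name and model tag are hypothetical, for illustration only):

import subprocess

def register_with_ollama(modelfile_content: str, model_tag: str) -> None:
    # Illustration only; not the package's create_and_push_ollama_model().
    # Writes the generated Modelfile and asks the local Ollama daemon to build the model.
    with open("Modelfile", "w") as f:
        f.write(modelfile_content)
    subprocess.run(["ollama", "create", model_tag, "-f", "Modelfile"], check=True)

# e.g. register_with_ollama(trainer.prepare_modelfile_content(), "llama-3.1-finetuned")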
@@ -215,10 +241,20 @@ PARAMETER stop "<|reserved_special_token_"
         self.train_model()
 
         if self.config.get("huggingface_save", "true").lower() == "true":
+            # self.model, self.tokenizer = self.load_model()
             self.save_model_merged()
 
         if self.config.get("huggingface_save_gguf", "true").lower() == "true":
+            # self.model, self.tokenizer = self.load_model()
             self.push_model_gguf()
+
+        # if self.config.get("save_gguf", "true").lower() == "true": ## TODO
+        #     self.model, self.tokenizer = self.load_model()
+        #     self.save_model_gguf()
+
+        # if self.config.get("save_merged", "true").lower() == "true": ## TODO
+        #     self.model, self.tokenizer = self.load_model()
+        #     self.save_model_merged()
 
         if self.config.get("ollama_save", "true").lower() == "true":
             self.create_and_push_ollama_model()
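Taken together with the flipped defaults in generate_config(), every post-training step is now enabled unless explicitly disabled, because each gate falls back to the string "true". A compact sketch of the gating pattern used here (the config values are plain strings, not booleans):

# String flags from config.yaml control the post-training steps; with the new
# defaults they all resolve to "true" unless the user overrides them.
config = {
    "huggingface_save": "true",       # gates self.save_model_merged()
    "huggingface_save_gguf": "true",  # gates self.push_model_gguf()
    "ollama_save": "true",            # gates self.create_and_push_ollama_model()
}

def enabled(key: str) -> bool:
    # Mirrors the pattern in run(): missing keys default to "true".
    return config.get(key, "true").lower() == "true"

print([k for k in ("huggingface_save", "huggingface_save_gguf", "ollama_save") if enabled(k)])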
praisonai-0.0.59rc11.dist-info/METADATA → praisonai-0.0.61.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: PraisonAI
-Version: 0.0.59rc11
+Version: 0.0.61
 Summary: PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration.
 Author: Mervin Praison
 Requires-Python: >=3.10,<3.13
praisonai-0.0.59rc11.dist-info/RECORD → praisonai-0.0.61.dist-info/RECORD CHANGED
@@ -3,12 +3,12 @@ praisonai/__main__.py,sha256=MVgsjMThjBexHt4nhd760JCqvP4x0IQcwo8kULOK4FQ,144
 praisonai/agents_generator.py,sha256=8d1WRbubvEkBrW1HZ7_xnGyqgJi0yxmXa3MgTIqef1c,19127
 praisonai/auto.py,sha256=9spTXqj47Hmmqv5QHRYE_RzSVHH_KoPbaZjskUj2UcE,7895
 praisonai/chainlit_ui.py,sha256=bNR7s509lp0I9JlJNvwCZRUZosC64qdvlFCt8NmFamQ,12216
-praisonai/cli.py,sha256=Cz_oegYqZBRwWLpzbUiTVQRZqJJg38UgTBtnHFjk_HE,17648
-praisonai/deploy.py,sha256=9qkDTiytzxjHKlso9sBSiMVPZTr9wJZQgdxZh8mhzd0,6032
+praisonai/cli.py,sha256=ZDQF9OXgaquu_89cLc-jsx5D9zWxYg4ChPhlVv66drk,18047
+praisonai/deploy.py,sha256=JQ8FsfDYeZ310dSmOAHGfgI0wFjqPK45BrcLrRUOgBw,6028
 praisonai/inbuilt_tools/__init__.py,sha256=mUKnbL6Gram9c9f2m8wJwEzURBLmPEOcHzwySBH89YA,74
 praisonai/inbuilt_tools/autogen_tools.py,sha256=svYkM2N7DVFvbiwgoAS7U_MqTOD8rHf8VD3BaFUV5_Y,14907
 praisonai/inc/__init__.py,sha256=sPDlYBBwdk0VlWzaaM_lG0_LD07lS2HRGvPdxXJFiYg,62
-praisonai/inc/config.py,sha256=ZjmgY9Bh3PjA74Gwdal-MvPj50CKITiKny0aoY7XWN4,3336
+praisonai/inc/config.py,sha256=up2-841ruK7MCUUT3xkWBA5S6WsY0sFODNfcT6Q4Wms,3333
 praisonai/inc/models.py,sha256=1kwP9o56AvN8L38x7eeAzudjAvstN0uWu-woQkgxAe4,5449
 praisonai/public/android-chrome-192x192.png,sha256=ENJEqhDE3XEQViRhKNDezQKRiOiuHOUj5nzRN43fz50,6535
 praisonai/public/android-chrome-512x512.png,sha256=4txEwB0cJkxFVarRdvFGJZR1DtWJ2h-L_2cUEjBXHAc,15244
@@ -24,12 +24,12 @@ praisonai/public/movie.svg,sha256=aJ2EQ8vXZusVsF2SeuAVxP4RFJzQ14T26ejrGYdBgzk,12
 praisonai/public/thriller.svg,sha256=2dYY72EcgbEyTxS4QzjAm37Y4srtPWEW4vCMFki98ZI,3163
 praisonai/setup/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 praisonai/setup/build.py,sha256=NyTAXQ_UZ8vKo_KwCINp8ctmauZyCMDkw1rys3ay0ec,646
-praisonai/setup/config.yaml,sha256=pjThU3Mzp5ce2ZFb1GHnR3RviE9dhpwRz3ONd9JnnwE,1212
+praisonai/setup/config.yaml,sha256=sr_D1RIvv3LQ_eueOMZV0rAUiWTR-n2xuE1RhKK6b34,1211
 praisonai/setup/post_install.py,sha256=hXukn_7bL64vE582SZcS-9MiZGeJj6hN7upoR1oJ-Bo,576
 praisonai/setup/setup_conda_env.py,sha256=4QiWrqgEObivzOMwfJgWaCPpUEpB68cQ6lFwVwFoufk,816
 praisonai/setup/setup_conda_env.sh,sha256=te7s0KHsTi7XM-vkNvE0dKC1HeU2tXxqE-sPUScV6fY,2718
 praisonai/test.py,sha256=OL-wesjA5JTohr8rtr6kWoaS4ImkJg2l0GXJ-dUUfRU,4090
-praisonai/train.py,sha256=S4YPXXMypR_d6K_m0dcrDxFNanaOvXZq8tM2IenpmPs,9522
+praisonai/train.py,sha256=DvORlrwKOD-2v4r_z84eV3LsfzpNs-WnPKb5cQB3_t4,11071
 praisonai/ui/chat.py,sha256=B4F1R7qP-0c-elg8WcRsYlr6-FkmHWtdunGIzU7WrDM,9321
 praisonai/ui/code.py,sha256=GcOr8lNah4AgI2RcIKmgjehzSl-KNu7x6UHrghixeaM,10095
 praisonai/ui/context.py,sha256=oWO2I_WBZb7kZnuXItf18EJX0ZQv-1nAd8rxhwhuuDU,11871
@@ -41,8 +41,8 @@ praisonai/ui/public/movie.svg,sha256=aJ2EQ8vXZusVsF2SeuAVxP4RFJzQ14T26ejrGYdBgzk
 praisonai/ui/public/thriller.svg,sha256=2dYY72EcgbEyTxS4QzjAm37Y4srtPWEW4vCMFki98ZI,3163
 praisonai/ui/sql_alchemy.py,sha256=HsyeRq-G9qbQobHWpTJHHKQiT4FvYw_7iuv-2PNh0IU,27419
 praisonai/version.py,sha256=ugyuFliEqtAwQmH4sTlc16YXKYbFWDmfyk87fErB8-8,21
-praisonai-0.0.59rc11.dist-info/LICENSE,sha256=kqvFysVlnFxYOu0HxCe2HlmZmJtdmNGOxWRRkT9TsWc,1035
-praisonai-0.0.59rc11.dist-info/METADATA,sha256=oPMDxC8HHl-S91_BkN6RunO0LyP8SXnKSIjuHHF0Frc,11152
-praisonai-0.0.59rc11.dist-info/WHEEL,sha256=HBsDV7Hj4OTiS1GX6ua7iQXUQTB9UHftbBxr7Q8Xm9c,110
-praisonai-0.0.59rc11.dist-info/entry_points.txt,sha256=jB078LEGLY3Ky_indhclomRIVVpXrPSksHjJ-tcBZ-o,133
-praisonai-0.0.59rc11.dist-info/RECORD,,
+praisonai-0.0.61.dist-info/LICENSE,sha256=kqvFysVlnFxYOu0HxCe2HlmZmJtdmNGOxWRRkT9TsWc,1035
+praisonai-0.0.61.dist-info/METADATA,sha256=M-MbTL30aaXFUXZ5M42Gxhcu478_Gl9MfdiZLgnb5jI,11148
+praisonai-0.0.61.dist-info/WHEEL,sha256=HBsDV7Hj4OTiS1GX6ua7iQXUQTB9UHftbBxr7Q8Xm9c,110
+praisonai-0.0.61.dist-info/entry_points.txt,sha256=jB078LEGLY3Ky_indhclomRIVVpXrPSksHjJ-tcBZ-o,133
+praisonai-0.0.61.dist-info/RECORD,,