PraisonAI 2.0.75__cp313-cp313-manylinux_2_39_x86_64.whl → 2.0.77__cp313-cp313-manylinux_2_39_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of PraisonAI may be problematic.
- praisonai/deploy.py +1 -1
- praisonai/train_vision.py +103 -31
- praisonai/upload_vision.py +140 -0
- {praisonai-2.0.75.dist-info → praisonai-2.0.77.dist-info}/METADATA +2 -2
- {praisonai-2.0.75.dist-info → praisonai-2.0.77.dist-info}/RECORD +8 -7
- {praisonai-2.0.75.dist-info → praisonai-2.0.77.dist-info}/WHEEL +1 -1
- {praisonai-2.0.75.dist-info → praisonai-2.0.77.dist-info}/LICENSE +0 -0
- {praisonai-2.0.75.dist-info → praisonai-2.0.77.dist-info}/entry_points.txt +0 -0
praisonai/deploy.py
CHANGED
@@ -56,7 +56,7 @@ class CloudDeployer:
             file.write("FROM python:3.11-slim\n")
             file.write("WORKDIR /app\n")
             file.write("COPY . .\n")
-            file.write("RUN pip install flask praisonai==2.0.75 gunicorn markdown\n")
+            file.write("RUN pip install flask praisonai==2.0.77 gunicorn markdown\n")
             file.write("EXPOSE 8080\n")
             file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
 
praisonai/train_vision.py
CHANGED
@@ -12,11 +12,14 @@ import yaml
 import torch
 import shutil
 import subprocess
+import gc  # For garbage collection
 
-from datasets import load_dataset, concatenate_datasets
+from datasets import load_dataset, concatenate_datasets, Dataset
 from unsloth import FastVisionModel, is_bf16_supported
 from unsloth.trainer import UnslothVisionDataCollator
-from
+from transformers import TrainingArguments
+from trl import SFTTrainer
+from tqdm import tqdm  # Add progress bar
 
 
 class TrainVisionModel:
@@ -62,11 +65,21 @@ class TrainVisionModel:
             use_gradient_checkpointing="unsloth"
         )
         print("DEBUG: Vision model and original tokenizer loaded.")
-
-
-        original_tokenizer.model_max_length = self.config.get("max_seq_length", 2048)
+
+        # Use the full processor that supports image inputs.
         self.hf_tokenizer = original_tokenizer
 
+        # Set pad token if needed
+        if not hasattr(self.hf_tokenizer, 'pad_token') or self.hf_tokenizer.pad_token is None:
+            if hasattr(self.hf_tokenizer, 'eos_token'):
+                self.hf_tokenizer.pad_token = self.hf_tokenizer.eos_token
+            elif hasattr(self.hf_tokenizer, 'bos_token'):
+                self.hf_tokenizer.pad_token = self.hf_tokenizer.bos_token
+
+        # Set max length
+        if hasattr(self.hf_tokenizer, 'model_max_length'):
+            self.hf_tokenizer.model_max_length = self.config.get("max_seq_length", 2048)
+
         # Add vision-specific LoRA adapters
         self.model = FastVisionModel.get_peft_model(
             self.model,
@@ -85,38 +98,62 @@ class TrainVisionModel:
         print("DEBUG: Vision LoRA adapters added.")
 
     def convert_sample(self, sample):
-
-
+
+        instruction = self.config.get(
+            "vision_instruction",
+            "You are an expert radiographer. Describe accurately what you see in this image."
+        )
         conversation = [
-            {
-
-
-
-
-
-
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": instruction},
+                    {"type": "image", "image": sample["image"]}
+                ]
+            },
+            {
+                "role": "assistant",
+                "content": [
+                    {"type": "text", "text": sample["caption"]}
+                ]
+            },
         ]
+
         return {"messages": conversation}
 
     def load_datasets(self):
-
+        all_converted = []
         for dataset_info in self.config["dataset"]:
-            print("
-            ds = load_dataset(
-
-
-
-
-
-
+            print("\nDEBUG: Loading vision dataset:", dataset_info)
+            ds = load_dataset(
+                dataset_info["name"],
+                split=dataset_info.get("split_type", "train")
+            )
+            print("DEBUG: Dataset size:", len(ds))
+            print("DEBUG: First raw sample:", ds[0])
+            print("DEBUG: Dataset features:", ds.features)
+
+            print("\nDEBUG: Converting dataset to vision conversation format...")
+            converted_ds = [self.convert_sample(sample) for sample in ds]
+
+            # Debug first converted sample
+            print("\nDEBUG: First converted sample structure:")
+            first = converted_ds[0]
+            print("DEBUG: Message keys:", first["messages"][0]["content"][1].keys())
+            print("DEBUG: Image type in converted:", type(first["messages"][0]["content"][1].get("image")))
+
+            all_converted.extend(converted_ds)
+
+        print("\nDEBUG: Combined vision dataset has", len(all_converted), "examples.")
+        return all_converted
 
     def train_model(self):
         print("DEBUG: Starting vision training...")
         raw_dataset = self.load_datasets()
 
-        # Build training arguments using
-
-            per_device_train_batch_size=self.config.get("per_device_train_batch_size",
+        # Build training arguments using TrainingArguments
+        training_args = TrainingArguments(
+            per_device_train_batch_size=self.config.get("per_device_train_batch_size", 1),
             gradient_accumulation_steps=self.config.get("gradient_accumulation_steps", 4),
             warmup_steps=self.config.get("warmup_steps", 5),
             max_steps=self.config.get("max_steps", 30),
@@ -131,10 +168,9 @@ class TrainVisionModel:
             output_dir=self.config.get("output_dir", "outputs"),
             report_to="none" if not os.getenv("PRAISON_WANDB") else "wandb",
             remove_unused_columns=False,
-
-
-
-            max_seq_length=self.config.get("max_seq_length", 2048)
+            # Add memory optimization settings
+            gradient_checkpointing=True,
+            max_grad_norm=1.0,
         )
 
         trainer = SFTTrainer(
@@ -142,7 +178,11 @@ class TrainVisionModel:
             tokenizer=self.hf_tokenizer,
             data_collator=UnslothVisionDataCollator(self.model, self.hf_tokenizer),
             train_dataset=raw_dataset,
-            args=
+            args=training_args,
+            max_seq_length=self.config.get("max_seq_length", 2048),
+            dataset_text_field="",  # Required for vision training
+            dataset_kwargs={"skip_prepare_dataset": True},  # Required for vision training
+            packing=False  # Explicitly set packing to False
         )
         print("DEBUG: Beginning vision trainer.train() ...")
         trainer.train()
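Note: every element of the list returned by load_datasets above is a plain dict in the multimodal chat format that UnslothVisionDataCollator consumes; there is no pre-tokenized text column, which is why dataset_text_field is left empty and dataset preparation is skipped. A minimal sketch of one converted example, using a hypothetical PIL image and caption in place of real dataset values:

# One training example as produced by convert_sample (illustrative values only).
from PIL import Image  # the Hugging Face dataset yields PIL images in sample["image"]

image = Image.new("RGB", (224, 224))            # hypothetical stand-in for sample["image"]
caption = "No acute cardiopulmonary findings."  # hypothetical stand-in for sample["caption"]

example = {
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "You are an expert radiographer. Describe accurately what you see in this image."},
                {"type": "image", "image": image},
            ],
        },
        {
            "role": "assistant",
            "content": [{"type": "text", "text": caption}],
        },
    ]
}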
@@ -200,6 +240,32 @@ class TrainVisionModel:
             quantization_method="q4_k_m"
         )
 
+    def prepare_modelfile_content(self):
+        output_model = self.config["hf_model_name"]
+
+        template = '''{{- range $index, $_ := .Messages }}<|start_header_id|>{{ .Role }}<|end_header_id|>
+
+{{ .Content }}
+{{- if gt (len (slice $.Messages $index)) 1 }}<|eot_id|>
+{{- else if ne .Role "assistant" }}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
+
+{{ end }}
+{{- end }}'''
+
+        return f"""FROM {output_model}
+TEMPLATE {template}
+PARAMETER temperature 0.6
+PARAMETER top_p 0.9
+"""
+
+    def create_and_push_ollama_model(self):
+        modelfile_content = self.prepare_modelfile_content()
+        with open("Modelfile", "w") as file:
+            file.write(modelfile_content)
+        subprocess.run(["ollama", "serve"])
+        subprocess.run(["ollama", "create", f"{self.config['ollama_model']}:{self.config['model_parameters']}", "-f", "Modelfile"])
+        subprocess.run(["ollama", "push", f"{self.config['ollama_model']}:{self.config['model_parameters']}"])
+
     def run(self):
         self.print_system_info()
         self.check_gpu()
@@ -207,6 +273,12 @@ class TrainVisionModel:
         if self.config.get("train", "true").lower() == "true":
             self.prepare_model()
             self.train_model()
+        if self.config.get("huggingface_save", "true").lower() == "true":
+            self.save_model_merged()
+        if self.config.get("huggingface_save_gguf", "true").lower() == "true":
+            self.push_model_gguf()
+        if self.config.get("ollama_save", "true").lower() == "true":
+            self.create_and_push_ollama_model()
 
 
 def main():
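Note: the updated run() and trainer read their settings from config.yaml via self.config.get(...). A sketch of the keys this file now touches, with placeholder values; the keys and defaults are inferred from the config.get calls in the diff above, not from any config file shipped with the package:

# Illustrative config (placeholder values); load it with yaml.safe_load as load_config does.
config = {
    "dataset": [{"name": "user/vision-dataset", "split_type": "train"}],  # placeholder dataset name
    "vision_instruction": "You are an expert radiographer. Describe accurately what you see in this image.",
    "max_seq_length": 2048,
    "per_device_train_batch_size": 1,
    "gradient_accumulation_steps": 4,
    "warmup_steps": 5,
    "max_steps": 30,
    "output_dir": "outputs",
    "train": "true",
    "huggingface_save": "true",       # push merged weights after training
    "huggingface_save_gguf": "true",  # also push a GGUF export
    "ollama_save": "true",            # build a Modelfile and push to Ollama
    "hf_model_name": "user/vision-model",  # placeholder Hugging Face repo id
    "ollama_model": "user/vision-model",   # placeholder Ollama model name
    "model_parameters": "11b",             # placeholder tag appended to the Ollama model
}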
praisonai/upload_vision.py
ADDED
@@ -0,0 +1,140 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+This script handles uploading trained vision models to Hugging Face and Ollama.
+It reads configuration from config.yaml and provides options to upload in different formats.
+"""
+
+import os
+import yaml
+import torch
+import shutil
+import subprocess
+from unsloth import FastVisionModel
+
+class UploadVisionModel:
+    def __init__(self, config_path="config.yaml"):
+        self.load_config(config_path)
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.model = None
+        self.hf_tokenizer = None
+
+    def load_config(self, path):
+        """Load configuration from yaml file."""
+        with open(path, "r") as file:
+            self.config = yaml.safe_load(file)
+        print("DEBUG: Loaded config:", self.config)
+
+    def prepare_model(self):
+        """Load the trained model for uploading."""
+        print("DEBUG: Loading trained model and tokenizer...")
+        self.model, original_tokenizer = FastVisionModel.from_pretrained(
+            model_name=self.config.get("output_dir", "lora_model"),
+            load_in_4bit=self.config.get("load_in_4bit", True)
+        )
+        self.hf_tokenizer = original_tokenizer
+        print("DEBUG: Model and tokenizer loaded successfully.")
+
+    def save_model_merged(self):
+        """Save merged model to Hugging Face Hub."""
+        print(f"DEBUG: Saving merged model to Hugging Face Hub: {self.config['hf_model_name']}")
+        if os.path.exists(self.config["hf_model_name"]):
+            shutil.rmtree(self.config["hf_model_name"])
+        self.model.push_to_hub_merged(
+            self.config["hf_model_name"],
+            self.hf_tokenizer,
+            save_method="merged_16bit",
+            token=os.getenv("HF_TOKEN")
+        )
+        print("DEBUG: Model saved to Hugging Face Hub successfully.")
+
+    def push_model_gguf(self):
+        """Push model in GGUF format to Hugging Face Hub."""
+        print(f"DEBUG: Pushing GGUF model to Hugging Face Hub: {self.config['hf_model_name']}")
+        self.model.push_to_hub_gguf(
+            self.config["hf_model_name"],
+            self.hf_tokenizer,
+            quantization_method=self.config.get("quantization_method", "q4_k_m"),
+            token=os.getenv("HF_TOKEN")
+        )
+        print("DEBUG: GGUF model pushed to Hugging Face Hub successfully.")
+
+    def prepare_modelfile_content(self):
+        """Prepare Ollama modelfile content using Llama 3.2 vision template."""
+        output_model = self.config["hf_model_name"]
+
+        # Using Llama 3.2 vision template format
+        template = """{{- range $index, $_ := .Messages }}<|start_header_id|>{{ .Role }}<|end_header_id|>
+
+{{ .Content }}
+{{- if gt (len (slice $.Messages $index)) 1 }}<|eot_id|>
+{{- else if ne .Role "assistant" }}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
+
+{{ end }}
+{{- end }}"""
+
+        # Assemble the modelfile content with Llama 3.2 vision parameters
+        modelfile = f"FROM {output_model}\n"
+        modelfile += "TEMPLATE \"""" + template + "\"""\n"
+        modelfile += "PARAMETER temperature 0.6\n"
+        modelfile += "PARAMETER top_p 0.9\n"
+        return modelfile
+
+    def create_and_push_ollama_model(self):
+        """Create and push model to Ollama."""
+        print(f"DEBUG: Creating Ollama model: {self.config['ollama_model']}:{self.config['model_parameters']}")
+        modelfile_content = self.prepare_modelfile_content()
+        with open("Modelfile", "w") as file:
+            file.write(modelfile_content)
+
+        print("DEBUG: Starting Ollama server...")
+        subprocess.run(["ollama", "serve"])
+
+        print("DEBUG: Creating Ollama model...")
+        subprocess.run([
+            "ollama", "create",
+            f"{self.config['ollama_model']}:{self.config['model_parameters']}",
+            "-f", "Modelfile"
+        ])
+
+        print("DEBUG: Pushing model to Ollama...")
+        subprocess.run([
+            "ollama", "push",
+            f"{self.config['ollama_model']}:{self.config['model_parameters']}"
+        ])
+        print("DEBUG: Model pushed to Ollama successfully.")
+
+    def upload(self, target="all"):
+        """
+        Upload the model to specified targets.
+        Args:
+            target (str): One of 'all', 'huggingface', 'huggingface_gguf', or 'ollama'
+        """
+        self.prepare_model()
+
+        if target in ["all", "huggingface"]:
+            self.save_model_merged()
+
+        if target in ["all", "huggingface_gguf"]:
+            self.push_model_gguf()
+
+        if target in ["all", "ollama"]:
+            self.create_and_push_ollama_model()
+
+def main():
+    import argparse
+    parser = argparse.ArgumentParser(description="Upload Vision Model to Various Platforms")
+    parser.add_argument("--config", default="config.yaml", help="Path to configuration file")
+    parser.add_argument(
+        "--target",
+        choices=["all", "huggingface", "huggingface_gguf", "ollama"],
+        default="all",
+        help="Target platform to upload to"
+    )
+    args = parser.parse_args()
+
+    uploader = UploadVisionModel(config_path=args.config)
+    uploader.upload(target=args.target)
+
+if __name__ == "__main__":
+    main()
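Note: upload_vision.py can be run as a script (python upload_vision.py --target ollama) or used programmatically. A minimal programmatic sketch, assuming a trained adapter saved under the configured output_dir (default "lora_model") and an HF_TOKEN available in the environment:

import os
from praisonai.upload_vision import UploadVisionModel

# HF_TOKEN is read by the push_to_hub_* calls above; the value here is a placeholder.
os.environ.setdefault("HF_TOKEN", "hf_xxx")

uploader = UploadVisionModel(config_path="config.yaml")
uploader.upload(target="huggingface")  # or "huggingface_gguf", "ollama", "all"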
{praisonai-2.0.75.dist-info → praisonai-2.0.77.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: PraisonAI
-Version: 2.0.75
+Version: 2.0.77
 Summary: PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration.
 Author: Mervin Praison
 Requires-Python: >=3.10,<3.13
@@ -61,7 +61,7 @@ Requires-Dist: playwright (>=1.47.0) ; extra == "code"
 Requires-Dist: plotly (>=5.24.0) ; extra == "realtime"
 Requires-Dist: praisonai-tools (>=0.0.7) ; extra == "autogen"
 Requires-Dist: praisonai-tools (>=0.0.7) ; extra == "crewai"
-Requires-Dist: praisonaiagents (>=0.0.
+Requires-Dist: praisonaiagents (>=0.0.62)
 Requires-Dist: pyautogen (>=0.2.19) ; extra == "autogen"
 Requires-Dist: pydantic (<=2.10.1) ; extra == "chat"
 Requires-Dist: pydantic (<=2.10.1) ; extra == "code"
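Note: the dependency change visible in this hunk raises the praisonaiagents floor to 0.0.62 while the package itself moves to 2.0.77. A quick standard-library check that an upgraded environment resolved both (assumes both distributions are installed):

from importlib.metadata import version

print("praisonai:", version("praisonai"))              # expected: 2.0.77
print("praisonaiagents:", version("praisonaiagents"))  # expected: >= 0.0.62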
{praisonai-2.0.75.dist-info → praisonai-2.0.77.dist-info}/RECORD
CHANGED
@@ -5,7 +5,7 @@ praisonai/api/call.py,sha256=krOfTCZM_bdbsNuWQ1PijzCHECkDvEi9jIvvZaDQUUU,11035
 praisonai/auto.py,sha256=uLDm8CU3L_3amZsd55yzf9RdBF1uW-BGSx7nl9ctNZ4,8680
 praisonai/chainlit_ui.py,sha256=bNR7s509lp0I9JlJNvwCZRUZosC64qdvlFCt8NmFamQ,12216
 praisonai/cli.py,sha256=hxGPiX8-LZanu2jiwBXIkMPm8Kk0Tt3LwDclLkUt0iY,26051
-praisonai/deploy.py,sha256=
+praisonai/deploy.py,sha256=2gLKcli7gR-BHJSWTkWzYgmwl5VDbf4k5J02dziIAVM,6028
 praisonai/inbuilt_tools/__init__.py,sha256=fai4ZJIKz7-iOnGZv5jJX0wmT77PKa4x2jqyaJddKFA,569
 praisonai/inbuilt_tools/autogen_tools.py,sha256=kJdEv61BTYvdHOaURNEpBcWq8Rs-oC03loNFTIjT-ak,4687
 praisonai/inc/__init__.py,sha256=sPDlYBBwdk0VlWzaaM_lG0_LD07lS2HRGvPdxXJFiYg,62
@@ -34,7 +34,7 @@ praisonai/setup/setup_conda_env.sh,sha256=_pVbrXStZua6vUJTbuGiZam-zWsDDLWP0ZaFuP
 praisonai/setup.py,sha256=0jHgKnIPCtBZiGYaYyTz3PzrJI6nBy55VXk2UctXlDo,373
 praisonai/test.py,sha256=OL-wesjA5JTohr8rtr6kWoaS4ImkJg2l0GXJ-dUUfRU,4090
 praisonai/train.py,sha256=Cjb0TKU3esNrCk2OX24Qm1S1crRC00FdiGUYJLw3iPQ,24094
-praisonai/train_vision.py,sha256=
+praisonai/train_vision.py,sha256=OLDtr5u9rszWQ80LC5iFy37yPuYguES6AQybm_2RtM4,12514
 praisonai/ui/README.md,sha256=QG9yucvBieVjCjWFzu6hL9xNtYllkoqyJ_q1b0YYAco,1124
 praisonai/ui/agents.py,sha256=1qsWE2yCaQKhuc-1uLHdMfZJeOXzBtp4pe5q7bk2EuA,32813
 praisonai/ui/callbacks.py,sha256=V4_-GjxmjDFmugUZGfQHKtNSysx7rT6i1UblbM_8lIM,1968
@@ -82,9 +82,10 @@ praisonai/ui/realtimeclient/realtimedocs.txt,sha256=hmgd8Uwy2SkjSndyyF_-ZOaNxiyH
 praisonai/ui/realtimeclient/tools.py,sha256=IJOYwVOBW5Ocn5_iV9pFkmSKR3WU3YpX3kwF0I3jikQ,7855
 praisonai/ui/sql_alchemy.py,sha256=oekZOXlRGMJ2SuC-lmgMMIzAmvbMg2DWeGTSpOzbVBM,29674
 praisonai/ui/tools.md,sha256=Ad3YH_ZCLMWlz3mDXllQnQ_S5l55LWqLdcZSh-EXrHI,3956
+praisonai/upload_vision.py,sha256=lMpFn993UiYVJxRNZQTmcbPbEajQ5TFKCNGK1Icn_hg,5253
 praisonai/version.py,sha256=ugyuFliEqtAwQmH4sTlc16YXKYbFWDmfyk87fErB8-8,21
-praisonai-2.0.
-praisonai-2.0.
-praisonai-2.0.
-praisonai-2.0.
-praisonai-2.0.
+praisonai-2.0.77.dist-info/LICENSE,sha256=kqvFysVlnFxYOu0HxCe2HlmZmJtdmNGOxWRRkT9TsWc,1035
+praisonai-2.0.77.dist-info/METADATA,sha256=RZFBDDFCCji6tnnu4sBE-qX6rjtAOM6hS_YV5slwQjo,21942
+praisonai-2.0.77.dist-info/WHEEL,sha256=IC-58YCXkB2pVoJBgAzESes-vAnjpjsOuhYWk8sUmtI,110
+praisonai-2.0.77.dist-info/entry_points.txt,sha256=I_xc6a6MNTTfLxYmAxe0rgey0G-_hbY07oFW-ZDnkw4,135
+praisonai-2.0.77.dist-info/RECORD,,
{praisonai-2.0.75.dist-info → praisonai-2.0.77.dist-info}/LICENSE
File without changes
{praisonai-2.0.75.dist-info → praisonai-2.0.77.dist-info}/entry_points.txt
File without changes