PraisonAI 0.0.54__py3-none-any.whl → 0.0.56__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of PraisonAI was flagged as possibly problematic.
- praisonai/deploy.py +1 -1
- praisonai/test.py +1 -1
- praisonai/ui/chat.py +5 -5
- praisonai/ui/code.py +5 -5
- praisonai/ui/context.py +66 -20
- {praisonai-0.0.54.dist-info → praisonai-0.0.56.dist-info}/METADATA +35 -58
- {praisonai-0.0.54.dist-info → praisonai-0.0.56.dist-info}/RECORD +10 -10
- {praisonai-0.0.54.dist-info → praisonai-0.0.56.dist-info}/LICENSE +0 -0
- {praisonai-0.0.54.dist-info → praisonai-0.0.56.dist-info}/WHEEL +0 -0
- {praisonai-0.0.54.dist-info → praisonai-0.0.56.dist-info}/entry_points.txt +0 -0
praisonai/deploy.py
CHANGED

@@ -56,7 +56,7 @@ class CloudDeployer:
             file.write("FROM python:3.11-slim\n")
             file.write("WORKDIR /app\n")
             file.write("COPY . .\n")
-            file.write("RUN pip install flask praisonai==0.0.
+            file.write("RUN pip install flask praisonai==0.0.56 gunicorn markdown\n")
             file.write("EXPOSE 8080\n")
             file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
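The net effect of this hunk is that the Dockerfile generated for deployment now pins the freshly released version. A standalone sketch of the Dockerfile content these write calls produce, for illustration only:

```python
# Sketch: the Dockerfile content the write calls above emit after this change.
dockerfile = "\n".join([
    "FROM python:3.11-slim",
    "WORKDIR /app",
    "COPY . .",
    "RUN pip install flask praisonai==0.0.56 gunicorn markdown",
    "EXPOSE 8080",
    'CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]',
])
print(dockerfile)
```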
praisonai/test.py
CHANGED

@@ -7,7 +7,7 @@ load_dotenv()
 import autogen
 config_list = [
     {
-        'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-
+        'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini"),
         'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
         'api_key': os.environ.get("OPENAI_API_KEY")
     }
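For reference, a minimal sketch of how the environment variables in this hunk override the new defaults; the override values below are purely illustrative:

```python
# Sketch only: the same lookups test.py performs, with explicit overrides shown.
import os

os.environ["OPENAI_MODEL_NAME"] = "gpt-4o"                  # illustrative override
os.environ["OPENAI_API_BASE"] = "http://localhost:8000/v1"  # illustrative OpenAI-compatible endpoint

config_list = [{
    'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini"),
    'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
    'api_key': os.environ.get("OPENAI_API_KEY"),
}]
print(config_list[0]['model'], config_list[0]['base_url'])
```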
praisonai/ui/chat.py
CHANGED

@@ -179,7 +179,7 @@ async def start():
         cl.user_session.set("model_name", model_name)
     else:
         # If no setting found, use default or environment variable
-        model_name = os.getenv("MODEL_NAME", "gpt-
+        model_name = os.getenv("MODEL_NAME", "gpt-4o-mini")
         cl.user_session.set("model_name", model_name)
     logger.debug(f"Model name: {model_name}")
     settings = cl.ChatSettings(
@@ -187,7 +187,7 @@ async def start():
             TextInput(
                 id="model_name",
                 label="Enter the Model Name",
-                placeholder="e.g., gpt-
+                placeholder="e.g., gpt-4o-mini",
                 initial=model_name
             )
         ]
@@ -221,7 +221,7 @@ async def setup_agent(settings):

 @cl.on_message
 async def main(message: cl.Message):
-    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-
+    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
     message_history = cl.user_session.get("message_history", [])
     message_history.append({"role": "user", "content": message.content})

@@ -268,14 +268,14 @@ async def send_count():
 @cl.on_chat_resume
 async def on_chat_resume(thread: cl_data.ThreadDict):
     logger.info(f"Resuming chat: {thread['id']}")
-    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-
+    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
     logger.debug(f"Model name: {model_name}")
     settings = cl.ChatSettings(
         [
             TextInput(
                 id="model_name",
                 label="Enter the Model Name",
-                placeholder="e.g., gpt-
+                placeholder="e.g., gpt-4o-mini",
                 initial=model_name
             )
         ]
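praisonai/ui/code.py below applies the identical change; a minimal sketch of the resolution order both UIs now use. load_setting is stubbed here for illustration, while the real helper reads the persisted UI settings:

```python
# Sketch of the fallback order: saved setting -> MODEL_NAME env var -> "gpt-4o-mini".
import os

def load_setting(key):
    return None  # hypothetical stub standing in for the saved-settings lookup

model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
print(model_name)  # "gpt-4o-mini" unless a saved setting or MODEL_NAME overrides it
```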
praisonai/ui/code.py
CHANGED

@@ -180,7 +180,7 @@ async def start():
         cl.user_session.set("model_name", model_name)
     else:
         # If no setting found, use default or environment variable
-        model_name = os.getenv("MODEL_NAME", "gpt-
+        model_name = os.getenv("MODEL_NAME", "gpt-4o-mini")
         cl.user_session.set("model_name", model_name)
     logger.debug(f"Model name: {model_name}")
     settings = cl.ChatSettings(
@@ -188,7 +188,7 @@ async def start():
             TextInput(
                 id="model_name",
                 label="Enter the Model Name",
-                placeholder="e.g., gpt-
+                placeholder="e.g., gpt-4o-mini",
                 initial=model_name
             )
         ]
@@ -228,7 +228,7 @@ async def setup_agent(settings):

 @cl.on_message
 async def main(message: cl.Message):
-    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-
+    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
     message_history = cl.user_session.get("message_history", [])
     message_history.append({"role": "user", "content": message.content})
     gatherer = ContextGatherer()
@@ -282,14 +282,14 @@ async def send_count():
 @cl.on_chat_resume
 async def on_chat_resume(thread: cl_data.ThreadDict):
     logger.info(f"Resuming chat: {thread['id']}")
-    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-
+    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
     logger.debug(f"Model name: {model_name}")
     settings = cl.ChatSettings(
         [
             TextInput(
                 id="model_name",
                 label="Enter the Model Name",
-                placeholder="e.g., gpt-
+                placeholder="e.g., gpt-4o-mini",
                 initial=model_name
             )
         ]
praisonai/ui/context.py
CHANGED

@@ -1,6 +1,5 @@
 import os
 import fnmatch
-import re
 import yaml
 from pathlib import Path
 import logging
@@ -34,10 +33,12 @@ class ContextGatherer:
         self.max_file_size = max_file_size
         self.max_tokens = int(os.getenv("PRAISONAI_MAX_TOKENS", max_tokens))
         self.ignore_patterns = self.get_ignore_patterns()
+        self.include_paths = self.get_include_paths()
+        self.included_files = []

     def get_ignore_patterns(self):
         """
-        Loads ignore patterns from various sources, prioritizing them in
+        Loads ignore patterns from various sources, prioritizing them in
         the following order:
         1. .praisonignore
         2. settings.yaml (under code.ignore_files)
@@ -95,6 +96,19 @@ class ContextGatherer:
         logger.debug(f"Final ignore patterns: {modified_ignore_patterns}")
         return modified_ignore_patterns

+    def get_include_paths(self):
+        include_paths = []
+
+        # 1. Load from .praisoninclude
+        include_file = os.path.join(self.directory, '.praisoninclude')
+        if os.path.exists(include_file):
+            with open(include_file, 'r') as f:
+                include_paths.extend(
+                    line.strip() for line in f
+                    if line.strip() and not line.startswith('#')
+                )
+        return include_paths
+
     def should_ignore(self, file_path):
         """
         Check if a file or directory should be ignored based on patterns.
@@ -116,31 +130,65 @@
             any(file_path.endswith(ext) for ext in self.relevant_extensions)

     def gather_context(self):
-        """Gather context from relevant files, respecting ignore patterns."""
+        """Gather context from relevant files, respecting ignore patterns and include paths."""
         context = []
         total_files = 0
         processed_files = 0

-
-
-
-
-
-
+        if not self.include_paths:
+            # No include paths specified, process the entire directory
+            for root, dirs, files in os.walk(self.directory):
+                total_files += len(files)
+                dirs[:] = [d for d in dirs if not self.should_ignore(os.path.join(root, d))]
+                for file in files:
+                    file_path = os.path.join(root, file)
+                    if not self.should_ignore(file_path) and self.is_relevant_file(file_path):
+                        try:
+                            with open(file_path, 'r', encoding='utf-8') as f:
+                                content = f.read()
+                            context.append(f"File: {file_path}\n\n{content}\n\n{'='*50}\n")
+                            self.included_files.append(Path(file_path).relative_to(self.directory))
+                        except Exception as e:
+                            logger.error(f"Error reading {file_path}: {e}")
+                    processed_files += 1
+                    print(f"\rProcessed {processed_files}/{total_files} files", end="", flush=True)
+        else:
+            # Process specified include paths
+            for include_path in self.include_paths:
+                full_path = os.path.join(self.directory, include_path)
+                if os.path.isdir(full_path):
+                    for root, dirs, files in os.walk(full_path):
+                        total_files += len(files)
+                        dirs[:] = [d for d in dirs if not self.should_ignore(os.path.join(root, d))]
+                        for file in files:
+                            file_path = os.path.join(root, file)
+                            if not self.should_ignore(file_path) and self.is_relevant_file(file_path):
+                                try:
+                                    with open(file_path, 'r', encoding='utf-8') as f:
+                                        content = f.read()
+                                    context.append(f"File: {file_path}\n\n{content}\n\n{'='*50}\n")
+                                    self.included_files.append(Path(file_path).relative_to(self.directory))
+                                except Exception as e:
+                                    logger.error(f"Error reading {file_path}: {e}")
+                            processed_files += 1
+                            print(f"\rProcessed {processed_files}/{total_files} files", end="", flush=True)
+                elif os.path.isfile(full_path) and self.is_relevant_file(full_path):
                     try:
-                        with open(
+                        with open(full_path, 'r', encoding='utf-8') as f:
                             content = f.read()
-                        context.append(f"File: {
+                        context.append(f"File: {full_path}\n\n{content}\n\n{'='*50}\n")
+                        self.included_files.append(Path(full_path).relative_to(self.directory))
                     except Exception as e:
-                        logger.error(f"Error reading {
-
-
+                        logger.error(f"Error reading {full_path}: {e}")
+                    processed_files += 1
+                    print(f"\rProcessed {processed_files}/{total_files} files", end="", flush=True)

         print()  # New line after progress indicator
         return '\n'.join(context)

     def count_tokens(self, text):
         """Count tokens using a simple whitespace-based tokenizer."""
-        return len(text.split())
+        return len(text.split())

     def truncate_context(self, context):
         """Truncate context to stay within the token limit."""
@@ -165,12 +213,9 @@
             contents = sorted(path.iterdir())
             pointers = [('└── ' if i == len(contents) - 1 else '├── ') for i in range(len(contents))]
             for pointer, item in zip(pointers, contents):
-                # Use should_ignore for consistency
-                if self.should_ignore(item):
-                    continue
-
                 rel_path = item.relative_to(start_dir)
-
+                if rel_path in self.included_files:
+                    tree.append(f"{prefix}{pointer}{rel_path}")

                 if item.is_dir():
                     add_to_tree(item, prefix + ('    ' if pointer == '└── ' else '│   '))
@@ -193,6 +238,7 @@
 def main():
     gatherer = ContextGatherer()
     context, token_count, context_tree = gatherer.run()
+    print(context_tree)
     print(f"\nThe context contains approximately {token_count} tokens.")
     print("First 500 characters of context:")
     print(context[:500] + "...")
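The new .praisoninclude support restricts context gathering to explicitly listed paths. A minimal usage sketch follows; the import path, the default working directory, and the listed paths are assumptions based on the code above:

```python
# Assumed usage of the include-path feature added above; paths are illustrative.
from pathlib import Path
from praisonai.ui.context import ContextGatherer  # import path inferred from praisonai/ui/context.py

# One path per line; blank lines and '#' comments are skipped by get_include_paths().
Path(".praisoninclude").write_text("# gather only these paths\npraisonai/ui/\npraisonai/cli.py\n")

gatherer = ContextGatherer()  # assumed to default to the current directory
context, token_count, context_tree = gatherer.run()
print(context_tree)           # the tree now lists only files that were actually included
print(f"~{token_count} tokens gathered")
```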
{praisonai-0.0.54.dist-info → praisonai-0.0.56.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: PraisonAI
-Version: 0.0.54
+Version: 0.0.56
 Summary: PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration.
 Author: Mervin Praison
 Requires-Python: >=3.10,<3.13
@@ -69,14 +69,38 @@ Praison AI, leveraging both AutoGen and CrewAI or any other agent framework, rep
 </picture>
 </div>

-##
+## Different User Interfaces:
+
+| Interface | Description | URL |
+|---|---|---|
+| **UI** | Multi Agents such as CrewAI or AutoGen | [https://docs.praison.ai/ui/ui](https://docs.praison.ai/ui/ui) |
+| **Chat** | Chat with 100+ LLMs, single AI Agent | [https://docs.praison.ai/ui/chat](https://docs.praison.ai/ui/chat) |
+| **Code** | Chat with entire Codebase, single AI Agent | [https://docs.praison.ai/ui/code](https://docs.praison.ai/ui/code) |
+
+## Google Colab Multi Agents

 | | Cookbook | Open in Colab |
 | ------------- | --------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
 | Basic | PraisonAI | <a target="_blank" href="https://colab.research.google.com/github/MervinPraison/PraisonAI/blob/main/cookbooks/praisonai-googlecolab.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> |
 | Include Tools | PraisonAI Tools | <a target="_blank" href="https://colab.research.google.com/github/MervinPraison/PraisonAI/blob/main/cookbooks/praisonai-tools-googlecolab.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> |

-##
+## Install
+
+| PraisonAI | PraisonAI Code | PraisonAI Chat |
+| --- | --- | --- |
+| `pip install praisonai` | `pip install "praisonai[code]"` | `pip install "praisonai[chat]"` |
+
+## Key Features
+
+- Automated AI Agents Creation
+- Use CrewAI or AutoGen Framework
+- 100+ LLM Support
+- Chat with ENTIRE Codebase
+- Interactive UIs
+- YAML-based Configuration
+- Custom Tool Integration
+
+## TL;DR Multi Agents

 ```bash
 pip install praisonai
@@ -85,14 +109,6 @@ praisonai --init create a movie script about dog in moon
 praisonai
 ```

-## Different User Interfaces:
-
-| Interface | Description | URL |
-|---|---|---|
-| **UI** | Multi Agents such as CrewAI or AutoGen | [https://docs.praison.ai/ui/ui](https://docs.praison.ai/ui/ui) |
-| **Chat** | Chat with 100+ LLMs, single AI Agent | [https://docs.praison.ai/ui/chat](https://docs.praison.ai/ui/chat) |
-| **Code** | Chat with entire Codebase, single AI Agent | [https://docs.praison.ai/ui/code](https://docs.praison.ai/ui/code) |
-
 ## Table of Contents

 - [Installation](#installation)
@@ -109,7 +125,7 @@ praisonai
 - [Contributing](#contributing)
 - [Star History](#star-history)

-## Installation
+## Installation Multi Agents

 ```bash
 pip install praisonai
@@ -195,56 +211,17 @@ export OPENAI_API_KEY="Enter your API key"
 praisonai chat
 ```

-##
-
-- https://docs.praison.ai/tools/custom/
-
-### Step 1: Pre-requisite to Create a Custom Tool
-
-`agents.yaml` file should be present in the current directory.
+## Praison AI Code

-
-
-
-
-Create a file called tools.py in the same directory as the agents.yaml file.
-
-```python
-# example tools.py
-from duckduckgo_search import DDGS
-from praisonai_tools import BaseTool
-
-class InternetSearchTool(BaseTool):
-    name: str = "InternetSearchTool"
-    description: str = "Search Internet for relevant information based on a query or latest news"
-
-    def _run(self, query: str):
-        ddgs = DDGS()
-        results = ddgs.text(keywords=query, region='wt-wt', safesearch='moderate', max_results=5)
-        return results
+```bash
+pip install "praisonai[code]"
+export OPENAI_API_KEY="Enter your API key"
+praisonai code
 ```

-
-
-Add the tool to the agents.yaml file as show below under the tools section `- InternetSearchTool`.
+## Create Custom Tools

-
-framework: crewai
-topic: research about the latest AI News and prepare a detailed report
-roles:
-  research_analyst:
-    backstory: Experienced in gathering and analyzing data related to AI news trends.
-    goal: Analyze AI News trends
-    role: Research Analyst
-    tasks:
-      gather_data:
-        description:
-          Conduct in-depth research on the latest AI News trends from reputable
-          sources.
-        expected_output: Comprehensive report on current AI News trends.
-        tools:
-        - InternetSearchTool
-```
+- https://docs.praison.ai/tools/custom/

 ## Agents Playbook
{praisonai-0.0.54.dist-info → praisonai-0.0.56.dist-info}/RECORD
CHANGED

@@ -4,7 +4,7 @@ praisonai/agents_generator.py,sha256=8d1WRbubvEkBrW1HZ7_xnGyqgJi0yxmXa3MgTIqef1c
 praisonai/auto.py,sha256=9spTXqj47Hmmqv5QHRYE_RzSVHH_KoPbaZjskUj2UcE,7895
 praisonai/chainlit_ui.py,sha256=bNR7s509lp0I9JlJNvwCZRUZosC64qdvlFCt8NmFamQ,12216
 praisonai/cli.py,sha256=VaVEJlc8c_aE2SBY6xN7WIbHrqNcXGR2xrDzFAsD2B8,14504
-praisonai/deploy.py,sha256=
+praisonai/deploy.py,sha256=BPJfWN_JL_pixuOd_upB24JN9_uMl8EMLz1hcn-k6eM,6028
 praisonai/inbuilt_tools/__init__.py,sha256=mUKnbL6Gram9c9f2m8wJwEzURBLmPEOcHzwySBH89YA,74
 praisonai/inbuilt_tools/autogen_tools.py,sha256=svYkM2N7DVFvbiwgoAS7U_MqTOD8rHf8VD3BaFUV5_Y,14907
 praisonai/inc/__init__.py,sha256=sPDlYBBwdk0VlWzaaM_lG0_LD07lS2HRGvPdxXJFiYg,62
@@ -21,10 +21,10 @@ praisonai/public/logo_dark.png,sha256=frHz1zkrnivGssJgk9iy1cabojkVgm8B4MllFwL_Cn
 praisonai/public/logo_light.png,sha256=8cQRti_Ysa30O3_7C3ku2w40LnVUUlUok47H-3ZZHSU,19656
 praisonai/public/movie.svg,sha256=aJ2EQ8vXZusVsF2SeuAVxP4RFJzQ14T26ejrGYdBgzk,1289
 praisonai/public/thriller.svg,sha256=2dYY72EcgbEyTxS4QzjAm37Y4srtPWEW4vCMFki98ZI,3163
-praisonai/test.py,sha256=
-praisonai/ui/chat.py,sha256=
-praisonai/ui/code.py,sha256=
-praisonai/ui/context.py,sha256=
+praisonai/test.py,sha256=OL-wesjA5JTohr8rtr6kWoaS4ImkJg2l0GXJ-dUUfRU,4090
+praisonai/ui/chat.py,sha256=WTi8XvRszZFvQr6Fgo8kYeCKCHVjYZQoQCTgQU5mHSc,9228
+praisonai/ui/code.py,sha256=Duuj4YGacbfsW_3GCxNEhDS-_oFEpL3He0g2cHbYBM4,10002
+praisonai/ui/context.py,sha256=xLVyRa8UDy1HJyMa7RSFz0Lkq4qQ-E4pPLfgzP51_k8,11281
 praisonai/ui/public/fantasy.svg,sha256=4Gs3kIOux-pjGtw6ogI_rv5_viVJxnE5gRwGilsSg0o,1553
 praisonai/ui/public/game.svg,sha256=y2QMaA01m8XzuDjTOBWzupOC3-TpnUl9ah89mIhviUw,2406
 praisonai/ui/public/logo_dark.png,sha256=frHz1zkrnivGssJgk9iy1cabojkVgm8B4MllFwL_CnI,17050
@@ -33,8 +33,8 @@ praisonai/ui/public/movie.svg,sha256=aJ2EQ8vXZusVsF2SeuAVxP4RFJzQ14T26ejrGYdBgzk
 praisonai/ui/public/thriller.svg,sha256=2dYY72EcgbEyTxS4QzjAm37Y4srtPWEW4vCMFki98ZI,3163
 praisonai/ui/sql_alchemy.py,sha256=HsyeRq-G9qbQobHWpTJHHKQiT4FvYw_7iuv-2PNh0IU,27419
 praisonai/version.py,sha256=ugyuFliEqtAwQmH4sTlc16YXKYbFWDmfyk87fErB8-8,21
-praisonai-0.0.
-praisonai-0.0.
-praisonai-0.0.
-praisonai-0.0.
-praisonai-0.0.
+praisonai-0.0.56.dist-info/LICENSE,sha256=kqvFysVlnFxYOu0HxCe2HlmZmJtdmNGOxWRRkT9TsWc,1035
+praisonai-0.0.56.dist-info/METADATA,sha256=n2YdXlMbHvGy8ZLzC0WbZQaW4tpfYI12ZGAazHHDlT8,11126
+praisonai-0.0.56.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+praisonai-0.0.56.dist-info/entry_points.txt,sha256=Qg41eW3A1-dvdV5tF7LqChfYof8Rihk2rN1fiEE3vnk,53
+praisonai-0.0.56.dist-info/RECORD,,
{praisonai-0.0.54.dist-info → praisonai-0.0.56.dist-info}/LICENSE
File without changes

{praisonai-0.0.54.dist-info → praisonai-0.0.56.dist-info}/WHEEL
File without changes

{praisonai-0.0.54.dist-info → praisonai-0.0.56.dist-info}/entry_points.txt
File without changes