PraisonAI 0.0.54__tar.gz → 0.0.56__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of PraisonAI has been flagged as possibly problematic.

Files changed (39)
  1. {praisonai-0.0.54 → praisonai-0.0.56}/PKG-INFO +35 -58
  2. {praisonai-0.0.54 → praisonai-0.0.56}/README.md +34 -57
  3. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/deploy.py +1 -1
  4. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/test.py +1 -1
  5. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/ui/chat.py +5 -5
  6. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/ui/code.py +5 -5
  7. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/ui/context.py +66 -20
  8. {praisonai-0.0.54 → praisonai-0.0.56}/pyproject.toml +1 -1
  9. {praisonai-0.0.54 → praisonai-0.0.56}/LICENSE +0 -0
  10. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/__init__.py +0 -0
  11. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/__main__.py +0 -0
  12. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/agents_generator.py +0 -0
  13. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/auto.py +0 -0
  14. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/chainlit_ui.py +0 -0
  15. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/cli.py +0 -0
  16. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/inbuilt_tools/__init__.py +0 -0
  17. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/inbuilt_tools/autogen_tools.py +0 -0
  18. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/inc/__init__.py +0 -0
  19. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/inc/models.py +0 -0
  20. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/public/android-chrome-192x192.png +0 -0
  21. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/public/android-chrome-512x512.png +0 -0
  22. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/public/apple-touch-icon.png +0 -0
  23. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/public/fantasy.svg +0 -0
  24. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/public/favicon-16x16.png +0 -0
  25. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/public/favicon-32x32.png +0 -0
  26. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/public/favicon.ico +0 -0
  27. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/public/game.svg +0 -0
  28. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/public/logo_dark.png +0 -0
  29. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/public/logo_light.png +0 -0
  30. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/public/movie.svg +0 -0
  31. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/public/thriller.svg +0 -0
  32. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/ui/public/fantasy.svg +0 -0
  33. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/ui/public/game.svg +0 -0
  34. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/ui/public/logo_dark.png +0 -0
  35. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/ui/public/logo_light.png +0 -0
  36. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/ui/public/movie.svg +0 -0
  37. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/ui/public/thriller.svg +0 -0
  38. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/ui/sql_alchemy.py +0 -0
  39. {praisonai-0.0.54 → praisonai-0.0.56}/praisonai/version.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: PraisonAI
- Version: 0.0.54
+ Version: 0.0.56
  Summary: PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration.
  Author: Mervin Praison
  Requires-Python: >=3.10,<3.13
@@ -69,14 +69,38 @@ Praison AI, leveraging both AutoGen and CrewAI or any other agent framework, rep
  </picture>
  </div>

- ## Google Colab
+ ## Different User Interfaces:
+
+ | Interface | Description | URL |
+ |---|---|---|
+ | **UI** | Multi Agents such as CrewAI or AutoGen | [https://docs.praison.ai/ui/ui](https://docs.praison.ai/ui/ui) |
+ | **Chat** | Chat with 100+ LLMs, single AI Agent | [https://docs.praison.ai/ui/chat](https://docs.praison.ai/ui/chat) |
+ | **Code** | Chat with entire Codebase, single AI Agent | [https://docs.praison.ai/ui/code](https://docs.praison.ai/ui/code) |
+
+ ## Google Colab Multi Agents

  | | Cookbook | Open in Colab |
  | ------------- | --------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
  | Basic | PraisonAI | <a target="_blank" href="https://colab.research.google.com/github/MervinPraison/PraisonAI/blob/main/cookbooks/praisonai-googlecolab.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> |
  | Include Tools | PraisonAI Tools | <a target="_blank" href="https://colab.research.google.com/github/MervinPraison/PraisonAI/blob/main/cookbooks/praisonai-tools-googlecolab.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> |

- ## TL;DR
+ ## Install
+
+ | PraisonAI | PraisonAI Code | PraisonAI Chat |
+ | --- | --- | --- |
+ | `pip install praisonai` | `pip install "praisonai[code]"` | `pip install "praisonai[chat]"` |
+
+ ## Key Features
+
+ - Automated AI Agents Creation
+ - Use CrewAI or AutoGen Framework
+ - 100+ LLM Support
+ - Chat with ENTIRE Codebase
+ - Interactive UIs
+ - YAML-based Configuration
+ - Custom Tool Integration
+
+ ## TL;DR Multi Agents

  ```bash
  pip install praisonai
@@ -85,14 +109,6 @@ praisonai --init create a movie script about dog in moon
  praisonai
  ```

- ## Different User Interfaces:
-
- | Interface | Description | URL |
- |---|---|---|
- | **UI** | Multi Agents such as CrewAI or AutoGen | [https://docs.praison.ai/ui/ui](https://docs.praison.ai/ui/ui) |
- | **Chat** | Chat with 100+ LLMs, single AI Agent | [https://docs.praison.ai/ui/chat](https://docs.praison.ai/ui/chat) |
- | **Code** | Chat with entire Codebase, single AI Agent | [https://docs.praison.ai/ui/code](https://docs.praison.ai/ui/code) |
-
  ## Table of Contents

  - [Installation](#installation)
@@ -109,7 +125,7 @@ praisonai
  - [Contributing](#contributing)
  - [Star History](#star-history)

- ## Installation
+ ## Installation Multi Agents

  ```bash
  pip install praisonai
@@ -195,56 +211,17 @@ export OPENAI_API_KEY="Enter your API key"
  praisonai chat
  ```

- ## Create Custom Tools
-
- - https://docs.praison.ai/tools/custom/
-
- ### Step 1: Pre-requisite to Create a Custom Tool
-
- `agents.yaml` file should be present in the current directory.
+ ## Praison AI Code

- If it doesn't exist, create it by running the command `praisonai --init research about the latest AI News and prepare a detailed report`.
-
- ### Step 2: to Create a Custom Tool
-
- Create a file called tools.py in the same directory as the agents.yaml file.
-
- ```python
- # example tools.py
- from duckduckgo_search import DDGS
- from praisonai_tools import BaseTool
-
- class InternetSearchTool(BaseTool):
- name: str = "InternetSearchTool"
- description: str = "Search Internet for relevant information based on a query or latest news"
-
- def _run(self, query: str):
- ddgs = DDGS()
- results = ddgs.text(keywords=query, region='wt-wt', safesearch='moderate', max_results=5)
- return results
+ ```bash
+ pip install "praisonai[code]"
+ export OPENAI_API_KEY="Enter your API key"
+ praisonai code
  ```

- ### Step 3: to Create a Custom Tool
-
- Add the tool to the agents.yaml file as show below under the tools section `- InternetSearchTool`.
+ ## Create Custom Tools

- ```yaml
- framework: crewai
- topic: research about the latest AI News and prepare a detailed report
- roles:
- research_analyst:
- backstory: Experienced in gathering and analyzing data related to AI news trends.
- goal: Analyze AI News trends
- role: Research Analyst
- tasks:
- gather_data:
- description:
- Conduct in-depth research on the latest AI News trends from reputable
- sources.
- expected_output: Comprehensive report on current AI News trends.
- tools:
- - InternetSearchTool
- ```
+ - https://docs.praison.ai/tools/custom/

  ## Agents Playbook


README.md

@@ -28,14 +28,38 @@ Praison AI, leveraging both AutoGen and CrewAI or any other agent framework, rep
  </picture>
  </div>

- ## Google Colab
+ ## Different User Interfaces:
+
+ | Interface | Description | URL |
+ |---|---|---|
+ | **UI** | Multi Agents such as CrewAI or AutoGen | [https://docs.praison.ai/ui/ui](https://docs.praison.ai/ui/ui) |
+ | **Chat** | Chat with 100+ LLMs, single AI Agent | [https://docs.praison.ai/ui/chat](https://docs.praison.ai/ui/chat) |
+ | **Code** | Chat with entire Codebase, single AI Agent | [https://docs.praison.ai/ui/code](https://docs.praison.ai/ui/code) |
+
+ ## Google Colab Multi Agents

  | | Cookbook | Open in Colab |
  | ------------- | --------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
  | Basic | PraisonAI | <a target="_blank" href="https://colab.research.google.com/github/MervinPraison/PraisonAI/blob/main/cookbooks/praisonai-googlecolab.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> |
  | Include Tools | PraisonAI Tools | <a target="_blank" href="https://colab.research.google.com/github/MervinPraison/PraisonAI/blob/main/cookbooks/praisonai-tools-googlecolab.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> |

- ## TL;DR
+ ## Install
+
+ | PraisonAI | PraisonAI Code | PraisonAI Chat |
+ | --- | --- | --- |
+ | `pip install praisonai` | `pip install "praisonai[code]"` | `pip install "praisonai[chat]"` |
+
+ ## Key Features
+
+ - Automated AI Agents Creation
+ - Use CrewAI or AutoGen Framework
+ - 100+ LLM Support
+ - Chat with ENTIRE Codebase
+ - Interactive UIs
+ - YAML-based Configuration
+ - Custom Tool Integration
+
+ ## TL;DR Multi Agents

  ```bash
  pip install praisonai
@@ -44,14 +68,6 @@ praisonai --init create a movie script about dog in moon
  praisonai
  ```

- ## Different User Interfaces:
-
- | Interface | Description | URL |
- |---|---|---|
- | **UI** | Multi Agents such as CrewAI or AutoGen | [https://docs.praison.ai/ui/ui](https://docs.praison.ai/ui/ui) |
- | **Chat** | Chat with 100+ LLMs, single AI Agent | [https://docs.praison.ai/ui/chat](https://docs.praison.ai/ui/chat) |
- | **Code** | Chat with entire Codebase, single AI Agent | [https://docs.praison.ai/ui/code](https://docs.praison.ai/ui/code) |
-
  ## Table of Contents

  - [Installation](#installation)
@@ -68,7 +84,7 @@ praisonai
  - [Contributing](#contributing)
  - [Star History](#star-history)

- ## Installation
+ ## Installation Multi Agents

  ```bash
  pip install praisonai
@@ -154,56 +170,17 @@ export OPENAI_API_KEY="Enter your API key"
  praisonai chat
  ```

- ## Create Custom Tools
-
- - https://docs.praison.ai/tools/custom/
-
- ### Step 1: Pre-requisite to Create a Custom Tool
-
- `agents.yaml` file should be present in the current directory.
+ ## Praison AI Code

- If it doesn't exist, create it by running the command `praisonai --init research about the latest AI News and prepare a detailed report`.
-
- ### Step 2: to Create a Custom Tool
-
- Create a file called tools.py in the same directory as the agents.yaml file.
-
- ```python
- # example tools.py
- from duckduckgo_search import DDGS
- from praisonai_tools import BaseTool
-
- class InternetSearchTool(BaseTool):
- name: str = "InternetSearchTool"
- description: str = "Search Internet for relevant information based on a query or latest news"
-
- def _run(self, query: str):
- ddgs = DDGS()
- results = ddgs.text(keywords=query, region='wt-wt', safesearch='moderate', max_results=5)
- return results
+ ```bash
+ pip install "praisonai[code]"
+ export OPENAI_API_KEY="Enter your API key"
+ praisonai code
  ```

- ### Step 3: to Create a Custom Tool
-
- Add the tool to the agents.yaml file as show below under the tools section `- InternetSearchTool`.
+ ## Create Custom Tools

- ```yaml
- framework: crewai
- topic: research about the latest AI News and prepare a detailed report
- roles:
- research_analyst:
- backstory: Experienced in gathering and analyzing data related to AI news trends.
- goal: Analyze AI News trends
- role: Research Analyst
- tasks:
- gather_data:
- description:
- Conduct in-depth research on the latest AI News trends from reputable
- sources.
- expected_output: Comprehensive report on current AI News trends.
- tools:
- - InternetSearchTool
- ```
+ - https://docs.praison.ai/tools/custom/

  ## Agents Playbook


praisonai/deploy.py

@@ -56,7 +56,7 @@ class CloudDeployer:
  file.write("FROM python:3.11-slim\n")
  file.write("WORKDIR /app\n")
  file.write("COPY . .\n")
- file.write("RUN pip install flask praisonai==0.0.54 gunicorn markdown\n")
+ file.write("RUN pip install flask praisonai==0.0.56 gunicorn markdown\n")
  file.write("EXPOSE 8080\n")
  file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
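Note: pieced together only from the file.write lines visible in the hunk above, the Dockerfile that CloudDeployer now emits would look roughly like this; the real generated file may contain lines outside this hunk.

```Dockerfile
# Sketch of the generated Dockerfile, reconstructed from the hunk above.
FROM python:3.11-slim
WORKDIR /app
COPY . .
RUN pip install flask praisonai==0.0.56 gunicorn markdown
EXPOSE 8080
CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]
```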
 
praisonai/test.py

@@ -7,7 +7,7 @@ load_dotenv()
  import autogen
  config_list = [
  {
- 'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-3.5-turbo"),
+ 'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini"),
  'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
  'api_key': os.environ.get("OPENAI_API_KEY")
  }
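Note: the value changed above is only a fallback; test.py builds its AutoGen config_list from the environment first. A minimal shell sketch, where the model name is an illustration rather than a project default:

```bash
# Override the defaults read by praisonai/test.py via os.environ.get(...)
export OPENAI_MODEL_NAME="gpt-4o"                   # otherwise gpt-4o-mini is used
export OPENAI_API_BASE="https://api.openai.com/v1"  # optional, already the default
export OPENAI_API_KEY="Enter your API key"
```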
praisonai/ui/chat.py

@@ -179,7 +179,7 @@ async def start():
  cl.user_session.set("model_name", model_name)
  else:
  # If no setting found, use default or environment variable
- model_name = os.getenv("MODEL_NAME", "gpt-3.5-turbo")
+ model_name = os.getenv("MODEL_NAME", "gpt-4o-mini")
  cl.user_session.set("model_name", model_name)
  logger.debug(f"Model name: {model_name}")
  settings = cl.ChatSettings(
@@ -187,7 +187,7 @@ async def start():
  TextInput(
  id="model_name",
  label="Enter the Model Name",
- placeholder="e.g., gpt-3.5-turbo",
+ placeholder="e.g., gpt-4o-mini",
  initial=model_name
  )
  ]
@@ -221,7 +221,7 @@ async def setup_agent(settings):

  @cl.on_message
  async def main(message: cl.Message):
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-3.5-turbo"
+ model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
  message_history = cl.user_session.get("message_history", [])
  message_history.append({"role": "user", "content": message.content})

@@ -268,14 +268,14 @@ async def send_count():
  @cl.on_chat_resume
  async def on_chat_resume(thread: cl_data.ThreadDict):
  logger.info(f"Resuming chat: {thread['id']}")
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-3.5-turbo"
+ model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
  logger.debug(f"Model name: {model_name}")
  settings = cl.ChatSettings(
  [
  TextInput(
  id="model_name",
  label="Enter the Model Name",
- placeholder="e.g., gpt-3.5-turbo",
+ placeholder="e.g., gpt-4o-mini",
  initial=model_name
  )
  ]

praisonai/ui/code.py

@@ -180,7 +180,7 @@ async def start():
  cl.user_session.set("model_name", model_name)
  else:
  # If no setting found, use default or environment variable
- model_name = os.getenv("MODEL_NAME", "gpt-3.5-turbo")
+ model_name = os.getenv("MODEL_NAME", "gpt-4o-mini")
  cl.user_session.set("model_name", model_name)
  logger.debug(f"Model name: {model_name}")
  settings = cl.ChatSettings(
@@ -188,7 +188,7 @@ async def start():
  TextInput(
  id="model_name",
  label="Enter the Model Name",
- placeholder="e.g., gpt-3.5-turbo",
+ placeholder="e.g., gpt-4o-mini",
  initial=model_name
  )
  ]
@@ -228,7 +228,7 @@ async def setup_agent(settings):

  @cl.on_message
  async def main(message: cl.Message):
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-3.5-turbo"
+ model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
  message_history = cl.user_session.get("message_history", [])
  message_history.append({"role": "user", "content": message.content})
  gatherer = ContextGatherer()
@@ -282,14 +282,14 @@ async def send_count():
  @cl.on_chat_resume
  async def on_chat_resume(thread: cl_data.ThreadDict):
  logger.info(f"Resuming chat: {thread['id']}")
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-3.5-turbo"
+ model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
  logger.debug(f"Model name: {model_name}")
  settings = cl.ChatSettings(
  [
  TextInput(
  id="model_name",
  label="Enter the Model Name",
- placeholder="e.g., gpt-3.5-turbo",
+ placeholder="e.g., gpt-4o-mini",
  initial=model_name
  )
  ]
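Note: in both UIs the model is resolved as saved UI setting → MODEL_NAME environment variable → the new gpt-4o-mini default. A hedged example of launching the Code UI with an explicit model, where the model name is purely illustrative:

```bash
pip install "praisonai[code]"
export OPENAI_API_KEY="Enter your API key"
export MODEL_NAME="gpt-4o"   # optional; without it the UI falls back to gpt-4o-mini
praisonai code
```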

praisonai/ui/context.py

@@ -1,6 +1,5 @@
  import os
  import fnmatch
- import re
  import yaml
  from pathlib import Path
  import logging
@@ -34,10 +33,12 @@ class ContextGatherer:
  self.max_file_size = max_file_size
  self.max_tokens = int(os.getenv("PRAISONAI_MAX_TOKENS", max_tokens))
  self.ignore_patterns = self.get_ignore_patterns()
+ self.include_paths = self.get_include_paths()
+ self.included_files = []

  def get_ignore_patterns(self):
  """
- Loads ignore patterns from various sources, prioritizing them in 
+ Loads ignore patterns from various sources, prioritizing them in
  the following order:
  1. .praisonignore
  2. settings.yaml (under code.ignore_files)
@@ -95,6 +96,19 @@ class ContextGatherer:
  logger.debug(f"Final ignore patterns: {modified_ignore_patterns}")
  return modified_ignore_patterns

+ def get_include_paths(self):
+ include_paths = []
+
+ # 1. Load from .praisoninclude
+ include_file = os.path.join(self.directory, '.praisoninclude')
+ if os.path.exists(include_file):
+ with open(include_file, 'r') as f:
+ include_paths.extend(
+ line.strip() for line in f
+ if line.strip() and not line.startswith('#')
+ )
+ return include_paths
+
  def should_ignore(self, file_path):
  """
  Check if a file or directory should be ignored based on patterns.
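Note: the new get_include_paths method reads an optional .praisoninclude file from the scanned directory; blank lines and lines starting with # are skipped, and every other line is treated as a path relative to that directory. A hypothetical example, with placeholder paths:

```
# .praisoninclude: only these paths are gathered into the context
src/
docs/architecture.md
```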

@@ -116,31 +130,65 @@ class ContextGatherer:
  any(file_path.endswith(ext) for ext in self.relevant_extensions)

  def gather_context(self):
- """Gather context from relevant files, respecting ignore patterns."""
+ """Gather context from relevant files, respecting ignore patterns and include paths."""
  context = []
  total_files = 0
  processed_files = 0

- for root, dirs, files in os.walk(self.directory):
- total_files += len(files)
- dirs[:] = [d for d in dirs if not self.should_ignore(os.path.join(root, d))]
- for file in files:
- file_path = os.path.join(root, file)
- if not self.should_ignore(file_path) and self.is_relevant_file(file_path):
+ if not self.include_paths:
+ # No include paths specified, process the entire directory
+ for root, dirs, files in os.walk(self.directory):
+ total_files += len(files)
+ dirs[:] = [d for d in dirs if not self.should_ignore(os.path.join(root, d))]
+ for file in files:
+ file_path = os.path.join(root, file)
+ if not self.should_ignore(file_path) and self.is_relevant_file(file_path):
+ try:
+ with open(file_path, 'r', encoding='utf-8') as f:
+ content = f.read()
+ context.append(f"File: {file_path}\n\n{content}\n\n{'='*50}\n")
+ self.included_files.append(Path(file_path).relative_to(self.directory))
+ except Exception as e:
+ logger.error(f"Error reading {file_path}: {e}")
+ processed_files += 1
+ print(f"\rProcessed {processed_files}/{total_files} files", end="", flush=True)
+ else:
+ # Process specified include paths
+ for include_path in self.include_paths:
+ full_path = os.path.join(self.directory, include_path)
+ if os.path.isdir(full_path):
+ for root, dirs, files in os.walk(full_path):
+ total_files += len(files)
+ dirs[:] = [d for d in dirs if not self.should_ignore(os.path.join(root, d))]
+ for file in files:
+ file_path = os.path.join(root, file)
+ if not self.should_ignore(file_path) and self.is_relevant_file(file_path):
+ try:
+ with open(file_path, 'r', encoding='utf-8') as f:
+ content = f.read()
+ context.append(f"File: {file_path}\n\n{content}\n\n{'='*50}\n")
+ self.included_files.append(Path(file_path).relative_to(self.directory))
+ except Exception as e:
+ logger.error(f"Error reading {file_path}: {e}")
+ processed_files += 1
+ print(f"\rProcessed {processed_files}/{total_files} files", end="", flush=True)
+ elif os.path.isfile(full_path) and self.is_relevant_file(full_path):
  try:
- with open(file_path, 'r', encoding='utf-8') as f:
+ with open(full_path, 'r', encoding='utf-8') as f:
  content = f.read()
- context.append(f"File: {file_path}\n\n{content}\n\n{'='*50}\n")
+ context.append(f"File: {full_path}\n\n{content}\n\n{'='*50}\n")
+ self.included_files.append(Path(full_path).relative_to(self.directory))
  except Exception as e:
- logger.error(f"Error reading {file_path}: {e}")
- processed_files += 1
- print(f"\rProcessed {processed_files}/{total_files} files", end="", flush=True)
+ logger.error(f"Error reading {full_path}: {e}")
+ processed_files += 1
+ print(f"\rProcessed {processed_files}/{total_files} files", end="", flush=True)
+
  print() # New line after progress indicator
  return '\n'.join(context)

  def count_tokens(self, text):
  """Count tokens using a simple whitespace-based tokenizer."""
- return len(text.split()) 
+ return len(text.split())

  def truncate_context(self, context):
  """Truncate context to stay within the token limit."""
@@ -165,12 +213,9 @@ class ContextGatherer:
  contents = sorted(path.iterdir())
  pointers = [('└── ' if i == len(contents) - 1 else '├── ') for i in range(len(contents))]
  for pointer, item in zip(pointers, contents):
- # Use should_ignore for consistency
- if self.should_ignore(item):
- continue
-
  rel_path = item.relative_to(start_dir)
- tree.append(f"{prefix}{pointer}{rel_path}")
+ if rel_path in self.included_files:
+ tree.append(f"{prefix}{pointer}{rel_path}")

  if item.is_dir():
  add_to_tree(item, prefix + (' ' if pointer == '└── ' else '│ '))
@@ -193,6 +238,7 @@ class ContextGatherer:
  def main():
  gatherer = ContextGatherer()
  context, token_count, context_tree = gatherer.run()
+ print(context_tree)
  print(f"\nThe context contains approximately {token_count} tokens.")
  print("First 500 characters of context:")
  print(context[:500] + "...")

pyproject.toml

@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "PraisonAI"
- version = "0.0.54"
+ version = "0.0.56"
  description = "PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration."
  authors = ["Mervin Praison"]
  license = ""