cognee 0.3.6__py3-none-any.whl → 0.3.7__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries, as they appear in those registries. It is provided for informational purposes only.
Files changed (159)
  1. cognee/__init__.py +1 -0
  2. cognee/api/health.py +2 -12
  3. cognee/api/v1/add/add.py +46 -6
  4. cognee/api/v1/add/routers/get_add_router.py +5 -1
  5. cognee/api/v1/cognify/cognify.py +29 -9
  6. cognee/api/v1/datasets/datasets.py +11 -0
  7. cognee/api/v1/responses/default_tools.py +0 -1
  8. cognee/api/v1/responses/dispatch_function.py +1 -1
  9. cognee/api/v1/responses/routers/default_tools.py +0 -1
  10. cognee/api/v1/search/search.py +11 -9
  11. cognee/api/v1/settings/routers/get_settings_router.py +7 -1
  12. cognee/api/v1/ui/ui.py +47 -16
  13. cognee/api/v1/update/routers/get_update_router.py +1 -1
  14. cognee/api/v1/update/update.py +3 -3
  15. cognee/cli/_cognee.py +61 -10
  16. cognee/cli/commands/add_command.py +3 -3
  17. cognee/cli/commands/cognify_command.py +3 -3
  18. cognee/cli/commands/config_command.py +9 -7
  19. cognee/cli/commands/delete_command.py +3 -3
  20. cognee/cli/commands/search_command.py +3 -7
  21. cognee/cli/config.py +0 -1
  22. cognee/context_global_variables.py +5 -0
  23. cognee/exceptions/exceptions.py +1 -1
  24. cognee/infrastructure/databases/cache/__init__.py +2 -0
  25. cognee/infrastructure/databases/cache/cache_db_interface.py +79 -0
  26. cognee/infrastructure/databases/cache/config.py +44 -0
  27. cognee/infrastructure/databases/cache/get_cache_engine.py +67 -0
  28. cognee/infrastructure/databases/cache/redis/RedisAdapter.py +243 -0
  29. cognee/infrastructure/databases/exceptions/__init__.py +1 -0
  30. cognee/infrastructure/databases/exceptions/exceptions.py +18 -2
  31. cognee/infrastructure/databases/graph/get_graph_engine.py +1 -1
  32. cognee/infrastructure/databases/graph/graph_db_interface.py +5 -0
  33. cognee/infrastructure/databases/graph/kuzu/adapter.py +67 -44
  34. cognee/infrastructure/databases/graph/neo4j_driver/adapter.py +13 -3
  35. cognee/infrastructure/databases/graph/neo4j_driver/deadlock_retry.py +1 -1
  36. cognee/infrastructure/databases/graph/neptune_driver/neptune_utils.py +1 -1
  37. cognee/infrastructure/databases/relational/sqlalchemy/SqlAlchemyAdapter.py +1 -1
  38. cognee/infrastructure/databases/vector/embeddings/FastembedEmbeddingEngine.py +21 -3
  39. cognee/infrastructure/databases/vector/embeddings/LiteLLMEmbeddingEngine.py +17 -10
  40. cognee/infrastructure/databases/vector/embeddings/OllamaEmbeddingEngine.py +17 -4
  41. cognee/infrastructure/databases/vector/embeddings/config.py +2 -3
  42. cognee/infrastructure/databases/vector/exceptions/exceptions.py +1 -1
  43. cognee/infrastructure/databases/vector/lancedb/LanceDBAdapter.py +0 -1
  44. cognee/infrastructure/files/exceptions.py +1 -1
  45. cognee/infrastructure/files/storage/LocalFileStorage.py +9 -9
  46. cognee/infrastructure/files/storage/S3FileStorage.py +11 -11
  47. cognee/infrastructure/files/utils/guess_file_type.py +6 -0
  48. cognee/infrastructure/llm/prompts/search_type_selector_prompt.txt +0 -5
  49. cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/anthropic/adapter.py +19 -9
  50. cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/gemini/adapter.py +17 -5
  51. cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/generic_llm_api/adapter.py +17 -5
  52. cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/get_llm_client.py +32 -0
  53. cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/mistral/__init__.py +0 -0
  54. cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/mistral/adapter.py +109 -0
  55. cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/ollama/adapter.py +33 -8
  56. cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py +40 -18
  57. cognee/infrastructure/loaders/LoaderEngine.py +27 -7
  58. cognee/infrastructure/loaders/external/__init__.py +7 -0
  59. cognee/infrastructure/loaders/external/advanced_pdf_loader.py +2 -8
  60. cognee/infrastructure/loaders/external/beautiful_soup_loader.py +310 -0
  61. cognee/infrastructure/loaders/supported_loaders.py +7 -0
  62. cognee/modules/data/exceptions/exceptions.py +1 -1
  63. cognee/modules/data/methods/__init__.py +3 -0
  64. cognee/modules/data/methods/get_dataset_data.py +4 -1
  65. cognee/modules/data/methods/has_dataset_data.py +21 -0
  66. cognee/modules/engine/models/TableRow.py +0 -1
  67. cognee/modules/ingestion/save_data_to_file.py +9 -2
  68. cognee/modules/pipelines/exceptions/exceptions.py +1 -1
  69. cognee/modules/pipelines/operations/pipeline.py +12 -1
  70. cognee/modules/pipelines/operations/run_tasks.py +25 -197
  71. cognee/modules/pipelines/operations/run_tasks_data_item.py +260 -0
  72. cognee/modules/pipelines/operations/run_tasks_distributed.py +121 -38
  73. cognee/modules/retrieval/EntityCompletionRetriever.py +48 -8
  74. cognee/modules/retrieval/base_graph_retriever.py +3 -1
  75. cognee/modules/retrieval/base_retriever.py +3 -1
  76. cognee/modules/retrieval/chunks_retriever.py +5 -1
  77. cognee/modules/retrieval/code_retriever.py +20 -2
  78. cognee/modules/retrieval/completion_retriever.py +50 -9
  79. cognee/modules/retrieval/cypher_search_retriever.py +11 -1
  80. cognee/modules/retrieval/graph_completion_context_extension_retriever.py +47 -8
  81. cognee/modules/retrieval/graph_completion_cot_retriever.py +32 -1
  82. cognee/modules/retrieval/graph_completion_retriever.py +54 -10
  83. cognee/modules/retrieval/lexical_retriever.py +20 -2
  84. cognee/modules/retrieval/natural_language_retriever.py +10 -1
  85. cognee/modules/retrieval/summaries_retriever.py +5 -1
  86. cognee/modules/retrieval/temporal_retriever.py +62 -10
  87. cognee/modules/retrieval/user_qa_feedback.py +3 -2
  88. cognee/modules/retrieval/utils/completion.py +5 -0
  89. cognee/modules/retrieval/utils/description_to_codepart_search.py +1 -1
  90. cognee/modules/retrieval/utils/session_cache.py +156 -0
  91. cognee/modules/search/methods/get_search_type_tools.py +0 -5
  92. cognee/modules/search/methods/no_access_control_search.py +12 -1
  93. cognee/modules/search/methods/search.py +34 -2
  94. cognee/modules/search/types/SearchType.py +0 -1
  95. cognee/modules/settings/get_settings.py +23 -0
  96. cognee/modules/users/methods/get_authenticated_user.py +3 -1
  97. cognee/modules/users/methods/get_default_user.py +1 -6
  98. cognee/modules/users/roles/methods/create_role.py +2 -2
  99. cognee/modules/users/tenants/methods/create_tenant.py +2 -2
  100. cognee/shared/exceptions/exceptions.py +1 -1
  101. cognee/tasks/codingagents/coding_rule_associations.py +1 -2
  102. cognee/tasks/documents/exceptions/exceptions.py +1 -1
  103. cognee/tasks/graph/extract_graph_from_data.py +2 -0
  104. cognee/tasks/ingestion/data_item_to_text_file.py +3 -3
  105. cognee/tasks/ingestion/ingest_data.py +11 -5
  106. cognee/tasks/ingestion/save_data_item_to_storage.py +12 -1
  107. cognee/tasks/storage/add_data_points.py +3 -10
  108. cognee/tasks/storage/index_data_points.py +19 -14
  109. cognee/tasks/storage/index_graph_edges.py +25 -11
  110. cognee/tasks/web_scraper/__init__.py +34 -0
  111. cognee/tasks/web_scraper/config.py +26 -0
  112. cognee/tasks/web_scraper/default_url_crawler.py +446 -0
  113. cognee/tasks/web_scraper/models.py +46 -0
  114. cognee/tasks/web_scraper/types.py +4 -0
  115. cognee/tasks/web_scraper/utils.py +142 -0
  116. cognee/tasks/web_scraper/web_scraper_task.py +396 -0
  117. cognee/tests/cli_tests/cli_unit_tests/test_cli_utils.py +0 -1
  118. cognee/tests/integration/web_url_crawler/test_default_url_crawler.py +13 -0
  119. cognee/tests/integration/web_url_crawler/test_tavily_crawler.py +19 -0
  120. cognee/tests/integration/web_url_crawler/test_url_adding_e2e.py +344 -0
  121. cognee/tests/subprocesses/reader.py +25 -0
  122. cognee/tests/subprocesses/simple_cognify_1.py +31 -0
  123. cognee/tests/subprocesses/simple_cognify_2.py +31 -0
  124. cognee/tests/subprocesses/writer.py +32 -0
  125. cognee/tests/tasks/descriptive_metrics/metrics_test_utils.py +0 -2
  126. cognee/tests/tasks/descriptive_metrics/neo4j_metrics_test.py +8 -3
  127. cognee/tests/tasks/entity_extraction/entity_extraction_test.py +89 -0
  128. cognee/tests/tasks/web_scraping/web_scraping_test.py +172 -0
  129. cognee/tests/test_add_docling_document.py +56 -0
  130. cognee/tests/test_chromadb.py +7 -11
  131. cognee/tests/test_concurrent_subprocess_access.py +76 -0
  132. cognee/tests/test_conversation_history.py +240 -0
  133. cognee/tests/test_kuzu.py +27 -15
  134. cognee/tests/test_lancedb.py +7 -11
  135. cognee/tests/test_library.py +32 -2
  136. cognee/tests/test_neo4j.py +24 -16
  137. cognee/tests/test_neptune_analytics_vector.py +7 -11
  138. cognee/tests/test_permissions.py +9 -13
  139. cognee/tests/test_pgvector.py +4 -4
  140. cognee/tests/test_remote_kuzu.py +8 -11
  141. cognee/tests/test_s3_file_storage.py +1 -1
  142. cognee/tests/test_search_db.py +6 -8
  143. cognee/tests/unit/infrastructure/databases/cache/test_cache_config.py +89 -0
  144. cognee/tests/unit/modules/retrieval/conversation_history_test.py +154 -0
  145. {cognee-0.3.6.dist-info → cognee-0.3.7.dist-info}/METADATA +21 -6
  146. {cognee-0.3.6.dist-info → cognee-0.3.7.dist-info}/RECORD +155 -126
  147. {cognee-0.3.6.dist-info → cognee-0.3.7.dist-info}/entry_points.txt +1 -0
  148. distributed/Dockerfile +0 -3
  149. distributed/entrypoint.py +21 -9
  150. distributed/signal.py +5 -0
  151. distributed/workers/data_point_saving_worker.py +64 -34
  152. distributed/workers/graph_saving_worker.py +71 -47
  153. cognee/infrastructure/databases/graph/memgraph/memgraph_adapter.py +0 -1116
  154. cognee/modules/retrieval/insights_retriever.py +0 -133
  155. cognee/tests/test_memgraph.py +0 -109
  156. cognee/tests/unit/modules/retrieval/insights_retriever_test.py +0 -251
  157. {cognee-0.3.6.dist-info → cognee-0.3.7.dist-info}/WHEEL +0 -0
  158. {cognee-0.3.6.dist-info → cognee-0.3.7.dist-info}/licenses/LICENSE +0 -0
  159. {cognee-0.3.6.dist-info → cognee-0.3.7.dist-info}/licenses/NOTICE.md +0 -0
cognee/infrastructure/loaders/external/beautiful_soup_loader.py
@@ -0,0 +1,310 @@
+"""BeautifulSoup-based web crawler for extracting content from web pages.
+
+This module provides the BeautifulSoupCrawler class for fetching and extracting content
+from web pages using BeautifulSoup or Playwright for JavaScript-rendered pages. It
+supports robots.txt handling, rate limiting, and custom extraction rules.
+"""
+
+from typing import Union, Dict, Any, Optional, List
+from dataclasses import dataclass
+from bs4 import BeautifulSoup
+from cognee.infrastructure.loaders.LoaderInterface import LoaderInterface
+from cognee.shared.logging_utils import get_logger
+
+logger = get_logger(__name__)
+
+
+@dataclass
+class ExtractionRule:
+    """Normalized extraction rule for web content.
+
+    Attributes:
+        selector: CSS selector for extraction (if any).
+        xpath: XPath expression for extraction (if any).
+        attr: HTML attribute to extract (if any).
+        all: If True, extract all matching elements; otherwise, extract first.
+        join_with: String to join multiple extracted elements.
+    """
+
+    selector: Optional[str] = None
+    xpath: Optional[str] = None
+    attr: Optional[str] = None
+    all: bool = False
+    join_with: str = " "
+
+
+class BeautifulSoupLoader(LoaderInterface):
+    """Crawler for fetching and extracting web content using BeautifulSoup.
+
+    Supports asynchronous HTTP requests, Playwright for JavaScript rendering, robots.txt
+    compliance, and rate limiting. Extracts content using CSS selectors or XPath rules.
+
+    Attributes:
+        concurrency: Number of concurrent requests allowed.
+        crawl_delay: Minimum seconds between requests to the same domain.
+        max_crawl_delay: Maximum crawl delay to respect from robots.txt (None = no limit).
+        timeout: Per-request timeout in seconds.
+        max_retries: Number of retries for failed requests.
+        retry_delay_factor: Multiplier for exponential backoff on retries.
+        headers: HTTP headers for requests (e.g., User-Agent).
+        robots_cache_ttl: Time-to-live for robots.txt cache in seconds.
+    """
+
+    @property
+    def supported_extensions(self) -> List[str]:
+        return ["html"]
+
+    @property
+    def supported_mime_types(self) -> List[str]:
+        return ["text/html", "text/plain"]
+
+    @property
+    def loader_name(self) -> str:
+        return "beautiful_soup_loader"
+
+    def can_handle(self, extension: str, mime_type: str) -> bool:
+        can = extension in self.supported_extensions and mime_type in self.supported_mime_types
+        return can
+
+    def _get_default_extraction_rules(self):
+        # Comprehensive default extraction rules for common HTML content
+        return {
+            # Meta information
+            "title": {"selector": "title", "all": False},
+            "meta_description": {
+                "selector": "meta[name='description']",
+                "attr": "content",
+                "all": False,
+            },
+            "meta_keywords": {
+                "selector": "meta[name='keywords']",
+                "attr": "content",
+                "all": False,
+            },
+            # Open Graph meta tags
+            "og_title": {
+                "selector": "meta[property='og:title']",
+                "attr": "content",
+                "all": False,
+            },
+            "og_description": {
+                "selector": "meta[property='og:description']",
+                "attr": "content",
+                "all": False,
+            },
+            # Main content areas (prioritized selectors)
+            "article": {"selector": "article", "all": True, "join_with": "\n\n"},
+            "main": {"selector": "main", "all": True, "join_with": "\n\n"},
+            # Semantic content sections
+            "headers_h1": {"selector": "h1", "all": True, "join_with": "\n"},
+            "headers_h2": {"selector": "h2", "all": True, "join_with": "\n"},
+            "headers_h3": {"selector": "h3", "all": True, "join_with": "\n"},
+            "headers_h4": {"selector": "h4", "all": True, "join_with": "\n"},
+            "headers_h5": {"selector": "h5", "all": True, "join_with": "\n"},
+            "headers_h6": {"selector": "h6", "all": True, "join_with": "\n"},
+            # Text content
+            "paragraphs": {"selector": "p", "all": True, "join_with": "\n\n"},
+            "blockquotes": {"selector": "blockquote", "all": True, "join_with": "\n\n"},
+            "preformatted": {"selector": "pre", "all": True, "join_with": "\n\n"},
+            # Lists
+            "ordered_lists": {"selector": "ol", "all": True, "join_with": "\n"},
+            "unordered_lists": {"selector": "ul", "all": True, "join_with": "\n"},
+            "list_items": {"selector": "li", "all": True, "join_with": "\n"},
+            "definition_lists": {"selector": "dl", "all": True, "join_with": "\n"},
+            # Tables
+            "tables": {"selector": "table", "all": True, "join_with": "\n\n"},
+            "table_captions": {
+                "selector": "caption",
+                "all": True,
+                "join_with": "\n",
+            },
+            # Code blocks
+            "code_blocks": {"selector": "code", "all": True, "join_with": "\n"},
+            # Figures and media descriptions
+            "figures": {"selector": "figure", "all": True, "join_with": "\n\n"},
+            "figcaptions": {"selector": "figcaption", "all": True, "join_with": "\n"},
+            "image_alts": {"selector": "img", "attr": "alt", "all": True, "join_with": " "},
+            # Links (text content, not URLs to avoid clutter)
+            "link_text": {"selector": "a", "all": True, "join_with": " "},
+            # Emphasized text
+            "strong": {"selector": "strong", "all": True, "join_with": " "},
+            "emphasis": {"selector": "em", "all": True, "join_with": " "},
+            "marked": {"selector": "mark", "all": True, "join_with": " "},
+            # Time and data elements
+            "time": {"selector": "time", "all": True, "join_with": " "},
+            "data": {"selector": "data", "all": True, "join_with": " "},
+            # Sections and semantic structure
+            "sections": {"selector": "section", "all": True, "join_with": "\n\n"},
+            "asides": {"selector": "aside", "all": True, "join_with": "\n\n"},
+            "details": {"selector": "details", "all": True, "join_with": "\n"},
+            "summary": {"selector": "summary", "all": True, "join_with": "\n"},
+            # Navigation (may contain important links/structure)
+            "nav": {"selector": "nav", "all": True, "join_with": "\n"},
+            # Footer information
+            "footer": {"selector": "footer", "all": True, "join_with": "\n"},
+            # Divs with specific content roles
+            "content_divs": {
+                "selector": "div[role='main'], div[role='article'], div.content, div#content",
+                "all": True,
+                "join_with": "\n\n",
+            },
+        }
+
+    async def load(
+        self,
+        file_path: str,
+        extraction_rules: dict[str, Any] = None,
+        join_all_matches: bool = False,
+        **kwargs,
+    ):
+        """Load an HTML file, extract content, and save to storage.
+
+        Args:
+            file_path: Path to the HTML file
+            extraction_rules: Dict of CSS selector rules for content extraction
+            join_all_matches: If True, extract all matching elements for each rule
+            **kwargs: Additional arguments
+
+        Returns:
+            Path to the stored extracted text file
+        """
+        if extraction_rules is None:
+            extraction_rules = self._get_default_extraction_rules()
+            logger.info("Using default comprehensive extraction rules for HTML content")
+
+        logger.info(f"Processing HTML file: {file_path}")
+
+        from cognee.infrastructure.files.utils.get_file_metadata import get_file_metadata
+        from cognee.infrastructure.files.storage import get_file_storage, get_storage_config
+
+        with open(file_path, "rb") as f:
+            file_metadata = await get_file_metadata(f)
+            f.seek(0)
+            html = f.read()
+
+        storage_file_name = "text_" + file_metadata["content_hash"] + ".txt"
+
+        # Normalize extraction rules
+        normalized_rules: List[ExtractionRule] = []
+        for _, rule in extraction_rules.items():
+            r = self._normalize_rule(rule)
+            if join_all_matches:
+                r.all = True
+            normalized_rules.append(r)
+
+        pieces = []
+        for rule in normalized_rules:
+            text = self._extract_from_html(html, rule)
+            if text:
+                pieces.append(text)
+
+        full_content = " ".join(pieces).strip()
+
+        # remove after defaults for extraction rules
+        # Fallback: If no content extracted, check if the file is plain text (not HTML)
+        if not full_content:
+            from bs4 import BeautifulSoup
+
+            soup = BeautifulSoup(html, "html.parser")
+            # If there are no HTML tags, treat as plain text
+            if not soup.find():
+                logger.warning(
+                    f"No HTML tags found in {file_path}. Treating as plain text. "
+                    "This may happen when content is pre-extracted (e.g., via Tavily with text format)."
+                )
+                full_content = html.decode("utf-8") if isinstance(html, bytes) else html
+                full_content = full_content.strip()
+
+        if not full_content:
+            logger.warning(f"No content extracted from HTML file: {file_path}")
+
+        # Store the extracted content
+        storage_config = get_storage_config()
+        data_root_directory = storage_config["data_root_directory"]
+        storage = get_file_storage(data_root_directory)
+
+        full_file_path = await storage.store(storage_file_name, full_content)
+
+        logger.info(f"Extracted {len(full_content)} characters from HTML")
+        return full_file_path
+
+    def _normalize_rule(self, rule: Union[str, Dict[str, Any]]) -> ExtractionRule:
+        """Normalize an extraction rule to an ExtractionRule dataclass.
+
+        Args:
+            rule: A string (CSS selector) or dict with extraction parameters.
+
+        Returns:
+            ExtractionRule: Normalized extraction rule.
+
+        Raises:
+            ValueError: If the rule is invalid.
+        """
+        if isinstance(rule, str):
+            return ExtractionRule(selector=rule)
+        if isinstance(rule, dict):
+            return ExtractionRule(
+                selector=rule.get("selector"),
+                xpath=rule.get("xpath"),
+                attr=rule.get("attr"),
+                all=bool(rule.get("all", False)),
+                join_with=rule.get("join_with", " "),
+            )
+        raise ValueError(f"Invalid extraction rule: {rule}")
+
+    def _extract_from_html(self, html: str, rule: ExtractionRule) -> str:
+        """Extract content from HTML using BeautifulSoup or lxml XPath.
+
+        Args:
+            html: The HTML content to extract from.
+            rule: The extraction rule to apply.
+
+        Returns:
+            str: The extracted content.
+
+        Raises:
+            RuntimeError: If XPath is used but lxml is not installed.
+        """
+        soup = BeautifulSoup(html, "html.parser")
+
+        if rule.xpath:
+            try:
+                from lxml import html as lxml_html
+            except ImportError:
+                raise RuntimeError(
+                    "XPath requested but lxml is not available. Install lxml or use CSS selectors."
+                )
+            doc = lxml_html.fromstring(html)
+            nodes = doc.xpath(rule.xpath)
+            texts = []
+            for n in nodes:
+                if hasattr(n, "text_content"):
+                    texts.append(n.text_content().strip())
+                else:
+                    texts.append(str(n).strip())
+            return rule.join_with.join(t for t in texts if t)
+
+        if not rule.selector:
+            return ""
+
+        if rule.all:
+            nodes = soup.select(rule.selector)
+            pieces = []
+            for el in nodes:
+                if rule.attr:
+                    val = el.get(rule.attr)
+                    if val:
+                        pieces.append(val.strip())
+                else:
+                    text = el.get_text(strip=True)
+                    if text:
+                        pieces.append(text)
+            return rule.join_with.join(pieces).strip()
+        else:
+            el = soup.select_one(rule.selector)
+            if el is None:
+                return ""
+            if rule.attr:
+                val = el.get(rule.attr)
+                return (val or "").strip()
+            return el.get_text(strip=True)
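
For orientation, a minimal usage sketch of the loader added above. The class, the load() signature, and the rules-dict shape are taken from the diff itself; the input file name is hypothetical, and the import path follows the registration in supported_loaders.py below.

import asyncio

from cognee.infrastructure.loaders.external import BeautifulSoupLoader

async def main():
    loader = BeautifulSoupLoader()
    # Same shape as _get_default_extraction_rules(); omitting the argument uses the defaults.
    rules = {
        "title": {"selector": "title", "all": False},
        "paragraphs": {"selector": "p", "all": True, "join_with": "\n\n"},
    }
    # load() extracts text per rule, stores it, and returns the stored file path.
    stored_path = await loader.load("page.html", extraction_rules=rules)  # hypothetical file
    print(stored_path)

asyncio.run(main())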
cognee/infrastructure/loaders/supported_loaders.py
@@ -23,3 +23,10 @@ try:
     supported_loaders[AdvancedPdfLoader.loader_name] = AdvancedPdfLoader
 except ImportError:
     pass
+
+try:
+    from cognee.infrastructure.loaders.external import BeautifulSoupLoader
+
+    supported_loaders[BeautifulSoupLoader.loader_name] = BeautifulSoupLoader
+except ImportError:
+    pass
cognee/modules/data/exceptions/exceptions.py
@@ -10,7 +10,7 @@ class UnstructuredLibraryImportError(CogneeConfigurationError):
         self,
         message: str = "Import error. Unstructured library is not installed.",
         name: str = "UnstructuredModuleImportError",
-        status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
+        status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
     ):
         super().__init__(message, name, status_code)
 
cognee/modules/data/methods/__init__.py
@@ -23,3 +23,6 @@ from .create_authorized_dataset import create_authorized_dataset
 
 # Check
 from .check_dataset_name import check_dataset_name
+
+# Boolean check
+from .has_dataset_data import has_dataset_data
cognee/modules/data/methods/get_dataset_data.py
@@ -9,7 +9,10 @@ async def get_dataset_data(dataset_id: UUID) -> list[Data]:
 
     async with db_engine.get_async_session() as session:
         result = await session.execute(
-            select(Data).join(Data.datasets).filter((Dataset.id == dataset_id))
+            select(Data)
+            .join(Data.datasets)
+            .filter((Dataset.id == dataset_id))
+            .order_by(Data.data_size.desc())
         )
 
         data = list(result.scalars().all())
cognee/modules/data/methods/has_dataset_data.py
@@ -0,0 +1,21 @@
+from uuid import UUID
+
+from sqlalchemy import select
+from sqlalchemy.sql import func
+
+from cognee.infrastructure.databases.relational import get_relational_engine
+from cognee.modules.data.models import DatasetData
+
+
+async def has_dataset_data(dataset_id: UUID) -> bool:
+    db_engine = get_relational_engine()
+
+    async with db_engine.get_async_session() as session:
+        count_query = (
+            select(func.count())
+            .select_from(DatasetData)
+            .where(DatasetData.dataset_id == dataset_id)
+        )
+        count = await session.execute(count_query)
+
+        return count.scalar_one() > 0
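
A sketch of how the new helper might serve as an emptiness guard. The guard function and the dataset UUID are hypothetical; the import path follows the re-export added to cognee/modules/data/methods/__init__.py above.

from uuid import UUID

from cognee.modules.data.methods import has_dataset_data

async def assert_dataset_not_empty(dataset_id: UUID) -> None:
    # has_dataset_data() counts DatasetData rows for the dataset and returns a bool.
    if not await has_dataset_data(dataset_id):
        raise ValueError(f"Dataset {dataset_id} contains no data")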
cognee/modules/engine/models/TableRow.py
@@ -5,7 +5,6 @@ from typing import Optional
 
 class TableRow(DataPoint):
     name: str
-    is_a: Optional[TableType] = None
     description: str
     properties: str
 
cognee/modules/ingestion/save_data_to_file.py
@@ -1,10 +1,12 @@
-from typing import BinaryIO, Union
+from typing import BinaryIO, Union, Optional
 from cognee.infrastructure.files.storage import get_file_storage, get_storage_config
 from .classify import classify
 import hashlib
 
 
-async def save_data_to_file(data: Union[str, BinaryIO], filename: str = None):
+async def save_data_to_file(
+    data: Union[str, BinaryIO], filename: str = None, file_extension: Optional[str] = None
+):
     storage_config = get_storage_config()
 
     data_root_directory = storage_config["data_root_directory"]
@@ -21,6 +23,11 @@ async def save_data_to_file(data: Union[str, BinaryIO], filename: str = None):
 
     file_name = file_metadata["name"]
 
+    if file_extension is not None:
+        extension = file_extension.lstrip(".")
+        file_name_without_ext = file_name.rsplit(".", 1)[0]
+        file_name = f"{file_name_without_ext}.{extension}"
+
     storage = get_file_storage(data_root_directory)
 
     full_file_path = await storage.store(file_name, data)
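
The effect of the new file_extension parameter, isolated as a pure-Python illustration (not cognee's code): a leading dot is stripped and the stored file name's extension is replaced.

def apply_extension(file_name: str, file_extension: str) -> str:
    # Mirrors the logic in the hunk above: strip a leading dot, swap the extension.
    extension = file_extension.lstrip(".")
    return f"{file_name.rsplit('.', 1)[0]}.{extension}"

assert apply_extension("text_ab12cd.html", ".txt") == "text_ab12cd.txt"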
cognee/modules/pipelines/exceptions/exceptions.py
@@ -7,6 +7,6 @@ class PipelineRunFailedError(CogneeSystemError):
         self,
         message: str = "Pipeline run failed.",
         name: str = "PipelineRunFailedError",
-        status_code: int = status.HTTP_422_UNPROCESSABLE_ENTITY,
+        status_code: int = status.HTTP_422_UNPROCESSABLE_CONTENT,
     ):
         super().__init__(message, name, status_code)
cognee/modules/pipelines/operations/pipeline.py
@@ -20,6 +20,7 @@ from cognee.modules.pipelines.layers.resolve_authorized_user_datasets import (
 from cognee.modules.pipelines.layers.check_pipeline_run_qualification import (
     check_pipeline_run_qualification,
 )
+from typing import Any
 
 logger = get_logger("cognee.pipeline")
 
@@ -35,6 +36,7 @@ async def run_pipeline(
     vector_db_config: dict = None,
     graph_db_config: dict = None,
     incremental_loading: bool = False,
+    data_per_batch: int = 20,
 ):
     validate_pipeline_tasks(tasks)
     await setup_and_check_environment(vector_db_config, graph_db_config)
@@ -50,6 +52,7 @@
         pipeline_name=pipeline_name,
         context={"dataset": dataset},
         incremental_loading=incremental_loading,
+        data_per_batch=data_per_batch,
     ):
         yield run_info
 
@@ -62,6 +65,7 @@ async def run_pipeline_per_dataset(
     pipeline_name: str = "custom_pipeline",
     context: dict = None,
     incremental_loading=False,
+    data_per_batch: int = 20,
 ):
     # Will only be used if ENABLE_BACKEND_ACCESS_CONTROL is set to True
     await set_database_global_context_variables(dataset.id, dataset.owner_id)
@@ -77,7 +81,14 @@
         return
 
     pipeline_run = run_tasks(
-        tasks, dataset.id, data, user, pipeline_name, context, incremental_loading
+        tasks,
+        dataset.id,
+        data,
+        user,
+        pipeline_name,
+        context,
+        incremental_loading,
+        data_per_batch,
     )
 
     async for pipeline_run_info in pipeline_run:
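
The new data_per_batch parameter (default 20) is threaded from run_pipeline down to run_tasks, which now processes data items in batches (see run_tasks_data_item.py in the file list). A minimal sketch of the implied batching semantics, not cognee's actual implementation:

from typing import Any, Iterable, Iterator, List

def batched(items: Iterable[Any], data_per_batch: int = 20) -> Iterator[List[Any]]:
    batch: List[Any] = []
    for item in items:
        batch.append(item)
        if len(batch) == data_per_batch:
            yield batch
            batch = []
    if batch:
        yield batch  # final, possibly short, batch

# 45 data items with the default batch size yield batches of 20, 20, and 5.
assert [len(b) for b in batched(range(45))] == [20, 20, 5]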