not-again-ai 0.19.0-py3-none-any.whl → 0.20.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
not_again_ai/data/__init__.py CHANGED
@@ -1,7 +0,0 @@
- import importlib.util
-
- if importlib.util.find_spec("playwright") is None:
-     raise ImportError(
-         "not_again_ai.data requires the 'data' extra to be installed. "
-         "You can install it using 'pip install not_again_ai[data]'."
-     )
not_again_ai/data/brave_search_api.py ADDED
@@ -0,0 +1,203 @@
+ import os
+
+ import httpx
+ from loguru import logger
+ from pydantic import BaseModel
+
+
+ class SearchWebResult(BaseModel):
+     title: str
+     url: str
+     description: str
+     netloc: str | None = None
+
+
+ class SearchWebResults(BaseModel):
+     results: list[SearchWebResult]
+
+
+ async def search(
+     query: str,
+     count: int = 20,
+     offset: int = 0,
+     country: str = "US",
+     search_lang: str = "en",
+     ui_lang: str = "en-US",
+     freshness: str | None = None,
+     timezone: str = "America/New_York",
+     state: str = "MA",
+     user_agent: str = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36 Edg/134.0.0.",
+ ) -> SearchWebResults:
+     """
+     Search using Brave Search API.
+
+     Args:
+         query: The search query string
+         count: Number of search results to return (1-20, default 20)
+         offset: Number of search results to skip (default 0)
+         country: Country code for search results (default "US")
+         search_lang: Language for search (default "en")
+         ui_lang: User interface language (default "en-US")
+         freshness: Freshness of results ("pd", "pw", "pm", "py" or YYYY-MM-DDtoYYYY-MM-DD or None)
+         timezone: Timezone for search results (default "America/New_York")
+         state: State for search results (default "MA")
+         user_agent: User agent string for the request (default is a common browser UA)
+
+     Returns:
+         SearchWebResults: A model containing the search results
+
+     Raises:
+         httpx.HTTPError: If the request fails
+         ValueError: If BRAVE_SEARCH_API_KEY is not set
+     """
+     api_key = os.getenv("BRAVE_SEARCH_API_KEY")
+     if not api_key:
+         raise ValueError("BRAVE_SEARCH_API_KEY environment variable is not set")
+
+     url = "https://api.search.brave.com/res/v1/web/search"
+
+     headers = {
+         "Accept": "application/json",
+         "Accept-Encoding": "gzip",
+         "X-Subscription-Token": api_key,
+         "X-Loc-Country": country,
+         "X-Loc-Timezone": timezone,
+         "X-Loc-State": state,
+         "User-Agent": user_agent,
+     }
+
+     params: dict[str, str | int | bool] = {
+         "q": query,
+         "count": count,
+         "offset": offset,
+         "country": country,
+         "search_lang": search_lang,
+         "ui_lang": ui_lang,
+         "text_decorations": False,
+         "spellcheck": False,
+         "units": "imperial",
+         "extra_snippets": False,
+         "safesearch": "off",
+     }
+
+     # Add optional parameters if provided
+     if freshness:
+         params["freshness"] = freshness
+
+     try:
+         async with httpx.AsyncClient() as client:
+             response = await client.get(url, headers=headers, params=params)
+             response.raise_for_status()
+             data = response.json()
+             results_list: list[SearchWebResult] = []
+             for item in data.get("web", {}).get("results", []):
+                 result = SearchWebResult(
+                     title=item.get("title", ""),
+                     url=item.get("url", ""),
+                     description=item.get("snippet", ""),
+                     netloc=item.get("meta_url", {}).get("netloc", None),
+                 )
+                 results_list.append(result)
+             return SearchWebResults(results=results_list)
+
+     except httpx.HTTPError as e:
+         logger.error(f"HTTP error during Brave search: {e}")
+         raise
+     except Exception as e:
+         logger.error(f"Unexpected error during Brave search: {e}")
+         raise
+
+
+ class SearchNewsResult(BaseModel):
+     title: str
+     url: str
+     description: str
+     age: str
+     netloc: str | None = None
+
+
+ class SearchNewsResults(BaseModel):
+     results: list[SearchNewsResult]
+
+
+ async def search_news(
+     query: str,
+     count: int = 20,
+     offset: int = 0,
+     country: str = "US",
+     search_lang: str = "en",
+     ui_lang: str = "en-US",
+     freshness: str | None = None,
+     user_agent: str = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36 Edg/134.0.0.",
+ ) -> SearchNewsResults:
+     """
+     Search news using Brave News Search API.
+
+     Args:
+         query: The search query string
+         count: Number of news results to return (1-20, default 20)
+         offset: Number of search results to skip (default 0)
+         country: Country code for search results (default "US")
+         search_lang: Language for search (default "en")
+         ui_lang: User interface language (default "en-US")
+         freshness: Freshness of results ("pd", "pw", "pm", "py" or YYYY-MM-DDtoYYYY-MM-DD or None)
+         user_agent: User agent string for the request (default is a common browser UA)
+
+     Returns:
+         SearchNewsResults: A model containing the news search results
+
+     Raises:
+         httpx.HTTPError: If the request fails
+         ValueError: If BRAVE_SEARCH_API_KEY is not set
+     """
+     api_key = os.getenv("BRAVE_SEARCH_API_KEY")
+     if not api_key:
+         raise ValueError("BRAVE_SEARCH_API_KEY environment variable is not set")
+
+     url = "https://api.search.brave.com/res/v1/news/search"
+
+     headers = {
+         "Accept": "application/json",
+         "Accept-Encoding": "gzip",
+         "X-Subscription-Token": api_key,
+         "User-Agent": user_agent,
+     }
+
+     params: dict[str, str | int | bool] = {
+         "q": query,
+         "count": count,
+         "offset": offset,
+         "country": country,
+         "search_lang": search_lang,
+         "ui_lang": ui_lang,
+         "spellcheck": False,
+         "safesearch": "off",
+     }
+
+     # Add optional parameters if provided
+     if freshness:
+         params["freshness"] = freshness
+
+     try:
+         async with httpx.AsyncClient() as client:
+             response = await client.get(url, headers=headers, params=params)
+             response.raise_for_status()
+             data = response.json()
+             results_list: list[SearchNewsResult] = []
+             for item in data.get("results", []):
+                 result = SearchNewsResult(
+                     title=item.get("title", ""),
+                     url=item.get("url", ""),
+                     description=item.get("description", ""),
+                     age=item.get("age"),
+                     netloc=item.get("meta_url", {}).get("netloc", None),
+                 )
+                 results_list.append(result)
+             return SearchNewsResults(results=results_list)
+
+     except httpx.HTTPError as e:
+         logger.error(f"HTTP error during Brave news search: {e}")
+         raise
+     except Exception as e:
+         logger.error(f"Unexpected error during Brave news search: {e}")
+         raise
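For orientation, here is a minimal, hypothetical usage sketch of the new module (not part of the diff). It assumes the `data` extra is installed, `BRAVE_SEARCH_API_KEY` is set, and that the module is importable as `not_again_ai.data.brave_search_api` per the RECORD entry further below; the query strings are placeholders.

```python
# Hypothetical usage sketch; assumes BRAVE_SEARCH_API_KEY is set in the environment.
import asyncio

from not_again_ai.data.brave_search_api import search, search_news


async def main() -> None:
    # Web search returns a SearchWebResults model.
    web = await search("python packaging", count=5)
    for item in web.results:
        print(item.title, item.url, item.netloc)

    # News search; freshness="pw" restricts results to the past week.
    news = await search_news("python", count=5, freshness="pw")
    for item in news.results:
        print(item.age, item.title)


if __name__ == "__main__":
    asyncio.run(main())
```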
not_again_ai/data/web.py CHANGED
@@ -1,56 +1,160 @@
- from loguru import logger
- from playwright.sync_api import Browser, Playwright, sync_playwright
+ import asyncio
+ import io
+ import mimetypes
+ from pathlib import Path
+ import re
+ from urllib.parse import urlparse

+ from crawl4ai import AsyncWebCrawler, CacheMode
+ from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
+ from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
+ import httpx
+ from markitdown import MarkItDown, StreamInfo
+ from pydantic import BaseModel

- def create_browser(headless: bool = True) -> tuple[Playwright, Browser]:
-     """Creates and returns a new Playwright instance and browser.

-     Args:
-         headless (bool, optional): Whether to run the browser in headless mode. Defaults to True.
+ class Link(BaseModel):
+     url: str
+     text: str

-     Returns:
-         tuple[Playwright, Browser]: A tuple containing the Playwright instance and browser.
+
+ class URLResult(BaseModel):
+     url: str
+     markdown: str
+     links: list[Link] = []
+
+
+ async def _markitdown_bytes_to_str(file_bytes: bytes, filename_extension: str) -> str:
      """
-     pwright = sync_playwright().start()
-     browser = pwright.chromium.launch(
-         headless=headless,
-         chromium_sandbox=False,
-         timeout=15000,
-     )
-     return pwright, browser
+     Convert a file using MarkItDown defaults.
+     """
+     with io.BytesIO(file_bytes) as temp:
+         result = await asyncio.to_thread(
+             MarkItDown(enable_plugins=False).convert,
+             source=temp,
+             stream_info=StreamInfo(extension=filename_extension),
+         )
+         text = result.text_content
+     return text


- def get_raw_web_content(url: str, browser: Browser | None = None, headless: bool = True) -> str:
-     """Fetches raw web content from a given URL using Playwright.
+ def _detect_pdf_extension(url: str) -> bool:
+     """
+     Detect if the URL is a PDF based on its extension.
+     """
+     parsed_url = urlparse(url)
+     filename = Path(parsed_url.path).name
+     return mimetypes.guess_type(filename)[0] == "application/pdf"

-     Args:
-         url (str): The URL to fetch content from.
-         browser (Browser | None, optional): An existing browser instance to use. Defaults to None.
-         headless (bool, optional): Whether to run the browser in headless mode. Defaults to True.

-     Returns:
-         str: The raw web content.
+ def _detect_google_sheets(url: str) -> bool:
+     """
+     Detect if the URL is a Google Sheets document.
      """
-     p = None
-     try:
-         if browser is None:
-             p, browser = create_browser(headless)
+     is_google_sheets = url.startswith("https://docs.google.com/spreadsheets/")
+     return is_google_sheets
+
+
+ async def _handle_pdf_content(url: str) -> URLResult:
+     md = MarkItDown(enable_plugins=False)
+     result = md.convert(url)
+     url_result = URLResult(
+         url=url,
+         markdown=result.markdown or "",
+         links=[],
+     )
+     return url_result
+
+
+ async def _handle_google_sheets_content(url: str) -> URLResult:
+     """
+     Handle Google Sheets by using the export URL to get the raw content.
+     """
+     edit_pattern = r"https://docs\.google\.com/spreadsheets/d/([a-zA-Z0-9-_]+)/edit"
+     export_pattern = r"https://docs\.google\.com/spreadsheets/d/([a-zA-Z0-9-_]+)/export\?format=csv"
+
+     # Check if it's already an export URL
+     export_match = re.search(export_pattern, url)
+     if export_match:
+         export_url = url
+     else:
+         # Check if it's an edit URL and extract document ID
+         edit_match = re.search(edit_pattern, url)
+         if edit_match:
+             doc_id = edit_match.group(1)
+             export_url = f"https://docs.google.com/spreadsheets/d/{doc_id}/export?format=csv&gid=0"
+         else:
+             return await _handle_web_content(url)

-         page = browser.new_page(
-             accept_downloads=False,
-             java_script_enabled=True,
-             viewport={"width": 1366, "height": 768},
-             user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.3",
+     async with httpx.AsyncClient(follow_redirects=True) as client:
+         response = await client.get(export_url)
+         response.raise_for_status()
+         csv_bytes = response.content
+
+     # Convert CSV to markdown using MarkItDown
+     markdown_content = await _markitdown_bytes_to_str(csv_bytes, ".csv")
+
+     url_result = URLResult(
+         url=url,
+         markdown=markdown_content,
+         links=[],
+     )
+     return url_result
+
+
+ async def _handle_web_content(url: str) -> URLResult:
+     browser_config = BrowserConfig(
+         browser_type="chromium",
+         headless=True,
+         verbose=False,
+         user_agent_mode="random",
+         java_script_enabled=True,
+     )
+     run_config = CrawlerRunConfig(
+         scan_full_page=True,
+         user_agent_mode="random",
+         cache_mode=CacheMode.DISABLED,
+         markdown_generator=DefaultMarkdownGenerator(),
+     )
+
+     async with AsyncWebCrawler(config=browser_config) as crawler:
+         result = await crawler.arun(
+             url=url,
+             config=run_config,
          )
-         page.goto(url)
-         content = page.content()
-         page.close()
-         return content
-     except Exception as e:
-         logger.error(f"Failed to get web content: {e}")
-         return ""
-     finally:
-         if browser:
-             browser.close()
-         if p:
-             p.stop()
+
+         if result.response_headers.get("content-type") == "application/pdf":
+             return await _handle_pdf_content(url)
+
+         links: list[Link] = []
+         seen_urls: set[str] = set()
+         combined_link_data = result.links.get("internal", []) + result.links.get("external", [])
+         for link_data in combined_link_data:
+             href = link_data.get("href", "")
+             if href and href not in seen_urls:
+                 seen_urls.add(href)
+                 link = Link(
+                     url=href,
+                     text=link_data.get("title", "") or link_data.get("text", ""),
+                 )
+                 links.append(link)
+
+         url_result = URLResult(
+             url=url,
+             markdown=result.markdown or "",
+             links=links,
+         )
+         return url_result
+
+
+ async def process_url(url: str) -> URLResult:
+     """
+     Process a URL to extract content and convert it to Markdown and links
+     """
+     if _detect_pdf_extension(url):
+         url_result = await _handle_pdf_content(url)
+     elif _detect_google_sheets(url):
+         url_result = await _handle_google_sheets_content(url)
+     else:
+         url_result = await _handle_web_content(url)
+     return url_result
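Similarly, a hypothetical sketch of calling the reworked web.py (not part of the diff): `process_url` is now async and returns a `URLResult` (Markdown plus extracted links) instead of raw HTML, so callers need an event loop. It assumes `crawl4ai-setup` has been run; the URL is a placeholder.

```python
# Hypothetical usage sketch; process_url dispatches to a PDF, Google Sheets, or crawl4ai handler.
import asyncio

from not_again_ai.data.web import process_url


async def main() -> None:
    result = await process_url("https://example.com")  # URLResult
    print(result.markdown[:500])
    for link in result.links[:10]:
        print(link.text, "->", link.url)


if __name__ == "__main__":
    asyncio.run(main())
```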
not_again_ai-0.20.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: not-again-ai
- Version: 0.19.0
+ Version: 0.20.0
  Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
  Project-URL: Homepage, https://github.com/DaveCoDev/not-again-ai
  Project-URL: Documentation, https://davecodev.github.io/not-again-ai/
@@ -22,8 +22,9 @@ Requires-Python: >=3.11
  Requires-Dist: loguru<1.0,>=0.7
  Requires-Dist: pydantic<3.0,>=2.11
  Provides-Extra: data
- Requires-Dist: playwright<2.0,>=1.51; extra == 'data'
- Requires-Dist: pytest-playwright<1.0,>=0.7; extra == 'data'
+ Requires-Dist: crawl4ai<1.0,>=0.6; extra == 'data'
+ Requires-Dist: httpx<1.0,>=0.28; extra == 'data'
+ Requires-Dist: markitdown[pdf]==0.1.2; extra == 'data'
  Provides-Extra: llm
  Requires-Dist: anthropic<1.0,>=0.50; extra == 'llm'
  Requires-Dist: azure-identity<2.0,>=1.21; extra == 'llm'
@@ -83,7 +84,9 @@ The package is split into subpackages, so you can install only the parts you nee

  ### Data
  1. `pip install not_again_ai[data]`
- 1. `playwright install` to download the browser binaries.
+ 1. `crawl4ai-setup` to run crawl4ai post-installation setup.
+ 1. Set the `BRAVE_SEARCH_API_KEY` environment variable to use the Brave Search API for web data extraction.
+ 1. Get the API key from https://api-dashboard.search.brave.com/app/keys. You must have at least the Free "Data for Search" subscription.


  ### LLM
@@ -312,3 +315,5 @@ Default settings are configured in [`.vscode/settings.json`](./.vscode/settings.

  # Attributions
  [python-blueprint](https://github.com/johnthagen/python-blueprint) for the Python package skeleton.
+
+ This project uses Crawl4AI (https://github.com/unclecode/crawl4ai) for web data extraction.
not_again_ai-0.20.0.dist-info/RECORD CHANGED
@@ -3,8 +3,9 @@ not_again_ai/py.typed,sha256=UaCuPFa3H8UAakbt-5G8SPacldTOGvJv18pPjUJ5gDY,93
  not_again_ai/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  not_again_ai/base/file_system.py,sha256=KNQmacO4Q__CQuq2oPzWrg3rQO48n3evglc9bNiP7KM,949
  not_again_ai/base/parallel.py,sha256=fcYhKBYBWvob84iKp3O93wvFFdXeidljZsShgBLTNGA,3448
- not_again_ai/data/__init__.py,sha256=1jF6mwvtB2PT7IEc3xpbRtZm3g3Lyf8zUqH4AEE4qlQ,244
- not_again_ai/data/web.py,sha256=wjx9cc33jcoJBGonYCIpwygPBFOwz7F-dx_ominmbnI,1838
+ not_again_ai/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ not_again_ai/data/brave_search_api.py,sha256=cTH7smRf8ITPpcQEcoIEvJo6VAST9_mg7FSL88xdGMc,6603
+ not_again_ai/data/web.py,sha256=LLWM5SkgI3-ILhtXYYuDm_eVnij3uS4tL059RyMq0lU,4737
  not_again_ai/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  not_again_ai/llm/chat_completion/__init__.py,sha256=HozawvdRkTFgq8XR16GJUHN1ukEa4Ya68wVPVrl-afs,250
  not_again_ai/llm/chat_completion/interface.py,sha256=OU6ghG7RlveahkHZWdRHFg0uzbSrSh2Dz7u5-4rrypA,2700
@@ -39,7 +40,7 @@ not_again_ai/viz/distributions.py,sha256=OyWwJaNI6lMRm_iSrhq-CORLNvXfeuLSgDtVo3u
  not_again_ai/viz/scatterplot.py,sha256=5CUOWeknbBOaZPeX9oPin5sBkRKEwk8qeFH45R-9LlY,2292
  not_again_ai/viz/time_series.py,sha256=pOGZqXp_2nd6nKo-PUQNCtmMh__69jxQ6bQibTGLwZA,5212
  not_again_ai/viz/utils.py,sha256=hN7gwxtBt3U6jQni2K8j5m5pCXpaJDoNzGhBBikEU28,238
- not_again_ai-0.19.0.dist-info/METADATA,sha256=LeCIas912YMtvKEJcChoPqYrM3ay_EurUZehvlQ9t8o,12004
- not_again_ai-0.19.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- not_again_ai-0.19.0.dist-info/licenses/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
- not_again_ai-0.19.0.dist-info/RECORD,,
+ not_again_ai-0.20.0.dist-info/METADATA,sha256=sYV6N2dmYarZnbjIqLoCrjfrsa8m1G-1rYxX3XV89tI,12389
+ not_again_ai-0.20.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ not_again_ai-0.20.0.dist-info/licenses/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
+ not_again_ai-0.20.0.dist-info/RECORD,,