not-again-ai 0.18.0__py3-none-any.whl → 0.20.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
not_again_ai/data/__init__.py CHANGED
@@ -1,7 +0,0 @@
1
- import importlib.util
2
-
3
- if importlib.util.find_spec("playwright") is None:
4
- raise ImportError(
5
- "not_again_ai.data requires the 'data' extra to be installed. "
6
- "You can install it using 'pip install not_again_ai[data]'."
7
- )
not_again_ai/data/brave_search_api.py ADDED
@@ -0,0 +1,203 @@
1
+ import os
2
+
3
+ import httpx
4
+ from loguru import logger
5
+ from pydantic import BaseModel
6
+
7
+
8
+ class SearchWebResult(BaseModel):
9
+ title: str
10
+ url: str
11
+ description: str
12
+ netloc: str | None = None
13
+
14
+
15
+ class SearchWebResults(BaseModel):
16
+ results: list[SearchWebResult]
17
+
18
+
19
+ async def search(
20
+ query: str,
21
+ count: int = 20,
22
+ offset: int = 0,
23
+ country: str = "US",
24
+ search_lang: str = "en",
25
+ ui_lang: str = "en-US",
26
+ freshness: str | None = None,
27
+ timezone: str = "America/New_York",
28
+ state: str = "MA",
29
+ user_agent: str = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36 Edg/134.0.0.",
30
+ ) -> SearchWebResults:
31
+ """
32
+ Search the web using the Brave Search API.
33
+
34
+ Args:
35
+ query: The search query string
36
+ count: Number of search results to return (1-20, default 20)
37
+ offset: Number of search results to skip (default 0)
38
+ country: Country code for search results (default "US")
39
+ search_lang: Language for search (default "en")
40
+ ui_lang: User interface language (default "en-US")
41
+ freshness: Freshness of results ("pd", "pw", "pm", "py" or YYYY-MM-DDtoYYYY-MM-DD or None)
42
+ timezone: Timezone for search results (default "America/New_York")
43
+ state: State for search results (default "MA")
44
+ user_agent: User agent string for the request (default is a common browser UA)
45
+
46
+ Returns:
47
+ SearchWebResults: A model containing the search results
48
+
49
+ Raises:
50
+ httpx.HTTPError: If the request fails
51
+ ValueError: If BRAVE_SEARCH_API_KEY is not set
52
+ """
53
+ api_key = os.getenv("BRAVE_SEARCH_API_KEY")
54
+ if not api_key:
55
+ raise ValueError("BRAVE_SEARCH_API_KEY environment variable is not set")
56
+
57
+ url = "https://api.search.brave.com/res/v1/web/search"
58
+
59
+ headers = {
60
+ "Accept": "application/json",
61
+ "Accept-Encoding": "gzip",
62
+ "X-Subscription-Token": api_key,
63
+ "X-Loc-Country": country,
64
+ "X-Loc-Timezone": timezone,
65
+ "X-Loc-State": state,
66
+ "User-Agent": user_agent,
67
+ }
68
+
69
+ params: dict[str, str | int | bool] = {
70
+ "q": query,
71
+ "count": count,
72
+ "offset": offset,
73
+ "country": country,
74
+ "search_lang": search_lang,
75
+ "ui_lang": ui_lang,
76
+ "text_decorations": False,
77
+ "spellcheck": False,
78
+ "units": "imperial",
79
+ "extra_snippets": False,
80
+ "safesearch": "off",
81
+ }
82
+
83
+ # Add optional parameters if provided
84
+ if freshness:
85
+ params["freshness"] = freshness
86
+
87
+ try:
88
+ async with httpx.AsyncClient() as client:
89
+ response = await client.get(url, headers=headers, params=params)
90
+ response.raise_for_status()
91
+ data = response.json()
92
+ results_list: list[SearchWebResult] = []
93
+ for item in data.get("web", {}).get("results", []):
94
+ result = SearchWebResult(
95
+ title=item.get("title", ""),
96
+ url=item.get("url", ""),
97
+ description=item.get("snippet", ""),
98
+ netloc=item.get("meta_url", {}).get("netloc", None),
99
+ )
100
+ results_list.append(result)
101
+ return SearchWebResults(results=results_list)
102
+
103
+ except httpx.HTTPError as e:
104
+ logger.error(f"HTTP error during Brave search: {e}")
105
+ raise
106
+ except Exception as e:
107
+ logger.error(f"Unexpected error during Brave search: {e}")
108
+ raise
109
+
110
+
111
+ class SearchNewsResult(BaseModel):
112
+ title: str
113
+ url: str
114
+ description: str
115
+ age: str
116
+ netloc: str | None = None
117
+
118
+
119
+ class SearchNewsResults(BaseModel):
120
+ results: list[SearchNewsResult]
121
+
122
+
123
+ async def search_news(
124
+ query: str,
125
+ count: int = 20,
126
+ offset: int = 0,
127
+ country: str = "US",
128
+ search_lang: str = "en",
129
+ ui_lang: str = "en-US",
130
+ freshness: str | None = None,
131
+ user_agent: str = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36 Edg/134.0.0.",
132
+ ) -> SearchNewsResults:
133
+ """
134
+ Search news using the Brave News Search API.
135
+
136
+ Args:
137
+ query: The search query string
138
+ count: Number of news results to return (1-20, default 20)
139
+ offset: Number of search results to skip (default 0)
140
+ country: Country code for search results (default "US")
141
+ search_lang: Language for search (default "en")
142
+ ui_lang: User interface language (default "en-US")
143
+ freshness: Freshness of results ("pd", "pw", "pm", "py" or YYYY-MM-DDtoYYYY-MM-DD or None)
144
+ user_agent: User agent string for the request (default is a common browser UA)
145
+
146
+ Returns:
147
+ SearchNewsResults: A model containing the news search results
148
+
149
+ Raises:
150
+ httpx.HTTPError: If the request fails
151
+ ValueError: If BRAVE_SEARCH_API_KEY is not set
152
+ """
153
+ api_key = os.getenv("BRAVE_SEARCH_API_KEY")
154
+ if not api_key:
155
+ raise ValueError("BRAVE_SEARCH_API_KEY environment variable is not set")
156
+
157
+ url = "https://api.search.brave.com/res/v1/news/search"
158
+
159
+ headers = {
160
+ "Accept": "application/json",
161
+ "Accept-Encoding": "gzip",
162
+ "X-Subscription-Token": api_key,
163
+ "User-Agent": user_agent,
164
+ }
165
+
166
+ params: dict[str, str | int | bool] = {
167
+ "q": query,
168
+ "count": count,
169
+ "offset": offset,
170
+ "country": country,
171
+ "search_lang": search_lang,
172
+ "ui_lang": ui_lang,
173
+ "spellcheck": False,
174
+ "safesearch": "off",
175
+ }
176
+
177
+ # Add optional parameters if provided
178
+ if freshness:
179
+ params["freshness"] = freshness
180
+
181
+ try:
182
+ async with httpx.AsyncClient() as client:
183
+ response = await client.get(url, headers=headers, params=params)
184
+ response.raise_for_status()
185
+ data = response.json()
186
+ results_list: list[SearchNewsResult] = []
187
+ for item in data.get("results", []):
188
+ result = SearchNewsResult(
189
+ title=item.get("title", ""),
190
+ url=item.get("url", ""),
191
+ description=item.get("description", ""),
192
+ age=item.get("age"),
193
+ netloc=item.get("meta_url", {}).get("netloc", None),
194
+ )
195
+ results_list.append(result)
196
+ return SearchNewsResults(results=results_list)
197
+
198
+ except httpx.HTTPError as e:
199
+ logger.error(f"HTTP error during Brave news search: {e}")
200
+ raise
201
+ except Exception as e:
202
+ logger.error(f"Unexpected error during Brave news search: {e}")
203
+ raise
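
Note: a minimal usage sketch of the new Brave Search helpers added above (the module path is inferred from the RECORD entry `not_again_ai/data/brave_search_api.py` at the bottom of this diff; assumes the `data` extra is installed and `BRAVE_SEARCH_API_KEY` is set):

```python
import asyncio

# Module path inferred from the RECORD entry shown at the bottom of this diff.
from not_again_ai.data.brave_search_api import search, search_news


async def main() -> None:
    # Both helpers are async and raise ValueError if BRAVE_SEARCH_API_KEY is unset.
    web = await search("site reliability engineering", count=5)
    for result in web.results:
        print(result.title, result.url)

    news = await search_news("large language models", freshness="pd")
    print(f"{len(news.results)} news results")


if __name__ == "__main__":
    asyncio.run(main())
```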
not_again_ai/data/web.py CHANGED
@@ -1,56 +1,160 @@
1
- from loguru import logger
2
- from playwright.sync_api import Browser, Playwright, sync_playwright
1
+ import asyncio
2
+ import io
3
+ import mimetypes
4
+ from pathlib import Path
5
+ import re
6
+ from urllib.parse import urlparse
3
7
 
8
+ from crawl4ai import AsyncWebCrawler, CacheMode
9
+ from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
10
+ from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
11
+ import httpx
12
+ from markitdown import MarkItDown, StreamInfo
13
+ from pydantic import BaseModel
4
14
 
5
- def create_browser(headless: bool = True) -> tuple[Playwright, Browser]:
6
- """Creates and returns a new Playwright instance and browser.
7
15
 
8
- Args:
9
- headless (bool, optional): Whether to run the browser in headless mode. Defaults to True.
16
+ class Link(BaseModel):
17
+ url: str
18
+ text: str
10
19
 
11
- Returns:
12
- tuple[Playwright, Browser]: A tuple containing the Playwright instance and browser.
20
+
21
+ class URLResult(BaseModel):
22
+ url: str
23
+ markdown: str
24
+ links: list[Link] = []
25
+
26
+
27
+ async def _markitdown_bytes_to_str(file_bytes: bytes, filename_extension: str) -> str:
13
28
  """
14
- pwright = sync_playwright().start()
15
- browser = pwright.chromium.launch(
16
- headless=headless,
17
- chromium_sandbox=False,
18
- timeout=15000,
19
- )
20
- return pwright, browser
29
+ Convert a file using MarkItDown defaults.
30
+ """
31
+ with io.BytesIO(file_bytes) as temp:
32
+ result = await asyncio.to_thread(
33
+ MarkItDown(enable_plugins=False).convert,
34
+ source=temp,
35
+ stream_info=StreamInfo(extension=filename_extension),
36
+ )
37
+ text = result.text_content
38
+ return text
21
39
 
22
40
 
23
- def get_raw_web_content(url: str, browser: Browser | None = None, headless: bool = True) -> str:
24
- """Fetches raw web content from a given URL using Playwright.
41
+ def _detect_pdf_extension(url: str) -> bool:
42
+ """
43
+ Detect if the URL is a PDF based on its extension.
44
+ """
45
+ parsed_url = urlparse(url)
46
+ filename = Path(parsed_url.path).name
47
+ return mimetypes.guess_type(filename)[0] == "application/pdf"
25
48
 
26
- Args:
27
- url (str): The URL to fetch content from.
28
- browser (Browser | None, optional): An existing browser instance to use. Defaults to None.
29
- headless (bool, optional): Whether to run the browser in headless mode. Defaults to True.
30
49
 
31
- Returns:
32
- str: The raw web content.
50
+ def _detect_google_sheets(url: str) -> bool:
51
+ """
52
+ Detect if the URL is a Google Sheets document.
33
53
  """
34
- p = None
35
- try:
36
- if browser is None:
37
- p, browser = create_browser(headless)
54
+ is_google_sheets = url.startswith("https://docs.google.com/spreadsheets/")
55
+ return is_google_sheets
56
+
57
+
58
+ async def _handle_pdf_content(url: str) -> URLResult:
59
+ md = MarkItDown(enable_plugins=False)
60
+ result = md.convert(url)
61
+ url_result = URLResult(
62
+ url=url,
63
+ markdown=result.markdown or "",
64
+ links=[],
65
+ )
66
+ return url_result
67
+
68
+
69
+ async def _handle_google_sheets_content(url: str) -> URLResult:
70
+ """
71
+ Handle Google Sheets by using the export URL to get the raw content.
72
+ """
73
+ edit_pattern = r"https://docs\.google\.com/spreadsheets/d/([a-zA-Z0-9-_]+)/edit"
74
+ export_pattern = r"https://docs\.google\.com/spreadsheets/d/([a-zA-Z0-9-_]+)/export\?format=csv"
75
+
76
+ # Check if it's already an export URL
77
+ export_match = re.search(export_pattern, url)
78
+ if export_match:
79
+ export_url = url
80
+ else:
81
+ # Check if it's an edit URL and extract document ID
82
+ edit_match = re.search(edit_pattern, url)
83
+ if edit_match:
84
+ doc_id = edit_match.group(1)
85
+ export_url = f"https://docs.google.com/spreadsheets/d/{doc_id}/export?format=csv&gid=0"
86
+ else:
87
+ return await _handle_web_content(url)
38
88
 
39
- page = browser.new_page(
40
- accept_downloads=False,
41
- java_script_enabled=True,
42
- viewport={"width": 1366, "height": 768},
43
- user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.3",
89
+ async with httpx.AsyncClient(follow_redirects=True) as client:
90
+ response = await client.get(export_url)
91
+ response.raise_for_status()
92
+ csv_bytes = response.content
93
+
94
+ # Convert CSV to markdown using MarkItDown
95
+ markdown_content = await _markitdown_bytes_to_str(csv_bytes, ".csv")
96
+
97
+ url_result = URLResult(
98
+ url=url,
99
+ markdown=markdown_content,
100
+ links=[],
101
+ )
102
+ return url_result
103
+
104
+
105
+ async def _handle_web_content(url: str) -> URLResult:
106
+ browser_config = BrowserConfig(
107
+ browser_type="chromium",
108
+ headless=True,
109
+ verbose=False,
110
+ user_agent_mode="random",
111
+ java_script_enabled=True,
112
+ )
113
+ run_config = CrawlerRunConfig(
114
+ scan_full_page=True,
115
+ user_agent_mode="random",
116
+ cache_mode=CacheMode.DISABLED,
117
+ markdown_generator=DefaultMarkdownGenerator(),
118
+ )
119
+
120
+ async with AsyncWebCrawler(config=browser_config) as crawler:
121
+ result = await crawler.arun(
122
+ url=url,
123
+ config=run_config,
44
124
  )
45
- page.goto(url)
46
- content = page.content()
47
- page.close()
48
- return content
49
- except Exception as e:
50
- logger.error(f"Failed to get web content: {e}")
51
- return ""
52
- finally:
53
- if browser:
54
- browser.close()
55
- if p:
56
- p.stop()
125
+
126
+ if result.response_headers.get("content-type") == "application/pdf":
127
+ return await _handle_pdf_content(url)
128
+
129
+ links: list[Link] = []
130
+ seen_urls: set[str] = set()
131
+ combined_link_data = result.links.get("internal", []) + result.links.get("external", [])
132
+ for link_data in combined_link_data:
133
+ href = link_data.get("href", "")
134
+ if href and href not in seen_urls:
135
+ seen_urls.add(href)
136
+ link = Link(
137
+ url=href,
138
+ text=link_data.get("title", "") or link_data.get("text", ""),
139
+ )
140
+ links.append(link)
141
+
142
+ url_result = URLResult(
143
+ url=url,
144
+ markdown=result.markdown or "",
145
+ links=links,
146
+ )
147
+ return url_result
148
+
149
+
150
+ async def process_url(url: str) -> URLResult:
151
+ """
152
+ Process a URL to extract its content as Markdown along with the links found on the page.
153
+ """
154
+ if _detect_pdf_extension(url):
155
+ url_result = await _handle_pdf_content(url)
156
+ elif _detect_google_sheets(url):
157
+ url_result = await _handle_google_sheets_content(url)
158
+ else:
159
+ url_result = await _handle_web_content(url)
160
+ return url_result
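
Note: a minimal sketch of calling the rewritten `process_url` helper above (assumes the `data` extra is installed and `crawl4ai-setup` has been run, as described in the README changes further down):

```python
import asyncio

from not_again_ai.data.web import process_url


async def main() -> None:
    # process_url dispatches to the PDF, Google Sheets, or crawl4ai handler based on the URL.
    result = await process_url("https://example.com")
    print(result.markdown[:500])
    for link in result.links[:5]:
        print(link.text, "->", link.url)


if __name__ == "__main__":
    asyncio.run(main())
```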
not_again_ai/llm/chat_completion/interface.py CHANGED
@@ -2,6 +2,7 @@ from collections.abc import AsyncGenerator, Callable
2
2
  from typing import Any
3
3
 
4
4
  from not_again_ai.llm.chat_completion.providers.anthropic_api import anthropic_chat_completion
5
+ from not_again_ai.llm.chat_completion.providers.gemini_api import gemini_chat_completion
5
6
  from not_again_ai.llm.chat_completion.providers.ollama_api import ollama_chat_completion, ollama_chat_completion_stream
6
7
  from not_again_ai.llm.chat_completion.providers.openai_api import openai_chat_completion, openai_chat_completion_stream
7
8
  from not_again_ai.llm.chat_completion.types import ChatCompletionChunk, ChatCompletionRequest, ChatCompletionResponse
@@ -16,6 +17,8 @@ def chat_completion(
16
17
  - `openai` - OpenAI
17
18
  - `azure_openai` - Azure OpenAI
18
19
  - `ollama` - Ollama
20
+ - `anthropic` - Anthropic
21
+ - `gemini` - Gemini
19
22
 
20
23
  Args:
21
24
  request: Request parameter object
@@ -31,6 +34,8 @@ def chat_completion(
31
34
  return ollama_chat_completion(request, client)
32
35
  elif provider == "anthropic":
33
36
  return anthropic_chat_completion(request, client)
37
+ elif provider == "gemini":
38
+ return gemini_chat_completion(request, client)
34
39
  else:
35
40
  raise ValueError(f"Provider {provider} not supported")
36
41
 
@@ -43,8 +48,6 @@ async def chat_completion_stream(
43
48
  """Stream a chat completion response from the given provider. Currently supported providers:
44
49
  - `openai` - OpenAI
45
50
  - `azure_openai` - Azure OpenAI
46
- - `ollama` - Ollama
47
- - `anthropic` - Anthropic
48
51
 
49
52
  Args:
50
53
  request: Request parameter object
not_again_ai/llm/chat_completion/providers/anthropic_api.py CHANGED
@@ -103,12 +103,12 @@ def anthropic_chat_completion(request: ChatCompletionRequest, client: Callable[.
103
103
  elif tool_choice_value in ["auto", "any"]:
104
104
  tool_choice["type"] = "auto"
105
105
  if kwargs.get("parallel_tool_calls") is not None:
106
- tool_choice["disable_parallel_tool_use"] = str(not kwargs["parallel_tool_calls"])
106
+ tool_choice["disable_parallel_tool_use"] = not kwargs["parallel_tool_calls"] # type: ignore
107
107
  else:
108
108
  tool_choice["name"] = tool_choice_value
109
109
  tool_choice["type"] = "tool"
110
110
  if kwargs.get("parallel_tool_calls") is not None:
111
- tool_choice["disable_parallel_tool_use"] = str(not kwargs["parallel_tool_calls"])
111
+ tool_choice["disable_parallel_tool_use"] = not kwargs["parallel_tool_calls"] # type: ignore
112
112
  kwargs["tool_choice"] = tool_choice
113
113
  kwargs.pop("parallel_tool_calls", None)
114
114
 
not_again_ai/llm/chat_completion/providers/gemini_api.py ADDED
@@ -0,0 +1,237 @@
1
+ import base64
2
+ from collections.abc import Callable
3
+ import os
4
+ import time
5
+ from typing import Any
6
+
7
+ from google import genai
8
+ from google.genai import types
9
+ from google.genai.types import FunctionCall, FunctionCallingConfigMode, GenerateContentResponse
10
+
11
+ from not_again_ai.llm.chat_completion.types import (
12
+ AssistantMessage,
13
+ ChatCompletionChoice,
14
+ ChatCompletionRequest,
15
+ ChatCompletionResponse,
16
+ Function,
17
+ ImageContent,
18
+ Role,
19
+ TextContent,
20
+ ToolCall,
21
+ )
22
+
23
+ # This should be all of the options we want to support in types.GenerateContentConfig that are not handled otherwise
24
+ GEMINI_PARAMETER_MAP = {
25
+ "max_completion_tokens": "max_output_tokens",
26
+ "temperature": "temperature",
27
+ "top_p": "top_p",
28
+ "top_k": "top_k",
29
+ }
30
+
31
+ GEMINI_FINISH_REASON_MAP = {
32
+ "STOP": "stop",
33
+ "MAX_TOKENS": "max_tokens",
34
+ "SAFETY": "safety",
35
+ "RECITATION": "recitation",
36
+ "LANGUAGE": "language",
37
+ "OTHER": "other",
38
+ "BLOCKLIST": "blocklist",
39
+ "PROHIBITED_CONTENT": "prohibited_content",
40
+ "SPII": "spii",
41
+ "MALFORMED_FUNCTION_CALL": "malformed_function_call",
42
+ "IMAGE_SAFETY": "image_safety",
43
+ }
44
+
45
+
46
+ def gemini_chat_completion(request: ChatCompletionRequest, client: Callable[..., Any]) -> ChatCompletionResponse:
47
+ """Experimental Gemini chat completion function."""
48
+ # Handle messages
49
+ # Any system messages need to be removed from messages and concatenated into a single string (in order)
50
+ system = ""
51
+ contents = []
52
+ for message in request.messages:
53
+ if message.role == "system":
54
+ # Handle both string content and structured content
55
+ if isinstance(message.content, str):
56
+ system += message.content + "\n"
57
+ else:
58
+ # If it's a list of content parts, extract text content
59
+ for part in message.content:
60
+ if hasattr(part, "text"):
61
+ system += part.text + "\n"
62
+ elif message.role == "tool":
63
+ tool_name = message.name if message.name is not None else ""
64
+ function_response_part = types.Part.from_function_response(
65
+ name=tool_name,
66
+ response={"result": message.content},
67
+ )
68
+ contents.append(
69
+ types.Content(
70
+ role="user",
71
+ parts=[function_response_part],
72
+ )
73
+ )
74
+ elif message.role == "assistant":
75
+ if message.content and isinstance(message.content, str):
76
+ contents.append(types.Content(role="model", parts=[types.Part(text=message.content)]))
77
+ function_parts = []
78
+ if isinstance(message, AssistantMessage) and message.tool_calls:
79
+ for tool_call in message.tool_calls:
80
+ function_call_part = types.Part(
81
+ function_call=FunctionCall(
82
+ id=tool_call.id,
83
+ name=tool_call.function.name,
84
+ args=tool_call.function.arguments,
85
+ )
86
+ )
87
+ function_parts.append(function_call_part)
88
+ if function_parts:
89
+ contents.append(types.Content(role="model", parts=function_parts))
90
+ elif message.role == "user":
91
+ if isinstance(message.content, str):
92
+ contents.append(types.Content(role="user", parts=[types.Part(text=message.content)]))
93
+ elif isinstance(message.content, list):
94
+ parts = []
95
+ for part in message.content:
96
+ if isinstance(part, TextContent):
97
+ parts.append(types.Part(text=part.text))
98
+ elif isinstance(part, ImageContent):
99
+ # Extract MIME type and data from data URI
100
+ uri_parts = part.image_url.url.split(",", 1)
101
+ if len(uri_parts) == 2:
102
+ mime_type = uri_parts[0].split(":")[1].split(";")[0]
103
+ base64_data = uri_parts[1]
104
+ image_data = base64.b64decode(base64_data)
105
+ parts.append(types.Part.from_bytes(mime_type=mime_type, data=image_data))
106
+ contents.append(types.Content(role="user", parts=parts))
107
+
108
+ kwargs: dict[str, Any] = {}
109
+ kwargs["contents"] = contents
110
+ kwargs["model"] = request.model
111
+ config: dict[str, Any] = {}
112
+ config["system_instruction"] = system.rstrip()
113
+ config["automatic_function_calling"] = {"disable": True}
114
+
115
+ # Handle the possible tool choice options
116
+ if request.tool_choice:
117
+ tool_choice = request.tool_choice
118
+ if tool_choice == "auto":
119
+ config["tool_config"] = types.FunctionCallingConfig(mode=FunctionCallingConfigMode.AUTO)
120
+ elif tool_choice == "any":
121
+ config["tool_config"] = types.FunctionCallingConfig(mode=FunctionCallingConfigMode.ANY)
122
+ elif tool_choice == "none":
123
+ config["tool_config"] = types.FunctionCallingConfig(mode=FunctionCallingConfigMode.NONE)
124
+ elif isinstance(tool_choice, list):
125
+ config["tool_config"] = types.FunctionCallingConfig(
126
+ mode=FunctionCallingConfigMode.ANY, allowed_function_names=tool_choice
127
+ )
128
+ elif tool_choice not in (None, "auto", "any", "none"):
129
+ config["tool_config"] = types.FunctionCallingConfig(
130
+ mode=FunctionCallingConfigMode.ANY, allowed_function_names=[tool_choice]
131
+ )
132
+
133
+ # Handle tools
134
+ tools = []
135
+ for tool in request.tools or []:
136
+ tools.append(types.Tool(function_declarations=[tool])) # type: ignore
137
+ if tools:
138
+ config["tools"] = tools
139
+
140
+ # Everything else defined in GEMINI_PARAMETER_MAP goes into kwargs["config"]
141
+ request_kwargs = request.model_dump(mode="json", exclude_none=True)
142
+ for key, value in GEMINI_PARAMETER_MAP.items():
143
+ if value is not None and key in request_kwargs:
144
+ config[value] = request_kwargs.pop(key)
145
+
146
+ kwargs["config"] = types.GenerateContentConfig(**config)
147
+
148
+ start_time = time.time()
149
+ response: GenerateContentResponse = client(**kwargs)
150
+ end_time = time.time()
151
+ response_duration = round(end_time - start_time, 4)
152
+
153
+ finish_reason = "other"
154
+ if response.candidates and response.candidates[0].finish_reason:
155
+ finish_reason_str = str(response.candidates[0].finish_reason)
156
+ finish_reason = GEMINI_FINISH_REASON_MAP.get(finish_reason_str, "other")
157
+
158
+ tool_calls: list[ToolCall] = []
159
+ tool_call_objs = response.function_calls
160
+ if tool_call_objs:
161
+ for tool_call_obj in tool_call_objs:
162
+ tool_call_id = tool_call_obj.id if tool_call_obj.id else ""
163
+ tool_calls.append(
164
+ ToolCall(
165
+ id=tool_call_id,
166
+ function=Function(
167
+ name=tool_call_obj.name if tool_call_obj.name is not None else "",
168
+ arguments=tool_call_obj.args if tool_call_obj.args is not None else {},
169
+ ),
170
+ )
171
+ )
172
+
173
+ assistant_message = ""
174
+ if (
175
+ response.candidates
176
+ and response.candidates[0].content
177
+ and response.candidates[0].content.parts
178
+ and response.candidates[0].content.parts[0].text
179
+ ):
180
+ assistant_message = response.candidates[0].content.parts[0].text
181
+
182
+ choice = ChatCompletionChoice(
183
+ message=AssistantMessage(
184
+ role=Role.ASSISTANT,
185
+ content=assistant_message,
186
+ tool_calls=tool_calls,
187
+ ),
188
+ finish_reason=finish_reason,
189
+ )
190
+
191
+ completion_tokens = 0
192
+ # Add null check for usage_metadata
193
+ if response.usage_metadata is not None:
194
+ if response.usage_metadata.thoughts_token_count:
195
+ completion_tokens = response.usage_metadata.thoughts_token_count
196
+ if response.usage_metadata.candidates_token_count:
197
+ completion_tokens += response.usage_metadata.candidates_token_count
198
+
199
+ # Set safe default for prompt_tokens
200
+ prompt_tokens = 0
201
+ if response.usage_metadata is not None and response.usage_metadata.prompt_token_count:
202
+ prompt_tokens = response.usage_metadata.prompt_token_count
203
+
204
+ chat_completion_response = ChatCompletionResponse(
205
+ choices=[choice],
206
+ completion_tokens=completion_tokens,
207
+ prompt_tokens=prompt_tokens,
208
+ response_duration=response_duration,
209
+ )
210
+ return chat_completion_response
211
+
212
+
213
+ def create_client_callable(client_class: type[genai.Client], **client_args: Any) -> Callable[..., Any]:
214
+ """Creates a callable that instantiates and uses a Google genai client.
215
+
216
+ Args:
217
+ client_class: The Google genai client class to instantiate
218
+ **client_args: Arguments to pass to the client constructor
219
+
220
+ Returns:
221
+ A callable that creates a client and returns completion results
222
+ """
223
+ filtered_args = {k: v for k, v in client_args.items() if v is not None}
224
+
225
+ def client_callable(**kwargs: Any) -> Any:
226
+ client = client_class(**filtered_args)
227
+ completion = client.models.generate_content(**kwargs)
228
+ return completion
229
+
230
+ return client_callable
231
+
232
+
233
+ def gemini_client(api_key: str | None = None) -> Callable[..., Any]:
234
+ if not api_key:
235
+ api_key = os.environ.get("GEMINI_API_KEY")
236
+ client_callable = create_client_callable(genai.Client, api_key=api_key)
237
+ return client_callable
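
Note: a hedged sketch of routing a request through the new Gemini provider (uses `gemini_client` and the `chat_completion` interface exactly as they appear in this diff; `UserMessage` is assumed to exist in `not_again_ai.llm.chat_completion.types` alongside the types imported above, and the model name is illustrative only):

```python
from not_again_ai.llm.chat_completion.interface import chat_completion
from not_again_ai.llm.chat_completion.providers.gemini_api import gemini_client
from not_again_ai.llm.chat_completion.types import ChatCompletionRequest, UserMessage

# gemini_client() falls back to the GEMINI_API_KEY environment variable when no key is passed.
client = gemini_client()

request = ChatCompletionRequest(
    model="gemini-2.0-flash",  # illustrative model name
    messages=[UserMessage(content="Summarize the benefits of type hints in one sentence.")],
    max_completion_tokens=200,
)

# provider="gemini" selects the gemini_chat_completion branch added in interface.py above.
response = chat_completion(request, provider="gemini", client=client)
print(response.choices[0].message.content)
```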
not_again_ai/llm/chat_completion/types.py CHANGED
@@ -138,10 +138,7 @@ class ChatCompletionRequest(BaseModel):
138
138
 
139
139
  class ChatCompletionChoice(BaseModel):
140
140
  message: AssistantMessage
141
- finish_reason: Literal[
142
- "stop", "length", "tool_calls", "content_filter", "end_turn", "max_tokens", "stop_sequence", "tool_use"
143
- ]
144
-
141
+ finish_reason: str
145
142
  json_message: dict[str, Any] | None = Field(default=None)
146
143
  logprobs: list[dict[str, Any] | list[dict[str, Any]]] | None = Field(default=None)
147
144
 
not_again_ai/llm/image_gen/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ from not_again_ai.llm.image_gen.interface import create_image
2
+ from not_again_ai.llm.image_gen.types import ImageGenRequest
3
+
4
+ __all__ = ["ImageGenRequest", "create_image"]
not_again_ai/llm/image_gen/interface.py ADDED
@@ -0,0 +1,24 @@
1
+ from collections.abc import Callable
2
+ from typing import Any
3
+
4
+ from not_again_ai.llm.image_gen.providers.openai_api import openai_create_image
5
+ from not_again_ai.llm.image_gen.types import ImageGenRequest, ImageGenResponse
6
+
7
+
8
+ def create_image(request: ImageGenRequest, provider: str, client: Callable[..., Any]) -> ImageGenResponse:
9
+ """Get a image response from the given provider. Currently supported providers:
10
+ - `openai` - OpenAI
11
+ - `azure_openai` - Azure OpenAI
12
+
13
+ Args:
14
+ request: Request parameter object
15
+ provider: The supported provider name
16
+ client: Client information, see the provider's implementation for what can be provided
17
+
18
+ Returns:
19
+ ImageGenResponse: The image generation response.
20
+ """
21
+ if provider == "openai" or provider == "azure_openai":
22
+ return openai_create_image(request, client)
23
+ else:
24
+ raise ValueError(f"Provider {provider} not supported")
File without changes
not_again_ai/llm/image_gen/providers/openai_api.py ADDED
@@ -0,0 +1,144 @@
1
+ import base64
2
+ from collections.abc import Callable
3
+ import time
4
+ from typing import Any, Literal
5
+
6
+ from azure.identity import DefaultAzureCredential, get_bearer_token_provider
7
+ from openai import AzureOpenAI, OpenAI
8
+ from openai.types.images_response import ImagesResponse
9
+
10
+ from not_again_ai.llm.image_gen.types import ImageGenRequest, ImageGenResponse
11
+
12
+
13
+ def openai_create_image(request: ImageGenRequest, client: Callable[..., Any]) -> ImageGenResponse:
14
+ """Create an image using OpenAI API.
15
+
16
+ Args:
17
+ request (ImageGenRequest): The request object containing parameters for image generation.
18
+ client (Callable[..., Any]): The OpenAI client callable.
19
+
20
+ Returns:
21
+ ImageGenResponse: The response object containing the generated image and metadata.
22
+ """
23
+ kwargs = request.model_dump(exclude_none=True)
24
+ if kwargs.get("images"):
25
+ kwargs["image"] = kwargs.pop("images", None)
26
+
27
+ start_time = time.time()
28
+ response: ImagesResponse = client(**kwargs)
29
+ end_time = time.time()
30
+ response_duration = round(end_time - start_time, 4)
31
+
32
+ images: list[bytes] = []
33
+ if response.data:
34
+ for data in response.data:
35
+ images.append(base64.b64decode(data.b64_json or ""))
36
+
37
+ input_tokens = response.usage.input_tokens if response.usage else -1
38
+ output_tokens = response.usage.output_tokens if response.usage else -1
39
+ input_tokens_details = response.usage.input_tokens_details.to_dict() if response.usage else {}
40
+ image_gen_response = ImageGenResponse(
41
+ images=images,
42
+ input_tokens=input_tokens,
43
+ output_tokens=output_tokens,
44
+ input_tokens_details=input_tokens_details,
45
+ response_duration=response_duration,
46
+ )
47
+ return image_gen_response
48
+
49
+
50
+ def create_client_callable(client_class: type[OpenAI | AzureOpenAI], **client_args: Any) -> Callable[..., Any]:
51
+ """
52
+ Creates the correct callable depending on the parameters provided.
53
+ """
54
+ filtered_args = {k: v for k, v in client_args.items() if v is not None}
55
+
56
+ def client_callable(**kwargs: Any) -> Any:
57
+ client = client_class(**filtered_args)
58
+ # If mask or image is not none, use client.images.edit instead of client.images.generate
59
+ if kwargs.get("mask") or kwargs.get("image"):
60
+ completion = client.images.edit(**kwargs)
61
+ else:
62
+ completion = client.images.generate(**kwargs)
63
+ return completion
64
+
65
+ return client_callable
66
+
67
+
68
+ class InvalidOAIAPITypeError(Exception):
69
+ """Raised when an invalid OAIAPIType string is provided."""
70
+
71
+
72
+ def openai_client(
73
+ api_type: Literal["openai", "azure_openai"] = "openai",
74
+ api_key: str | None = None,
75
+ organization: str | None = None,
76
+ aoai_api_version: str = "2024-06-01",
77
+ azure_endpoint: str | None = None,
78
+ timeout: float | None = None,
79
+ max_retries: int | None = None,
80
+ ) -> Callable[..., Any]:
81
+ """Create an OpenAI or Azure OpenAI client instance based on the specified API type and other provided parameters.
82
+
83
+ It is preferred to use RBAC authentication for Azure OpenAI. You must be signed in with the Azure CLI and have the correct role assigned.
84
+ See https://techcommunity.microsoft.com/t5/microsoft-developer-community/using-keyless-authentication-with-azure-openai/ba-p/4111521
85
+
86
+ Args:
87
+ api_type (str, optional): Type of the API to be used. Accepted values are 'openai' or 'azure_openai'.
88
+ Defaults to 'openai'.
89
+ api_key (str, optional): The API key to authenticate the client. If not provided,
90
+ OpenAI automatically uses `OPENAI_API_KEY` from the environment.
91
+ If provided for Azure OpenAI, it will be used for authentication instead of the Azure AD token provider.
92
+ organization (str, optional): The ID of the organization. If not provided,
93
+ OpenAI automatically uses `OPENAI_ORG_ID` from the environment.
94
+ aoai_api_version (str, optional): Only applicable if using Azure OpenAI https://learn.microsoft.com/azure/ai-services/openai/reference#rest-api-versioning
95
+ azure_endpoint (str, optional): The endpoint to use for Azure OpenAI.
96
+ timeout (float, optional): By default requests time out after 10 minutes.
97
+ max_retries (int, optional): Certain errors are automatically retried 2 times by default,
98
+ with a short exponential backoff. Connection errors (for example, due to a network connectivity problem),
99
+ 408 Request Timeout, 409 Conflict, 429 Rate Limit, and >=500 Internal errors are all retried by default.
100
+
101
+ Returns:
102
+ Callable[..., Any]: A callable that creates a client and returns completion results
103
+
104
+
105
+ Raises:
106
+ InvalidOAIAPITypeError: If an invalid API type string is provided.
107
+ NotImplementedError: If the specified API type is recognized but not yet supported.
108
+ """
109
+ if api_type not in ["openai", "azure_openai"]:
110
+ raise InvalidOAIAPITypeError(f"Invalid OAIAPIType: {api_type}. Must be 'openai' or 'azure_openai'.")
111
+
112
+ if api_type == "openai":
113
+ return create_client_callable(
114
+ OpenAI,
115
+ api_key=api_key,
116
+ organization=organization,
117
+ timeout=timeout,
118
+ max_retries=max_retries,
119
+ )
120
+ elif api_type == "azure_openai":
121
+ if api_key:
122
+ return create_client_callable(
123
+ AzureOpenAI,
124
+ api_version=aoai_api_version,
125
+ azure_endpoint=azure_endpoint,
126
+ api_key=api_key,
127
+ timeout=timeout,
128
+ max_retries=max_retries,
129
+ )
130
+ else:
131
+ azure_credential = DefaultAzureCredential()
132
+ ad_token_provider = get_bearer_token_provider(
133
+ azure_credential, "https://cognitiveservices.azure.com/.default"
134
+ )
135
+ return create_client_callable(
136
+ AzureOpenAI,
137
+ api_version=aoai_api_version,
138
+ azure_endpoint=azure_endpoint,
139
+ azure_ad_token_provider=ad_token_provider,
140
+ timeout=timeout,
141
+ max_retries=max_retries,
142
+ )
143
+ else:
144
+ raise NotImplementedError(f"API type '{api_type}' is invalid.")
not_again_ai/llm/image_gen/types.py ADDED
@@ -0,0 +1,24 @@
1
+ from pathlib import Path
2
+ from typing import Any
3
+
4
+ from pydantic import BaseModel, Field
5
+
6
+
7
+ class ImageGenRequest(BaseModel):
8
+ prompt: str
9
+ model: str
10
+ images: list[Path] | None = Field(default=None)
11
+ mask: Path | None = Field(default=None)
12
+ n: int = Field(default=1)
13
+ quality: str | None = Field(default=None)
14
+ size: str | None = Field(default=None)
15
+ background: str | None = Field(default=None)
16
+ moderation: str | None = Field(default=None)
17
+
18
+
19
+ class ImageGenResponse(BaseModel):
20
+ images: list[bytes]
21
+ input_tokens: int
22
+ output_tokens: int
23
+ response_duration: float
24
+ input_tokens_details: dict[str, Any] | None = Field(default=None)
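
Note: a minimal sketch of generating an image with the new `image_gen` subpackage (uses `openai_client`, `create_image`, and `ImageGenRequest` as defined in this diff; the model name is illustrative only):

```python
from not_again_ai.llm.image_gen.interface import create_image
from not_again_ai.llm.image_gen.providers.openai_api import openai_client
from not_again_ai.llm.image_gen.types import ImageGenRequest

# openai_client() defaults to the OpenAI API and relies on OPENAI_API_KEY from the environment.
client = openai_client()

request = ImageGenRequest(
    prompt="A watercolor painting of a lighthouse at dusk",
    model="gpt-image-1",  # illustrative model name
    n=1,
    size="1024x1024",
)

response = create_image(request, provider="openai", client=client)

# response.images holds raw image bytes decoded from the API's base64 payload.
with open("lighthouse.png", "wb") as f:
    f.write(response.images[0])
```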
not_again_ai-0.20.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: not-again-ai
3
- Version: 0.18.0
3
+ Version: 0.20.0
4
4
  Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
5
5
  Project-URL: Homepage, https://github.com/DaveCoDev/not-again-ai
6
6
  Project-URL: Documentation, https://davecodev.github.io/not-again-ai/
@@ -19,16 +19,18 @@ Classifier: Programming Language :: Python :: 3.11
19
19
  Classifier: Programming Language :: Python :: 3.12
20
20
  Classifier: Typing :: Typed
21
21
  Requires-Python: >=3.11
22
- Requires-Dist: loguru>=0.7
23
- Requires-Dist: pydantic>=2.10
22
+ Requires-Dist: loguru<1.0,>=0.7
23
+ Requires-Dist: pydantic<3.0,>=2.11
24
24
  Provides-Extra: data
25
- Requires-Dist: playwright<2.0,>=1.51; extra == 'data'
26
- Requires-Dist: pytest-playwright<1.0,>=0.7; extra == 'data'
25
+ Requires-Dist: crawl4ai<1.0,>=0.6; extra == 'data'
26
+ Requires-Dist: httpx<1.0,>=0.28; extra == 'data'
27
+ Requires-Dist: markitdown[pdf]==0.1.2; extra == 'data'
27
28
  Provides-Extra: llm
28
- Requires-Dist: anthropic<1.0,>=0.49; extra == 'llm'
29
+ Requires-Dist: anthropic<1.0,>=0.50; extra == 'llm'
29
30
  Requires-Dist: azure-identity<2.0,>=1.21; extra == 'llm'
31
+ Requires-Dist: google-genai<2.0,>1.12; extra == 'llm'
30
32
  Requires-Dist: ollama<1.0,>=0.4; extra == 'llm'
31
- Requires-Dist: openai<2.0,>=1.68; extra == 'llm'
33
+ Requires-Dist: openai<2.0,>=1.76; extra == 'llm'
32
34
  Requires-Dist: python-liquid<3.0,>=2.0; extra == 'llm'
33
35
  Requires-Dist: tiktoken<1.0,>=0.9; extra == 'llm'
34
36
  Provides-Extra: statistics
@@ -62,7 +64,7 @@ It is encouraged to also **a)** use this as a template for your own Python packa
62
64
  **b)** instead of installing the package, copy and paste functions into your own projects.
63
65
  We make this easier by limiting the number of dependencies and use an MIT license.
64
66
 
65
- **Documentation** available within individual **[notebooks](notebooks)**, docstrings within the source, or auto-generated at [DaveCoDev.github.io/not-again-ai/](https://DaveCoDev.github.io/not-again-ai/).
67
+ **Documentation** available within individual **[notebooks](notebooks)** or docstrings within the source code.
66
68
 
67
69
  # Installation
68
70
 
@@ -82,7 +84,9 @@ The package is split into subpackages, so you can install only the parts you nee
82
84
 
83
85
  ### Data
84
86
  1. `pip install not_again_ai[data]`
85
- 1. `playwright install` to download the browser binaries.
87
+ 1. `crawl4ai-setup` to run crawl4ai post-installation setup.
88
+ 1. Set the `BRAVE_SEARCH_API_KEY` environment variable to use the Brave Search API for web data extraction.
89
+ 1. Get the API key from https://api-dashboard.search.brave.com/app/keys. You must have at least the Free "Data for Search" subscription.
86
90
 
87
91
 
88
92
  ### LLM
@@ -138,7 +142,7 @@ all machines that use the project, both during development and in production.
138
142
  To install all dependencies into an isolated virtual environment:
139
143
 
140
144
  ```shell
141
- uv sync --all-extras
145
+ uv sync --all-extras --all-groups
142
146
  ```
143
147
 
144
148
  To upgrade all dependencies to their latest versions:
@@ -311,3 +315,5 @@ Default settings are configured in [`.vscode/settings.json`](./.vscode/settings.
311
315
 
312
316
  # Attributions
313
317
  [python-blueprint](https://github.com/johnthagen/python-blueprint) for the Python package skeleton.
318
+
319
+ This project uses Crawl4AI (https://github.com/unclecode/crawl4ai) for web data extraction.
not_again_ai-0.20.0.dist-info/RECORD CHANGED
@@ -3,14 +3,16 @@ not_again_ai/py.typed,sha256=UaCuPFa3H8UAakbt-5G8SPacldTOGvJv18pPjUJ5gDY,93
3
3
  not_again_ai/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  not_again_ai/base/file_system.py,sha256=KNQmacO4Q__CQuq2oPzWrg3rQO48n3evglc9bNiP7KM,949
5
5
  not_again_ai/base/parallel.py,sha256=fcYhKBYBWvob84iKp3O93wvFFdXeidljZsShgBLTNGA,3448
6
- not_again_ai/data/__init__.py,sha256=1jF6mwvtB2PT7IEc3xpbRtZm3g3Lyf8zUqH4AEE4qlQ,244
7
- not_again_ai/data/web.py,sha256=wjx9cc33jcoJBGonYCIpwygPBFOwz7F-dx_ominmbnI,1838
6
+ not_again_ai/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
+ not_again_ai/data/brave_search_api.py,sha256=cTH7smRf8ITPpcQEcoIEvJo6VAST9_mg7FSL88xdGMc,6603
8
+ not_again_ai/data/web.py,sha256=LLWM5SkgI3-ILhtXYYuDm_eVnij3uS4tL059RyMq0lU,4737
8
9
  not_again_ai/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
10
  not_again_ai/llm/chat_completion/__init__.py,sha256=HozawvdRkTFgq8XR16GJUHN1ukEa4Ya68wVPVrl-afs,250
10
- not_again_ai/llm/chat_completion/interface.py,sha256=xRZXQ75dxrkt5WNtOTtrAa2Oy4ZB-PG2WihW9FBmW-s,2525
11
- not_again_ai/llm/chat_completion/types.py,sha256=Z_pQjVK_7rEvAE2fj5srKxHPFJBLPV8e0iCFibnzT7M,5596
11
+ not_again_ai/llm/chat_completion/interface.py,sha256=OU6ghG7RlveahkHZWdRHFg0uzbSrSh2Dz7u5-4rrypA,2700
12
+ not_again_ai/llm/chat_completion/types.py,sha256=0pBo1Fgm__JU3NyMShGouAIolcANPpTfXn8WJHODlQw,5472
12
13
  not_again_ai/llm/chat_completion/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
13
- not_again_ai/llm/chat_completion/providers/anthropic_api.py,sha256=_-NPc5pfhsRwwy-GYc1vAiyc0agmGLyo5_7-mcPEnBU,6189
14
+ not_again_ai/llm/chat_completion/providers/anthropic_api.py,sha256=Eix3_GgQvDyPr6pKfSDrfqRg_bTb0pqMI8fdQrx9e84,6211
15
+ not_again_ai/llm/chat_completion/providers/gemini_api.py,sha256=ovyTssfN3achMr2Laa2Hu557CjYN6o7UkO5IGXg6lzk,9461
14
16
  not_again_ai/llm/chat_completion/providers/ollama_api.py,sha256=Puo2eE2VynvZOoqrUlNYtPgRGCRMVa8syc3TfBxS1hs,10661
15
17
  not_again_ai/llm/chat_completion/providers/openai_api.py,sha256=1wdeV50KYX_KIf2uofsICKYoHVSvj4kTRpS1Vuw3NSQ,17887
16
18
  not_again_ai/llm/embedding/__init__.py,sha256=wscUfROukvw0M0vYccfaVTdXV0P-eICAT5mqM0LaHHc,182
@@ -19,6 +21,11 @@ not_again_ai/llm/embedding/types.py,sha256=J4FFLx35Aow2kOaafDReeY9cUNqhWMjaAk5gX
19
21
  not_again_ai/llm/embedding/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
20
22
  not_again_ai/llm/embedding/providers/ollama_api.py,sha256=m-OCis9WAUT2baGsGVPzejlive40eSNyO6tHmPh6joM,3201
21
23
  not_again_ai/llm/embedding/providers/openai_api.py,sha256=JFFqbq0O5snIEnr9VESdp5xehikQBPbs7nwyE6acFsY,5441
24
+ not_again_ai/llm/image_gen/__init__.py,sha256=v31PgYdTxMQRRxXPFl40BW5Y8RSHrZuwabuD-yC9gfI,170
25
+ not_again_ai/llm/image_gen/interface.py,sha256=XGE0aDvQwe-aWRuGNLMECO6KnMiK8qLv2APvr0hZ0tY,930
26
+ not_again_ai/llm/image_gen/types.py,sha256=Qhdov5azWwmmbqqE3Ln7t-Fb_Ipyp8r3z0_80omyASc,672
27
+ not_again_ai/llm/image_gen/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
28
+ not_again_ai/llm/image_gen/providers/openai_api.py,sha256=3IEhdId1UU_imvDebytTt0dSCjEEyPHf2o4vVt6RqSE,6198
22
29
  not_again_ai/llm/prompting/__init__.py,sha256=7YnHro1yH01FLGnao27WyqQDFjNYf9npE5UxoR9YrUU,84
23
30
  not_again_ai/llm/prompting/compile_prompt.py,sha256=uBn655yTiQ325z1CUgnkU2k7ICIvaYRJOm50B7w2lSs,4683
24
31
  not_again_ai/llm/prompting/interface.py,sha256=SMKYabmu3zTWbEDukU6aLU_JQ88apeBWWOF_qZ0s3ww,1783
@@ -33,7 +40,7 @@ not_again_ai/viz/distributions.py,sha256=OyWwJaNI6lMRm_iSrhq-CORLNvXfeuLSgDtVo3u
33
40
  not_again_ai/viz/scatterplot.py,sha256=5CUOWeknbBOaZPeX9oPin5sBkRKEwk8qeFH45R-9LlY,2292
34
41
  not_again_ai/viz/time_series.py,sha256=pOGZqXp_2nd6nKo-PUQNCtmMh__69jxQ6bQibTGLwZA,5212
35
42
  not_again_ai/viz/utils.py,sha256=hN7gwxtBt3U6jQni2K8j5m5pCXpaJDoNzGhBBikEU28,238
36
- not_again_ai-0.18.0.dist-info/METADATA,sha256=n41TWZaLvs_XPNbSEQojju9DI4nPhkjE055xX7ZJGjQ,12021
37
- not_again_ai-0.18.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
38
- not_again_ai-0.18.0.dist-info/licenses/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
39
- not_again_ai-0.18.0.dist-info/RECORD,,
43
+ not_again_ai-0.20.0.dist-info/METADATA,sha256=sYV6N2dmYarZnbjIqLoCrjfrsa8m1G-1rYxX3XV89tI,12389
44
+ not_again_ai-0.20.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
45
+ not_again_ai-0.20.0.dist-info/licenses/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
46
+ not_again_ai-0.20.0.dist-info/RECORD,,