not-again-ai 0.12.1__py3-none-any.whl → 0.14.0__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
not_again_ai/data/__init__.py (new file)
@@ -0,0 +1,7 @@
+ import importlib.util
+
+ if importlib.util.find_spec("playwright") is None:
+     raise ImportError(
+         "not_again_ai.data requires the 'data' extra to be installed. "
+         "You can install it using 'pip install not_again_ai[data]'."
+     )
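The new `not_again_ai/data/__init__.py` (listed in the RECORD below) is an import guard: without the `data` extra installed, importing the subpackage fails immediately. A minimal illustration of the expected behavior; the try/except wrapper here is illustrative, not part of the package:

```python
# Illustrative only: without playwright installed, importing the subpackage
# is expected to raise the ImportError defined above.
try:
    import not_again_ai.data  # noqa: F401
except ImportError as err:
    print(err)  # points the user at: pip install not_again_ai[data]
```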
not_again_ai/data/web.py (new file)
@@ -0,0 +1,56 @@
+ from loguru import logger
+ from playwright.sync_api import Browser, Playwright, sync_playwright
+
+
+ def create_browser(headless: bool = True) -> tuple[Playwright, Browser]:
+     """Creates and returns a new Playwright instance and browser.
+
+     Args:
+         headless (bool, optional): Whether to run the browser in headless mode. Defaults to True.
+
+     Returns:
+         tuple[Playwright, Browser]: A tuple containing the Playwright instance and browser.
+     """
+     pwright = sync_playwright().start()
+     browser = pwright.chromium.launch(
+         headless=headless,
+         chromium_sandbox=False,
+         timeout=15000,
+     )
+     return pwright, browser
+
+
+ def get_raw_web_content(url: str, browser: Browser | None = None, headless: bool = True) -> str:
+     """Fetches raw web content from a given URL using Playwright.
+
+     Args:
+         url (str): The URL to fetch content from.
+         browser (Browser | None, optional): An existing browser instance to use. Defaults to None.
+         headless (bool, optional): Whether to run the browser in headless mode. Defaults to True.
+
+     Returns:
+         str: The raw web content.
+     """
+     p = None
+     try:
+         if browser is None:
+             p, browser = create_browser(headless)
+
+         page = browser.new_page(
+             accept_downloads=False,
+             java_script_enabled=True,
+             viewport={"width": 1366, "height": 768},
+             user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.3",
+         )
+         page.goto(url)
+         content = page.content()
+         page.close()
+         return content
+     except Exception as e:
+         logger.error(f"Failed to get web content: {e}")
+         return ""
+     finally:
+         if browser:
+             browser.close()
+         if p:
+             p.stop()
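A minimal usage sketch of the new `not_again_ai.data.web` module (module path taken from the RECORD below; the URL is arbitrary and `playwright install` is assumed to have been run):

```python
from not_again_ai.data.web import get_raw_web_content

# One-off fetch: the function creates its own Playwright instance and browser,
# and tears both down in its finally block.
html = get_raw_web_content("https://example.com", headless=True)
print(len(html))
```

Note that a browser passed in via the `browser` argument is also closed in the `finally` block, so as written it cannot be reused across calls.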
not_again_ai/llm/gh_models/chat_completion.py
@@ -64,8 +64,8 @@ def chat_completion(
          tool_names = []
          tool_args_list = []
          for tool_call in tool_calls:
-             tool_names.append(tool_call.function.name)  # type: ignore
-             tool_args_list.append(json.loads(tool_call.function.arguments))  # type: ignore
+             tool_names.append(tool_call.function.name)
+             tool_args_list.append(json.loads(tool_call.function.arguments))
          response_data["tool_names"] = tool_names
          response_data["tool_args_list"] = tool_args_list

not_again_ai/llm/openai_api/chat_completion.py
@@ -1,16 +1,16 @@
+ from collections.abc import Generator
  import contextlib
  import json
  import time
  from typing import Any

- from openai import OpenAI
- from pydantic import BaseModel
+ from openai import AzureOpenAI, OpenAI


  def chat_completion(
      messages: list[dict[str, Any]],
      model: str,
-     client: OpenAI,
+     client: OpenAI | AzureOpenAI | Any,
      tools: list[dict[str, Any]] | None = None,
      tool_choice: str = "auto",
      max_tokens: int | None = None,
@@ -33,7 +33,15 @@ def chat_completion(
          model (str): ID of the model to use. See the model endpoint compatibility table:
              https://platform.openai.com/docs/models/model-endpoint-compatibility
              for details on which models work with the Chat API.
-         client (OpenAI): An instance of the OpenAI client.
+         client (OpenAI | AzureOpenAI | Any): An instance of the OpenAI or AzureOpenAI client.
+             If anything else is provided, we assume that it follows the OpenAI spec and call it by passing kwargs directly.
+             For example you can provide something like:
+             ```
+             def custom_client(**kwargs):
+                 client = openai_client()
+                 completion = client.chat.completions.create(**kwargs)
+                 return completion.to_dict()
+             ```
          tools (list[dict[str, Any]], optional):A list of tools the model may call.
              Use this to provide a list of functions the model may generate JSON inputs for. Defaults to None.
          tool_choice (str, optional): The tool choice to use. Can be "auto", "required", "none", or a specific function name.
@@ -88,8 +96,6 @@ def chat_completion(
      elif json_schema is not None:
          if isinstance(json_schema, dict):
              response_format = {"type": "json_schema", "json_schema": json_schema}
-         elif issubclass(json_schema, BaseModel):
-             response_format = json_schema
      else:
          response_format = {"type": "text"}

@@ -120,67 +126,71 @@ def chat_completion(
              kwargs["top_logprobs"] = logprobs[1]

      start_time = time.time()
-     response = client.chat.completions.create(**kwargs)
+     if isinstance(client, OpenAI | AzureOpenAI):
+         response = client.chat.completions.create(**kwargs)
+         response = response.to_dict()
+     else:
+         response = client(**kwargs)
      end_time = time.time()
      response_duration = end_time - start_time

      response_data: dict[str, Any] = {"choices": []}
-     for response_choice in response.choices:
+     for response_choice in response["choices"]:
          response_data_curr = {}
-         finish_reason = response_choice.finish_reason
+         finish_reason = response_choice["finish_reason"]
          response_data_curr["finish_reason"] = finish_reason

          # We first check for tool calls because even if the finish_reason is stop, the model may have called a tool
-         tool_calls = response_choice.message.tool_calls
+         tool_calls = response_choice["message"].get("tool_calls", None)
          if tool_calls:
              tool_names = []
              tool_args_list = []
              for tool_call in tool_calls:
-                 tool_names.append(tool_call.function.name)
-                 tool_args_list.append(json.loads(tool_call.function.arguments))
-             response_data_curr["message"] = response_choice.message.content
+                 tool_names.append(tool_call["function"]["name"])
+                 tool_args_list.append(json.loads(tool_call["function"]["arguments"]))
+             response_data_curr["message"] = response_choice["message"]["content"]
              response_data_curr["tool_names"] = tool_names
              response_data_curr["tool_args_list"] = tool_args_list
          elif finish_reason == "stop" or finish_reason == "length":
-             message = response_choice.message.content
+             message = response_choice["message"]["content"]
              if json_mode or json_schema is not None:
                  with contextlib.suppress(json.JSONDecodeError):
                      message = json.loads(message)
              response_data_curr["message"] = message

-         if response_choice.logprobs and response_choice.logprobs.content is not None:
+         if response_choice["logprobs"] and response_choice["logprobs"]["content"] is not None:
              logprobs_list: list[dict[str, Any] | list[dict[str, Any]]] = []
-             for logprob in response_choice.logprobs.content:
-                 if logprob.top_logprobs:
+             for logprob in response_choice["logprobs"]["content"]:
+                 if logprob["top_logprobs"]:
                      curr_logprob_infos = []
-                     for top_logprob in logprob.top_logprobs:
+                     for top_logprob in logprob["top_logprobs"]:
                          curr_logprob_infos.append(
                              {
-                                 "token": top_logprob.token,
-                                 "logprob": top_logprob.logprob,
-                                 "bytes": top_logprob.bytes,
+                                 "token": top_logprob["token"],
+                                 "logprob": top_logprob["logprob"],
+                                 "bytes": top_logprob["bytes"],
                              }
                          )
                      logprobs_list.append(curr_logprob_infos)
                  else:
                      logprobs_list.append(
                          {
-                             "token": logprob.token,
-                             "logprob": logprob.logprob,
-                             "bytes": logprob.bytes,
+                             "token": logprob["token"],
+                             "logprob": logprob["logprob"],
+                             "bytes": logprob["bytes"],
                          }
                      )

              response_data_curr["logprobs"] = logprobs_list
          response_data["choices"].append(response_data_curr)

-     usage = response.usage
+     usage = response["usage"]
      if usage is not None:
-         response_data["completion_tokens"] = usage.completion_tokens
-         response_data["prompt_tokens"] = usage.prompt_tokens
+         response_data["completion_tokens"] = usage["completion_tokens"]
+         response_data["prompt_tokens"] = usage["prompt_tokens"]

-     if seed is not None and response.system_fingerprint is not None:
-         response_data["system_fingerprint"] = response.system_fingerprint
+     if seed is not None and response["system_fingerprint"] is not None:
+         response_data["system_fingerprint"] = response["system_fingerprint"]

      response_data["response_duration"] = round(response_duration, 4)

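To make the dict-based response handling above concrete, a usage sketch; it assumes the `openai_client()` helper referenced in the docstring example (import path inferred from the RECORD below) and an arbitrary model name:

```python
from not_again_ai.llm.openai_api.chat_completion import chat_completion
from not_again_ai.llm.openai_api.openai_client import openai_client  # assumed import path

client = openai_client()
response = chat_completion(
    messages=[{"role": "user", "content": "Say hello."}],
    model="gpt-4o-mini",  # any chat model the client can reach
    client=client,
)
# The response is now a plain dict built via response.to_dict(), so fields are
# accessed with keys rather than SDK attributes.
print(response["message"], response["completion_tokens"])
```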
@@ -189,3 +199,141 @@ def chat_completion(
          del response_data["choices"]

      return response_data
+
+
+ def chat_completion_stream(
+     messages: list[dict[str, Any]],
+     model: str,
+     client: OpenAI | AzureOpenAI | Any,
+     tools: list[dict[str, Any]] | None = None,
+     tool_choice: str = "auto",
+     max_tokens: int | None = None,
+     temperature: float = 0.7,
+     seed: int | None = None,
+     **kwargs: Any,
+ ) -> Generator[dict[str, Any], None, None]:
+     """Stream a chat completion from the OpenAI API.
+
+     Args:
+         messages (list[dict[str, Any]]): The messages to send to the model.
+         model (str): The model to use for the chat completion.
+         client (OpenAI | AzureOpenAI | Any): The client to use to send the request.
+             If anything else is provided, we assume that it follows the OpenAI spec and call it by passing kwargs directly.
+             For example you can provide something like:
+             ```
+             def custom_client(**kwargs) -> Generator[dict[str, Any], None, None]:  # type: ignore
+                 client = openai_client()
+                 completion = client.chat.completions.create(**kwargs)
+                 for chunk in completion:
+                     yield chunk.to_dict()
+             ```
+         tools (list[dict[str, Any]], optional):A list of tools the model may call.
+             Use this to provide a list of functions the model may generate JSON inputs for. Defaults to None.
+         tool_choice (str, optional): The tool choice to use. Can be "auto", "required", "none", or a specific function name.
+             Note the function name cannot be any of "auto", "required", or "none". Defaults to "auto".
+         max_tokens (int | None): The maximum number of tokens to generate.
+         temperature (float): The temperature to use for the chat completion.
+         seed (int, optional): If specified, OpenAI will make a best effort to sample deterministically,
+             such that repeated requests with the same `seed` and parameters should return the same result.
+             Does not currently return `system_fingerprint`.
+
+     Returns:
+         Generator[dict[str, Any], None, None]: A generator of chunks of the chat completion.
+             Each chunk is a dictionary with the following keys:
+             role (str): The role of the chunk. Can be "assistant", "tool", or "usage".
+             content (str): The content of the chunk.
+             tool_name (str | None): The name of the tool called by the model.
+             tool_call_id (str | None): The ID of the tool call.
+             completion_tokens (int | None): The number of tokens used by the model to generate the completion.
+             prompt_tokens (int | None): The number of tokens in the messages sent to the model.
+     """
+
+     class ChatCompletionStreamParser:
+         def __init__(self) -> None:
+             # Remembers if we are currently streaming an assistant message or tool call
+             self.last_type: str = ""
+             self.last_tool_name: str | None = None
+             self.last_tool_call_id: str | None = None
+
+         def process_chunk(self, chunk: dict[str, Any]) -> dict[str, Any] | None:
+             """Convert the current chunk into a more digestible format
+             {
+                 "role": Literal["assistant", "tool", "usage"],
+                 "content": str,
+                 "tool_name": str | None,
+                 "tool_call_id": str | None,
+                 "completion_tokens": int | None,
+                 "prompt_tokens": int | None,
+             }
+             """
+             processed_chunk: dict[str, Any] = {}
+             if chunk["choices"]:
+                 choice = chunk["choices"][0]
+                 # This checks if its just a regular message currently being streamed
+                 if choice["delta"].get("role", "") and choice["delta"].get("tool_calls", None) is None:
+                     if choice["delta"]["role"] != self.last_type:
+                         self.last_type = choice["delta"]["role"]
+                         processed_chunk["role"] = self.last_type
+                         if not choice["delta"]["content"]:
+                             processed_chunk["content"] = ""
+                         else:
+                             processed_chunk["content"] = choice["delta"]["content"]
+                     else:
+                         processed_chunk["role"] = self.last_type
+                 elif choice["delta"].get("tool_calls", None):
+                     # tool_calls will always be present if the model is calling a tool
+                     tool_call = choice["delta"]["tool_calls"][0]
+                     if tool_call["function"].get("name"):
+                         self.last_type = "tool"
+                         self.last_tool_name = tool_call["function"]["name"]
+                         self.last_tool_call_id = tool_call["id"]
+                     processed_chunk["role"] = "tool"
+                     processed_chunk["content"] = tool_call["function"]["arguments"]
+                     processed_chunk["tool_name"] = self.last_tool_name
+                     processed_chunk["tool_call_id"] = self.last_tool_call_id
+                 elif choice["delta"].get("content", ""):
+                     # This is the case after the first regular assistant message
+                     processed_chunk["role"] = self.last_type
+                     processed_chunk["content"] = choice["delta"]["content"]
+             else:
+                 if chunk.get("usage"):
+                     processed_chunk["role"] = "usage"
+                     processed_chunk["completion_tokens"] = chunk["usage"]["completion_tokens"]
+                     processed_chunk["prompt_tokens"] = chunk["usage"]["prompt_tokens"]
+                 else:
+                     return None
+             return processed_chunk
+
+     kwargs.update(
+         {
+             "messages": messages,
+             "model": model,
+             "max_tokens": max_tokens,
+             "temperature": temperature,
+             "stream": True,
+             "stream_options": {"include_usage": True},
+         }
+     )
+
+     if tools is not None:
+         kwargs["tools"] = tools
+         if tool_choice not in ["none", "auto", "required"]:
+             kwargs["tool_choice"] = {"type": "function", "function": {"name": tool_choice}}
+         else:
+             kwargs["tool_choice"] = tool_choice
+
+     if seed is not None:
+         kwargs["seed"] = seed
+
+     if isinstance(client, OpenAI | AzureOpenAI):
+         response = client.chat.completions.create(**kwargs)
+     else:
+         response = client(**kwargs)
+
+     parser = ChatCompletionStreamParser()
+     for chunk in response:
+         if isinstance(client, OpenAI | AzureOpenAI):
+             chunk = chunk.to_dict()
+         processed_chunk = parser.process_chunk(chunk)
+         if processed_chunk:
+             yield processed_chunk
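A consumption sketch for the new streaming generator, under the same assumptions as the previous example:

```python
from not_again_ai.llm.openai_api.chat_completion import chat_completion_stream
from not_again_ai.llm.openai_api.openai_client import openai_client  # assumed import path

client = openai_client()
for chunk in chat_completion_stream(
    messages=[{"role": "user", "content": "Stream a short greeting."}],
    model="gpt-4o-mini",  # any chat model the client can reach
    client=client,
):
    if chunk["role"] == "assistant":
        print(chunk.get("content", ""), end="", flush=True)
    elif chunk["role"] == "tool":
        print(f"\n[tool call {chunk['tool_name']}] {chunk['content']}", end="")
    elif chunk["role"] == "usage":
        print(f"\nprompt={chunk['prompt_tokens']} completion={chunk['completion_tokens']}")
```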
not_again_ai/llm/openai_api/prompts.py
@@ -5,6 +5,8 @@ from pathlib import Path
  from typing import Any

  from liquid import Template
+ from openai.lib._pydantic import to_strict_json_schema
+ from pydantic import BaseModel


  def _validate_message_vision(message: dict[str, list[dict[str, Path | str]] | str]) -> bool:
@@ -162,3 +164,28 @@ def chat_prompt(messages_unformatted: list[dict[str, Any]], variables: dict[str,
              message["content"] = Template(message["content"]).render(**variables)

      return messages_formatted
+
+
+ def pydantic_to_json_schema(
+     pydantic_model: type[BaseModel], schema_name: str, description: str | None = None
+ ) -> dict[str, Any]:
+     """Converts a Pydantic model to a JSON schema expected by Structured Outputs.
+     Must adhere to the supported schemas: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas
+
+     Args:
+         pydantic_model: The Pydantic model to convert.
+         schema_name: The name of the schema.
+         description: An optional description of the schema.
+
+     Returns:
+         A JSON schema dictionary representing the Pydantic model.
+     """
+     converted_pydantic = to_strict_json_schema(pydantic_model)
+     schema = {
+         "name": schema_name,
+         "strict": True,
+         "schema": converted_pydantic,
+     }
+     if description:
+         schema["description"] = description
+     return schema
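A sketch tying the new helper to the `json_schema` parameter of `chat_completion`; the Pydantic model here is a made-up example:

```python
from pydantic import BaseModel

from not_again_ai.llm.openai_api.prompts import pydantic_to_json_schema


class CityInfo(BaseModel):  # hypothetical model for illustration
    name: str
    population: int


schema = pydantic_to_json_schema(CityInfo, schema_name="city_info", description="Basic facts about a city")
# chat_completion wraps a dict json_schema as {"type": "json_schema", "json_schema": schema},
# so the returned value can be passed directly as its json_schema argument.
```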
not_again_ai/llm/openai_api/tokens.py
@@ -1,3 +1,6 @@
+ from collections.abc import Collection, Set
+ from typing import Literal
+
  import tiktoken


@@ -18,18 +21,38 @@ def load_tokenizer(model: str) -> tiktoken.Encoding:
      return encoding


- def truncate_str(text: str, max_len: int, tokenizer: tiktoken.Encoding) -> str:
+ def truncate_str(
+     text: str,
+     max_len: int,
+     tokenizer: tiktoken.Encoding,
+     allowed_special: Literal["all"] | Set[str] = set(),
+     disallowed_special: Literal["all"] | Collection[str] = (),
+ ) -> str:
      """Truncates a string to a maximum token length.

+     Special tokens are artificial tokens used to unlock capabilities from a model,
+     such as fill-in-the-middle. So we want to be careful about accidentally encoding special
+     tokens, since they can be used to trick a model into doing something we don't want it to do.
+
+     Hence, by default, encode will raise an error if it encounters text that corresponds
+     to a special token. This can be controlled on a per-token level using the `allowed_special`
+     and `disallowed_special` parameters. In particular:
+     - Setting `disallowed_special` to () will prevent this function from raising errors and
+         cause all text corresponding to special tokens to be encoded as natural text.
+     - Setting `allowed_special` to "all" will cause this function to treat all text
+         corresponding to special tokens to be encoded as special tokens.
+
      Args:
          text (str): The string to truncate.
          max_len (int): The maximum number of tokens to keep.
          tokenizer (tiktoken.Encoding): A tiktoken encoding object
+         allowed_special (str | set[str]):
+         disallowed_special (str | set[str]):

      Returns:
          str: The truncated string.
      """
-     tokens = tokenizer.encode(text)
+     tokens = tokenizer.encode(text, allowed_special=allowed_special, disallowed_special=disallowed_special)
      if len(tokens) > max_len:
          tokens = tokens[:max_len]
          # Decode the tokens back to a string
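A small sketch of how the new special-token parameters behave, using a standard tiktoken encoding (the import path is taken from the RECORD below):

```python
import tiktoken

from not_again_ai.llm.openai_api.tokens import truncate_str

tokenizer = tiktoken.get_encoding("cl100k_base")
text = "Hello <|endoftext|> world"

# Defaults (disallowed_special=()): special-token text is encoded as ordinary text, no error.
print(truncate_str(text, max_len=3, tokenizer=tokenizer))

# allowed_special="all": the same text is encoded as real special tokens instead.
print(truncate_str(text, max_len=3, tokenizer=tokenizer, allowed_special="all"))
```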
@@ -39,33 +62,70 @@ def truncate_str(text: str, max_len: int, tokenizer: tiktoken.Encoding) -> str:
      return text


- def num_tokens_in_string(text: str, tokenizer: tiktoken.Encoding) -> int:
+ def num_tokens_in_string(
+     text: str,
+     tokenizer: tiktoken.Encoding,
+     allowed_special: Literal["all"] | Set[str] = set(),
+     disallowed_special: Literal["all"] | Collection[str] = (),
+ ) -> int:
      """Return the number of tokens in a string.

+     Special tokens are artificial tokens used to unlock capabilities from a model,
+     such as fill-in-the-middle. So we want to be careful about accidentally encoding special
+     tokens, since they can be used to trick a model into doing something we don't want it to do.
+
+     Hence, by default, encode will raise an error if it encounters text that corresponds
+     to a special token. This can be controlled on a per-token level using the `allowed_special`
+     and `disallowed_special` parameters. In particular:
+     - Setting `disallowed_special` to () will prevent this function from raising errors and
+         cause all text corresponding to special tokens to be encoded as natural text.
+     - Setting `allowed_special` to "all" will cause this function to treat all text
+         corresponding to special tokens to be encoded as special tokens.
+
      Args:
          text (str): The string to count the tokens.
          tokenizer (tiktoken.Encoding): A tiktoken encoding object
+         allowed_special (str | set[str]):
+         disallowed_special (str | set[str]):

      Returns:
          int: The number of tokens in the string.
      """
-     return len(tokenizer.encode(text))
+     return len(tokenizer.encode(text, allowed_special=allowed_special, disallowed_special=disallowed_special))


  def num_tokens_from_messages(
-     messages: list[dict[str, str]], tokenizer: tiktoken.Encoding, model: str = "gpt-3.5-turbo-0125"
+     messages: list[dict[str, str]],
+     tokenizer: tiktoken.Encoding,
+     model: str = "gpt-3.5-turbo-0125",
+     allowed_special: Literal["all"] | Set[str] = set(),
+     disallowed_special: Literal["all"] | Collection[str] = (),
  ) -> int:
      """Return the number of tokens used by a list of messages.
      NOTE: Does not support counting tokens used by function calling or prompts with images.
      Reference: # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
      and https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb

+     Special tokens are artificial tokens used to unlock capabilities from a model,
+     such as fill-in-the-middle. So we want to be careful about accidentally encoding special
+     tokens, since they can be used to trick a model into doing something we don't want it to do.
+
+     Hence, by default, encode will raise an error if it encounters text that corresponds
+     to a special token. This can be controlled on a per-token level using the `allowed_special`
+     and `disallowed_special` parameters. In particular:
+     - Setting `disallowed_special` to () will prevent this function from raising errors and
+         cause all text corresponding to special tokens to be encoded as natural text.
+     - Setting `allowed_special` to "all" will cause this function to treat all text
+         corresponding to special tokens to be encoded as special tokens.
+
      Args:
          messages (list[dict[str, str]]): A list of messages to count the tokens
              should ideally be the result after calling llm.prompts.chat_prompt.
          tokenizer (tiktoken.Encoding): A tiktoken encoding object
          model (str): The model to use for tokenization. Defaults to "gpt-3.5-turbo-0125".
              See https://platform.openai.com/docs/models for a list of OpenAI models.
+         allowed_special (str | set[str]):
+         disallowed_special (str | set[str]):

      Returns:
          int: The number of tokens used by the messages.
@@ -111,7 +171,13 @@ See https://github.com/openai/openai-python/blob/main/chatml.md for information
      for message in messages:
          num_tokens += tokens_per_message
          for key, value in message.items():
-             num_tokens += len(tokenizer.encode(value))
+             num_tokens += len(
+                 tokenizer.encode(
+                     value,
+                     allowed_special=allowed_special,
+                     disallowed_special=disallowed_special,
+                 )
+             )
              if key == "name":
                  num_tokens += tokens_per_name
      num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
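And a matching sketch for message-level counting with the updated signature (message content is arbitrary; the default model argument is kept):

```python
import tiktoken

from not_again_ai.llm.openai_api.tokens import num_tokens_from_messages

tokenizer = tiktoken.get_encoding("cl100k_base")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "How many tokens is this?"},
]
# disallowed_special defaults to (), so stray special-token text in messages will not raise.
print(num_tokens_from_messages(messages, tokenizer))
```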
not_again_ai-0.14.0.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: not-again-ai
- Version: 0.12.1
+ Version: 0.14.0
  Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
  Home-page: https://github.com/DaveCoDev/not-again-ai
  License: MIT
@@ -17,25 +17,27 @@ Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Classifier: Typing :: Typed
+ Provides-Extra: data
  Provides-Extra: llm
  Provides-Extra: local-llm
  Provides-Extra: statistics
  Provides-Extra: viz
- Requires-Dist: azure-ai-inference (==1.0.0b3) ; extra == "llm"
- Requires-Dist: azure-identity (>=1.17,<2.0) ; extra == "llm"
+ Requires-Dist: azure-ai-inference (==1.0.0b5) ; extra == "llm"
+ Requires-Dist: azure-identity (>=1.19,<2.0) ; extra == "llm"
  Requires-Dist: jinja2 (>=3.1,<4.0) ; extra == "local-llm"
- Requires-Dist: loguru (==0.7.2)
- Requires-Dist: numpy (>=1.26,<2.0) ; extra == "statistics" or extra == "viz"
+ Requires-Dist: loguru (>=0.7,<0.8)
+ Requires-Dist: numpy (>=2.1,<3.0) ; extra == "statistics" or extra == "viz"
  Requires-Dist: ollama (>=0.3,<0.4) ; extra == "local-llm"
- Requires-Dist: openai (>=1.41,<2.0) ; extra == "llm"
+ Requires-Dist: openai (>=1.52,<2.0) ; extra == "llm"
  Requires-Dist: pandas (>=2.2,<3.0) ; extra == "viz"
- Requires-Dist: pydantic (>=2.8,<3.0) ; extra == "llm"
+ Requires-Dist: pydantic (>=2.9,<3.0)
+ Requires-Dist: pytest-playwright (>=0.5,<0.6) ; extra == "data"
  Requires-Dist: python-liquid (>=1.12,<2.0) ; extra == "llm"
  Requires-Dist: scikit-learn (>=1.5,<2.0) ; extra == "statistics"
  Requires-Dist: scipy (>=1.14,<2.0) ; extra == "statistics"
  Requires-Dist: seaborn (>=0.13,<0.14) ; extra == "viz"
- Requires-Dist: tiktoken (>=0.7,<0.8) ; extra == "llm"
- Requires-Dist: transformers (>=4.44,<5.0) ; extra == "local-llm"
+ Requires-Dist: tiktoken (>=0.8,<0.9) ; extra == "llm"
+ Requires-Dist: transformers (>=4.45,<5.0) ; extra == "local-llm"
  Project-URL: Documentation, https://github.com/DaveCoDev/not-again-ai
  Project-URL: Repository, https://github.com/DaveCoDev/not-again-ai
  Description-Content-Type: text/markdown
@@ -72,34 +74,53 @@ $ pip install not_again_ai[llm,local_llm,statistics,viz]
  Note that local LLM requires separate installations and will not work out of the box due to how hardware dependent it is. Be sure to check the [notebooks](notebooks/local_llm/) for more details.

  The package is split into subpackages, so you can install only the parts you need.
- * **Base only**: `pip install not_again_ai`
- * **LLM**: `pip install not_again_ai[llm]`
-     1. OpenAI API
-         1. Go to https://platform.openai.com/settings/profile?tab=api-keys to get your API key.
-         1. (Optional) Set the `OPENAI_API_KEY` and the `OPENAI_ORG_ID` environment variables.
-     1. Azure OpenAI (AOAI)
-         1. Using AOAI requires using Entra ID authentication. See https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity for how to set this up for your AOAI deployment.
-         1. Requires the correct role assigned to your user account and being signed into the Azure CLI.
-         1. (Optional) Set the `AZURE_OPENAI_ENDPOINT` environment variable.
-     1. GitHub Models
-         1. Get a Personal Access Token from https://github.com/settings/tokens and set the `GITHUB_TOKEN` environment variable. The token does not need any permissions.
-         1. Check the [Github Marketplace](https://github.com/marketplace/models) to see which models are available.
- * **Local LLM**: `pip install not_again_ai[llm,local_llm]`
-     1. Some HuggingFace transformers tokenizers are gated behind access requests. If you wish to use these, you will need to request access from HuggingFace on the model card.
-         1. Then set the `HF_TOKEN` environment variable to your HuggingFace API token which can be found here: https://huggingface.co/settings/tokens
-     2. If you wish to use Ollama:
-         1. Follow the instructions at https://github.com/ollama/ollama to install Ollama for your system.
-         2. (Optional) [Add Ollama as a startup service (recommended)](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended)
-         3. (Optional) To make the Ollama service accessible on your local network from a Linux server, add the following to the `/etc/systemd/system/ollama.service` file which will make Ollama available at `http://<local_address>:11434`:
-             ```bash
-             [Service]
-             ...
-             Environment="OLLAMA_HOST=0.0.0.0"
-             ```
-         4. It is recommended to always have the latest version of Ollama. To update Ollama check the [docs](https://github.com/ollama/ollama/blob/main/docs/). The command for Linux is: `curl -fsSL https://ollama.com/install.sh | sh`
-     3. HuggingFace transformers and other requirements are hardware dependent so for providers other than Ollama, this only installs some generic dependencies. Check the [notebooks](notebooks/local_llm/) for more details on what is available and how to install it.
- * **Statistics**: `pip install not_again_ai[statistics]`
- * **Visualization**: `pip install not_again_ai[viz]`
+
+ ### Base
+ 1. `pip install not_again_ai`
+
+
+ ### Data
+ 1. `pip install not_again_ai[data]`
+ 1. `playwright install` to download the browser binaries.
+
+
+ ### LLM
+ 1. `pip install not_again_ai[llm]`
+ 1. Setup OpenAI API
+     1. Go to https://platform.openai.com/settings/profile?tab=api-keys to get your API key.
+     1. (Optional) Set the `OPENAI_API_KEY` and the `OPENAI_ORG_ID` environment variables.
+ 1. Setup Azure OpenAI (AOAI)
+     1. Using AOAI requires using Entra ID authentication. See https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity for how to set this up for your AOAI deployment.
+         * Requires the correct role assigned to your user account and being signed into the Azure CLI.
+     1. (Optional) Set the `AZURE_OPENAI_ENDPOINT` environment variable.
+ 1. Setup GitHub Models
+     1. Get a Personal Access Token from https://github.com/settings/tokens and set the `GITHUB_TOKEN` environment variable. The token does not need any permissions.
+     1. Check the [Github Marketplace](https://github.com/marketplace/models) to see which models are available.
+
+
+ ### Local LLM
+ 1. `pip install not_again_ai[llm,local_llm]`
+ 1. Some HuggingFace transformers tokenizers are gated behind access requests. If you wish to use these, you will need to request access from HuggingFace on the model card.
+     * Then set the `HF_TOKEN` environment variable to your HuggingFace API token which can be found here: https://huggingface.co/settings/tokens
+ 1. If you wish to use Ollama:
+     1. Follow the instructions at https://github.com/ollama/ollama to install Ollama for your system.
+     1. (Optional) [Add Ollama as a startup service (recommended)](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended)
+     1. (Optional) To make the Ollama service accessible on your local network from a Linux server, add the following to the `/etc/systemd/system/ollama.service` file which will make Ollama available at `http://<local_address>:11434`:
+         ```bash
+         [Service]
+         ...
+         Environment="OLLAMA_HOST=0.0.0.0"
+         ```
+     1. It is recommended to always have the latest version of Ollama. To update Ollama check the [docs](https://github.com/ollama/ollama/blob/main/docs/). The command for Linux is: `curl -fsSL https://ollama.com/install.sh | sh`
+ 1. HuggingFace transformers and other requirements are hardware dependent so for providers other than Ollama, this only installs some generic dependencies. Check the [notebooks](notebooks/local_llm/) for more details on what is available and how to install it.
+
+
+ ### Statistics
+ 1. `pip install not_again_ai[statistics]`
+
+
+ ### Visualization
+ 1. `pip install not_again_ai[viz]`


  # Development Information
@@ -229,10 +250,10 @@ areas of the project that are currently not tested.

  pytest and code coverage are configured in [`pyproject.toml`](./pyproject.toml).

- To pass arguments to `pytest` through `nox`:
+ To run selected tests:

  ```bash
- (.venv) $ nox -s test -- -k invalid_factorial
+ (.venv) $ nox -s test -- -k "test_web"
  ```

  ## Code Style Checking
not_again_ai-0.14.0.dist-info/RECORD
@@ -2,17 +2,19 @@ not_again_ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  not_again_ai/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  not_again_ai/base/file_system.py,sha256=KNQmacO4Q__CQuq2oPzWrg3rQO48n3evglc9bNiP7KM,949
  not_again_ai/base/parallel.py,sha256=fcYhKBYBWvob84iKp3O93wvFFdXeidljZsShgBLTNGA,3448
+ not_again_ai/data/__init__.py,sha256=1jF6mwvtB2PT7IEc3xpbRtZm3g3Lyf8zUqH4AEE4qlQ,244
+ not_again_ai/data/web.py,sha256=wjx9cc33jcoJBGonYCIpwygPBFOwz7F-dx_ominmbnI,1838
  not_again_ai/llm/__init__.py,sha256=_wNUL6FDaT369Z8W48FsaC_NkcOZ-ib2MMUvnaLOS-0,451
  not_again_ai/llm/gh_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  not_again_ai/llm/gh_models/azure_ai_client.py,sha256=GkVn9ZwYbsLm3X0A3pGKKHuoqrxc-BZnZ4n9ExelRUQ,580
- not_again_ai/llm/gh_models/chat_completion.py,sha256=t6HfwOh8UKtE7OqJsCaFOjE2CqpnJV3gQPNXZvoSyYo,3631
+ not_again_ai/llm/gh_models/chat_completion.py,sha256=zI6Kfqb9AW0t_Yd1ecaXy7q70gygJ_XKcFbtYrKIbn4,3599
  not_again_ai/llm/openai_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- not_again_ai/llm/openai_api/chat_completion.py,sha256=xlqVAp2YaCXqw5zU_vAPCW7SaJfUe1vdeUqKSjoqDtE,9771
+ not_again_ai/llm/openai_api/chat_completion.py,sha256=5jO-J97zcKPJrzHn6V4NZB3nZa1RZwbIeMC3gbXlWWQ,17100
  not_again_ai/llm/openai_api/context_management.py,sha256=BJSG100_qw9MeTCZGztDV5CBXjVOxU4x7gyoRlLxWnI,3561
  not_again_ai/llm/openai_api/embeddings.py,sha256=4OBnxZicrY6q4dQhuPqMdAnifyjwrsKMTDj-kVre0yc,2500
  not_again_ai/llm/openai_api/openai_client.py,sha256=AK9SDBkpP94u5Q73-Q5i5HRPQh_D8cF8Dfl0IgPsJDQ,3816
- not_again_ai/llm/openai_api/prompts.py,sha256=B62xs3WKaTv7SfT_TVC-PqO9oeWWpO0xS4_oxW9MYMQ,7093
- not_again_ai/llm/openai_api/tokens.py,sha256=RYBzl5vqE_MzWM60QbWC_6X9YOQoOgBOeR-68rM34II,4421
+ not_again_ai/llm/openai_api/prompts.py,sha256=lZYxgzoM2VqXWKUDToKWKR6w49KNYKu5TnqKLxG3TsM,8034
+ not_again_ai/llm/openai_api/tokens.py,sha256=Q4xdCEPrmgDCNjmcB4rg6ipvo4_McwSjc-b9gAHjUJs,8024
  not_again_ai/local_llm/__init__.py,sha256=BsUn39U3QQaw6yomQHfp_HIPHRIBoMAgjcP3CDADx04,882
  not_again_ai/local_llm/chat_completion.py,sha256=PmICXrGZJXIuqY00ULBGi2bKnPG8ticqTXZHSTzZK9o,4828
  not_again_ai/local_llm/huggingface/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -35,8 +37,8 @@ not_again_ai/viz/distributions.py,sha256=OyWwJaNI6lMRm_iSrhq-CORLNvXfeuLSgDtVo3u
  not_again_ai/viz/scatterplot.py,sha256=5CUOWeknbBOaZPeX9oPin5sBkRKEwk8qeFH45R-9LlY,2292
  not_again_ai/viz/time_series.py,sha256=pOGZqXp_2nd6nKo-PUQNCtmMh__69jxQ6bQibTGLwZA,5212
  not_again_ai/viz/utils.py,sha256=hN7gwxtBt3U6jQni2K8j5m5pCXpaJDoNzGhBBikEU28,238
- not_again_ai-0.12.1.dist-info/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
- not_again_ai-0.12.1.dist-info/METADATA,sha256=VydzFufICQyP6paN15KJTudJi6rSpwWn5H_W1v46p6Y,16389
- not_again_ai-0.12.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- not_again_ai-0.12.1.dist-info/entry_points.txt,sha256=EMJegugnmJUd-jMUA_qIRMIPAasbei8gP6O4-ER0BxQ,61
- not_again_ai-0.12.1.dist-info/RECORD,,
+ not_again_ai-0.14.0.dist-info/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
+ not_again_ai-0.14.0.dist-info/METADATA,sha256=kNL0KybcNVoN7fcCMNO1CohIWZAxc74gnV68zzoEDfI,16475
+ not_again_ai-0.14.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ not_again_ai-0.14.0.dist-info/entry_points.txt,sha256=EMJegugnmJUd-jMUA_qIRMIPAasbei8gP6O4-ER0BxQ,61
+ not_again_ai-0.14.0.dist-info/RECORD,,