not-again-ai 0.3.1__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
 try:
-    import jinja2  # noqa
+    import liquid  # noqa
     import openai  # noqa
     import tiktoken  # noqa
 except ImportError:
@@ -1,3 +1,4 @@
+import contextlib
 import json
 from typing import Any
 
@@ -8,9 +9,11 @@ def chat_completion(
     messages: list[dict[str, str]],
     model: str,
     client: OpenAI,
-    functions: list[dict[str, Any]] | None = None,
+    tools: list[dict[str, Any]] | None = None,
+    tool_choice: str = "auto",
     max_tokens: int | None = None,
     temperature: float = 0.7,
+    json_mode: bool = False,
     **kwargs: Any,
 ) -> dict[str, Any]:
     """Get an OpenAI chat completion response: https://platform.openai.com/docs/api-reference/chat/create
@@ -21,57 +24,79 @@ def chat_completion(
             https://platform.openai.com/docs/models/model-endpoint-compatibility
             for details on which models work with the Chat API.
         client (OpenAI): An instance of the OpenAI client.
-        functions (list, optional): A list of functions the model may generate JSON inputs for. Defaults to None.
+        tools (list[dict[str, Any]], optional): A list of tools the model may generate JSON inputs for.
+            Defaults to None.
+        tool_choice (str, optional): The tool choice to use. Can be "auto", "none", or a specific function name.
+            Defaults to "auto".
         max_tokens (int, optional): The maximum number of tokens to generate in the chat completion.
-            Defaults to limited to the model's context length.
+            Defaults to None, which automatically limits to the model's maximum context length.
         temperature (float, optional): What sampling temperature to use, between 0 and 2.
             Higher values like 0.8 will make the output more random,
             while lower values like 0.2 will make it more focused and deterministic. Defaults to 0.7.
+        json_mode (bool, optional): When JSON mode is enabled, the model is constrained to only
+            generate strings that parse into a valid JSON object and will return a dictionary.
+            See https://platform.openai.com/docs/guides/text-generation/json-mode
         **kwargs: Additional keyword arguments to pass to the OpenAI client chat completion.
 
     Returns:
         dict: A dictionary containing the following keys:
-            - "finish_reason" (str): The reason the model stopped generating further tokens. Can be "stop" or "function_call".
-            - "function_name" (str, optional): The name of the function called by the model, present only if "finish_reason" is "function_call".
-            - "function_args" (dict, optional): The arguments of the function called by the model, present only if "finish_reason" is "function_call".
-            - "message" (str, optional): The content of the generated assistant message, present only if "finish_reason" is "stop".
+            - "finish_reason" (str): The reason the model stopped generating further tokens.
+                Can be "stop", "length", or "tool_calls".
+            - "tool_names" (list[str], optional): The names of the tools called by the model.
+            - "tool_args_list" (list[dict], optional): The arguments of the tools called by the model.
+            - "message" (str | dict): The content of the generated assistant message.
+                If json_mode is True, this will be a dictionary.
             - "completion_tokens" (int): The number of tokens used by the model to generate the completion.
             - "prompt_tokens" (int): The number of tokens in the generated response.
     """
-    if functions is None:
-        response = client.chat.completions.create(
-            messages=messages,  # type: ignore
-            model=model,
-            max_tokens=max_tokens,
-            temperature=temperature,
-            n=1,
-            **kwargs,
-        )
-    else:
-        response = client.chat.completions.create(  # type: ignore
-            messages=messages,
-            model=model,
-            functions=functions,
-            function_call="auto",
-            max_tokens=max_tokens,
-            temperature=temperature,
-            n=1,
-            **kwargs,
-        )
+    response_format = {"type": "json_object"} if json_mode else None
+
+    kwargs.update(
+        {
+            "messages": messages,
+            "model": model,
+            "tools": tools,
+            "max_tokens": max_tokens,
+            "temperature": temperature,
+            "response_format": response_format,
+            "n": 1,
+        }
+    )
+
+    if tools is not None:
+        if tool_choice not in ["none", "auto"]:
+            kwargs["tool_choice"] = {"type": "function", "function": {"name": tool_choice}}
+        else:
+            kwargs["tool_choice"] = tool_choice
+
+    # Call the function with the set parameters
+    response = client.chat.completions.create(**kwargs)
 
     response_data = {}
     finish_reason = response.choices[0].finish_reason
     response_data["finish_reason"] = finish_reason
-    if finish_reason == "function_call":
-        function_call = response.choices[0].message.function_call
-        if function_call is not None:
-            response_data["function_name"] = function_call.name  # type: ignore
-            response_data["function_args"] = json.loads(function_call.arguments)
+
+    # Not checking finish_reason == "tool_calls" here because when a user provides a function name as tool_choice,
+    # the finish reason is "stop", not "tool_calls"
+    tool_calls = response.choices[0].message.tool_calls
+    if tool_calls:
+        tool_names = []
+        tool_args_list = []
+        for tool_call in tool_calls:
+            tool_names.append(tool_call.function.name)
+            tool_args_list.append(json.loads(tool_call.function.arguments))
+        response_data["tool_names"] = tool_names
+        response_data["tool_args_list"] = tool_args_list
     elif finish_reason == "stop" or finish_reason == "length":
-        message = response.choices[0].message
-        response_data["message"] = message.content  # type: ignore
+        message = response.choices[0].message.content
+        if json_mode:
+            with contextlib.suppress(json.JSONDecodeError):
+                message = json.loads(message)
+        response_data["message"] = message
+
     usage = response.usage
     if usage is not None:
-        response_data["completion_tokens"] = usage.completion_tokens  # type: ignore
-        response_data["prompt_tokens"] = usage.prompt_tokens  # type: ignore
+        response_data["completion_tokens"] = usage.completion_tokens
+        response_data["prompt_tokens"] = usage.prompt_tokens
+
     return response_data
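
For context, a minimal sketch of calling the new interface. The `get_weather` tool spec and the messages are illustrative only; the parameters and return keys follow the docstring above:

    from not_again_ai.llm.chat_completion import chat_completion
    from not_again_ai.llm.openai_client import openai_client

    client = openai_client()  # picks up OPENAI_API_KEY from the environment

    # Hypothetical tool spec, for illustration only.
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get the current weather for a city.",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ]

    response = chat_completion(
        messages=[{"role": "user", "content": "What is the weather in Boston?"}],
        model="gpt-3.5-turbo-1106",
        client=client,
        tools=tools,
        tool_choice="get_weather",  # or "auto" (the default) / "none"
    )
    # Tool calls now surface as parallel lists instead of function_name/function_args:
    print(response.get("tool_names"), response.get("tool_args_list"))
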
@@ -18,7 +18,7 @@ def priority_truncation(
     variables: dict[str, str],
     priority: list[str],
     token_limit: int,
-    model: str = "gpt-3.5-turbo-0613",
+    model: str = "gpt-3.5-turbo-1106",
 ) -> list[dict[str, str]]:
     """Formats messages_unformatted and injects variables into the messages in the order of priority, truncating the messages to fit the token limit.
 
@@ -37,7 +37,7 @@ def priority_truncation(
         variables: A dictionary where each key-value pair represents a variable name and its value to inject.
         priority: A list of variable names in their order of priority.
         token_limit: The maximum number of tokens allowed in the messages.
-        model: The model to use for tokenization. Defaults to "gpt-3.5-turbo-0613".
+        model: The model to use for tokenization. Defaults to "gpt-3.5-turbo-1106".
     """
 
     # Check if all variables in the priority list are in the variables dict.
@@ -21,10 +21,12 @@ def openai_client(
             Defaults to 'openai'.
         api_key (str, optional): The API key to authenticate the client. If not provided,
             OpenAI automatically uses `OPENAI_API_KEY` from the environment.
-        organization (str, optional): The ID of the organization (for enterprise users). If not provided,
+        organization (str, optional): The ID of the organization. If not provided,
             OpenAI automatically uses `OPENAI_ORG_ID` from the environment.
-        timeout (float, optional): TBD
-        max_retries (int, optional): TBD
+        timeout (float, optional): By default requests time out after 10 minutes.
+        max_retries (int, optional): Certain errors are automatically retried 2 times by default,
+            with a short exponential backoff. Connection errors (for example, due to a network connectivity problem),
+            408 Request Timeout, 409 Conflict, 429 Rate Limit, and >=500 Internal errors are all retried by default.
 
     Returns:
         OpenAI: An instance of the OpenAI client.
@@ -34,7 +36,7 @@ def openai_client(
         NotImplementedError: If the specified API type is recognized but not yet supported (e.g., 'azure_openai').
 
     Examples:
-        >>> client = oai_client(api_type="openai", api_key="YOUR_API_KEY")
+        >>> client = openai_client(api_type="openai", api_key="YOUR_API_KEY")
     """
     if api_type not in ["openai", "azure_openai"]:
         raise InvalidOAIAPITypeError(f"Invalid OAIAPIType: {api_type}. Must be 'openai' or 'azure_openai'.")
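
The retry and timeout behavior documented above can be overridden at construction time. A small sketch (assumes `OPENAI_API_KEY` is set in the environment):

    from not_again_ai.llm.openai_client import openai_client

    client = openai_client(
        api_type="openai",
        timeout=60.0,   # seconds; requests otherwise time out after 10 minutes
        max_retries=5,  # 2 by default, with a short exponential backoff
    )
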
@@ -1,4 +1,4 @@
-import jinja2
+from liquid import Template
 
 
 def _validate_message(message: dict[str, str]) -> bool:
@@ -19,15 +19,13 @@ def _validate_message(message: dict[str, str]) -> bool:
 
 
 def chat_prompt(messages_unformatted: list[dict[str, str]], variables: dict[str, str]) -> list[dict[str, str]]:
-    """Formats a list of messages for OpenAI's chat completion API using Jinja2 templating.
-
-    The content of each message is treated as a Jinja2 template that is rendered
-    with the provided variables.
+    """
+    Formats a list of messages for OpenAI's chat completion API using Liquid templating.
 
     Args:
         messages_unformatted: A list of dictionaries where each dictionary
             represents a message. Each message must have 'role' and 'content'
-            keys with string values, where content is a Jinja2 template.
+            keys with string values, where content is a Liquid template.
         variables: A dictionary where each key-value pair represents a variable
             name and its value for template rendering.
 
@@ -47,10 +45,13 @@ def chat_prompt(messages_unformatted: list[dict[str, str]], variables: dict[str,
         {"role": "user", "content": "Help me write Python code for the fibonacci sequence"}
     ]
     """
+
     messages_formatted = messages_unformatted.copy()
     for message in messages_formatted:
-        # Validate each message and raise a ValueError if any message is invalid
         if not _validate_message(message):
             raise ValueError(f"Invalid message: {message}")
-        message["content"] = jinja2.Template(message["content"]).render(**variables)
+
+        liquid_template = Template(message["content"])
+        message["content"] = liquid_template.render(**variables)
+
     return messages_formatted
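
python-liquid keeps the `{{ variable }}` output syntax that the old Jinja2 templates used, so simple substitutions carry over unchanged. A sketch with illustrative messages:

    from not_again_ai.llm.prompts import chat_prompt

    messages = [
        {"role": "system", "content": "You are a helpful {{ language }} assistant."},
        {"role": "user", "content": "Help me write {{ language }} code for the fibonacci sequence"},
    ]
    # Each content string is rendered via liquid.Template(...).render(...)
    formatted = chat_prompt(messages, {"language": "Python"})
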
@@ -1,15 +1,14 @@
 import tiktoken
 
 
-def truncate_str(text: str, max_len: int, model: str = "gpt-3.5-turbo-0613") -> str:
+def truncate_str(text: str, max_len: int, model: str = "gpt-3.5-turbo-1106") -> str:
     """Truncates a string to a maximum token length.
 
     Args:
         text: The string to truncate.
         max_len: The maximum number of tokens to keep.
-        model: The model to use for tokenization. Defaults to "gpt-3.5-turbo-0613".
-            See https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
-            for a list of OpenAI models.
+        model: The model to use for tokenization. Defaults to "gpt-3.5-turbo-1106".
+            See https://platform.openai.com/docs/models for a list of OpenAI models.
 
     Returns:
         The truncated string.
@@ -30,12 +29,13 @@ def truncate_str(text: str, max_len: int, model: str = "gpt-3.5-turbo-0613") ->
     return text
 
 
-def num_tokens_in_string(text: str, model: str = "gpt-3.5-turbo-0613") -> int:
+def num_tokens_in_string(text: str, model: str = "gpt-3.5-turbo-1106") -> int:
     """Return the number of tokens in a string.
 
     Args:
         text: The string to count the tokens.
-        model: The model to use for tokenization. Defaults to "gpt-3.5-turbo-0613".
+        model: The model to use for tokenization. Defaults to "gpt-3.5-turbo-1106".
+            See https://platform.openai.com/docs/models for a list of OpenAI models.
 
     Returns:
         The number of tokens in the string.
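
The default-model bump does not change the counts themselves: tiktoken resolves every "gpt-3.5-turbo-*" model string, 0613 and 1106 alike, to the same cl100k_base encoding. A quick check:

    import tiktoken

    for model in ("gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106"):
        # Both model strings map to the same encoding, so token counts are identical.
        print(model, tiktoken.encoding_for_model(model).name)  # -> cl100k_base
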
@@ -48,17 +48,17 @@ def num_tokens_in_string(text: str, model: str = "gpt-3.5-turbo-0613") -> int:
     return len(encoding.encode(text))
 
 
-def num_tokens_from_messages(messages: list[dict[str, str]], model: str = "gpt-3.5-turbo-0613") -> int:
+def num_tokens_from_messages(messages: list[dict[str, str]], model: str = "gpt-3.5-turbo-1106") -> int:
     """Return the number of tokens used by a list of messages.
     NOTE: Does not support counting tokens used by function calling.
     Reference: # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
+        and https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
 
     Args:
         messages: A list of messages to count the tokens
             should ideally be the result after calling llm.prompts.chat_prompt.
-        model: The model to use for tokenization. Defaults to "gpt-3.5-turbo-0613".
-            See https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
-            for a list of OpenAI models.
+        model: The model to use for tokenization. Defaults to "gpt-3.5-turbo-1106".
+            See https://platform.openai.com/docs/models for a list of OpenAI models.
 
     Returns:
         The number of tokens used by the messages.
@@ -71,16 +71,21 @@ def num_tokens_from_messages(messages: list[dict[str, str]], model: str = "gpt-3
     if model in {
         "gpt-3.5-turbo-0613",
         "gpt-3.5-turbo-16k-0613",
+        "gpt-3.5-turbo-1106",
         "gpt-4-0314",
         "gpt-4-32k-0314",
         "gpt-4-0613",
         "gpt-4-32k-0613",
+        "gpt-4-1106-preview",
+        "gpt-4-turbo-preview",
+        "gpt-4-0125-preview",
     }:
         tokens_per_message = 3  # every message follows <|start|>{role/name}\n{content}<|end|>\n
         tokens_per_name = 1  # if there's a name, the role is omitted
     elif model == "gpt-3.5-turbo-0301":
         tokens_per_message = 4
         tokens_per_name = -1
+    # Approximate catch-all. Assumes future versions of 3.5 and 4 will have the same token counts as the 0613 versions.
     elif "gpt-3.5-turbo" in model:
         return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
     elif "gpt-4" in model:
@@ -13,9 +13,11 @@ from not_again_ai.viz.utils import reset_plot_libs
 def ts_lineplot(
     ts_data: list[float] | (npt.NDArray[np.float64] | npt.NDArray[np.int64]),
     save_pathname: str,
-    ts_x: list[float]
-    | (npt.NDArray[np.float64] | (npt.NDArray[np.datetime64] | (npt.NDArray[np.int64] | pd.Series)))
-    | None = None,
+    ts_x: (
+        list[float]
+        | (npt.NDArray[np.float64] | (npt.NDArray[np.datetime64] | (npt.NDArray[np.int64] | pd.Series)))
+        | None
+    ) = None,
     ts_names: list[str] | None = None,
     title: str | None = None,
     xlabel: str | None = "Time",
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2022-2023 DaveCoDev
+Copyright (c) 2022-2024 DaveCoDev
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,12 +1,12 @@
 Metadata-Version: 2.1
 Name: not-again-ai
-Version: 0.3.1
+Version: 0.4.0
 Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
 Home-page: https://github.com/DaveCoDev/not-again-ai
 License: MIT
 Author: DaveCoDev
 Author-email: dave.co.dev@gmail.com
-Requires-Python: >=3.10,<3.13
+Requires-Python: >=3.11,<3.13
 Classifier: Development Status :: 3 - Alpha
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Science/Research
@@ -14,7 +14,6 @@ Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3 :: Only
@@ -22,13 +21,13 @@ Classifier: Typing :: Typed
 Provides-Extra: llm
 Provides-Extra: statistics
 Provides-Extra: viz
-Requires-Dist: jinja2 (>=3.1.2,<4.0.0) ; extra == "llm"
-Requires-Dist: numpy (>=1.26.2,<2.0.0) ; extra == "statistics" or extra == "viz"
-Requires-Dist: openai (>=1.6.1,<2.0.0) ; extra == "llm"
-Requires-Dist: pandas (>=2.1.4,<3.0.0) ; extra == "viz"
-Requires-Dist: scikit-learn (>=1.3.2,<2.0.0) ; extra == "statistics"
-Requires-Dist: scipy (>=1.11.4,<2.0.0) ; extra == "statistics"
-Requires-Dist: seaborn (>=0.13.0,<0.14.0) ; extra == "viz"
+Requires-Dist: numpy (>=1.26.3,<2.0.0) ; extra == "statistics" or extra == "viz"
+Requires-Dist: openai (>=1.10.0,<2.0.0) ; extra == "llm"
+Requires-Dist: pandas (>=2.2.0,<3.0.0) ; extra == "viz"
+Requires-Dist: python-liquid (>=1.10.2,<2.0.0) ; extra == "llm"
+Requires-Dist: scikit-learn (>=1.4.0,<2.0.0) ; extra == "statistics"
+Requires-Dist: scipy (>=1.12.0,<2.0.0) ; extra == "statistics"
+Requires-Dist: seaborn (>=0.13.2,<0.14.0) ; extra == "viz"
 Requires-Dist: tiktoken (>=0.5.2,<0.6.0) ; extra == "llm"
 Project-URL: Documentation, https://github.com/DaveCoDev/not-again-ai
 Project-URL: Repository, https://github.com/DaveCoDev/not-again-ai
@@ -39,7 +38,6 @@ Description-Content-Type: text/markdown
 [![GitHub Actions][github-actions-badge]](https://github.com/johnthagen/python-blueprint/actions)
 [![Packaged with Poetry][poetry-badge]](https://python-poetry.org/)
 [![Nox][nox-badge]](https://github.com/wntrblm/nox)
-[![Code style: Black][black-badge]](https://github.com/psf/black)
 [![Ruff][ruff-badge]](https://github.com/astral-sh/ruff)
 [![Type checked with mypy][mypy-badge]](https://mypy-lang.org/)
 
@@ -56,7 +54,7 @@ Description-Content-Type: text/markdown
 
 # Installation
 
-Requires: Python 3.10, 3.11, or 3.12
+Requires: Python 3.11 or 3.12
 
 Install the entire package from [PyPI](https://pypi.org/project/not-again-ai/) with:
 
@@ -266,9 +264,11 @@ To pass arguments to `pytest` through `nox`:
 
 ## Code Style Checking
 
-[PEP 8](https://peps.python.org/pep-0008/) is the universally accepted style guide for
-Python code. PEP 8 code compliance is verified using [Ruff](https://github.com/astral-sh/ruff).
-Ruff is configured in the `[tool.ruff]` section of `pyproject.toml`.
+[PEP 8](https://peps.python.org/pep-0008/) is the universally accepted style guide for Python
+code. PEP 8 code compliance is verified using [Ruff][Ruff]. Ruff is configured in the
+`[tool.ruff]` section of [`pyproject.toml`](./pyproject.toml).
+
+[Ruff]: https://github.com/astral-sh/ruff
 
 To lint code, run:
 
@@ -284,12 +284,7 @@ To automatically fix fixable lint errors, run:
 
 ## Automated Code Formatting
 
-Code is automatically formatted using [black](https://github.com/psf/black). Imports are
-automatically sorted and grouped using [Ruff](https://github.com/astral-sh/ruff).
-
-These tools are configured by:
-
-- [`pyproject.toml`](./pyproject.toml)
+[Ruff][Ruff] is used to automatically format code and group and sort imports.
 
 To automatically format code, run:
 
@@ -2,12 +2,12 @@ not_again_ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 not_again_ai/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 not_again_ai/base/file_system.py,sha256=SX1ab2igdcFGjvdh4rDJIYVXbYC-jsYnCiM4oZtutAU,344
 not_again_ai/base/parallel.py,sha256=fcYhKBYBWvob84iKp3O93wvFFdXeidljZsShgBLTNGA,3448
-not_again_ai/llm/__init__.py,sha256=tF4osBYY8C1sY7Ntlp5IZnpk1F_pOtj0bWEO8i5OjiY,280
-not_again_ai/llm/chat_completion.py,sha256=I3EKvXm5hfL6gLU_Z8nPZyLT-nptOxgivQG5tCy10PI,3705
-not_again_ai/llm/context_management.py,sha256=_E1gFr66Evvsu-eNMpVb8KwMQ_MLnY4wlrT-tPYhIns,3424
-not_again_ai/llm/openai_client.py,sha256=rH_QKszC8Wa1Mjvz646G0cNxkhLbFSiYylFSiVbo6eg,2155
-not_again_ai/llm/prompts.py,sha256=3yg2bcLiAZ9_85PPMpRFbjVcIJEL7Vfhu0m0-4em3P4,2385
-not_again_ai/llm/tokens.py,sha256=z-uFiI4Yl2b-Pb0ySSak0TCPbW7xpRVBNo-7YcmF4No,3925
+not_again_ai/llm/__init__.py,sha256=_dMK-7rof9sBg1qGmWZUckppQODLbm5u1S-l_IVt_84,280
+not_again_ai/llm/chat_completion.py,sha256=XVbvubZ8zBf6Oc7aIeOYO553fJt_utQWl2MzdDrC58E,4627
+not_again_ai/llm/context_management.py,sha256=Hj-7ALjmZ5R0ArufCivR9Sdk1OP0sEmLH44vB8gtFDE,3424
+not_again_ai/llm/openai_client.py,sha256=6pZw2xw9X-ceV22rhApwFJ2tAKCxi-SxkjxBsTBZ2Nw,2470
+not_again_ai/llm/prompts.py,sha256=nkp3udfAFCrn6nL58rDtsc7qkqaFBXUQBTXrW4uCZAI,2245
+not_again_ai/llm/tokens.py,sha256=0FnH3BS9pIwbaTEX6Fv-03cxuEfiEIo5RgSxPLI38jM,4214
 not_again_ai/py.typed,sha256=UaCuPFa3H8UAakbt-5G8SPacldTOGvJv18pPjUJ5gDY,93
 not_again_ai/statistics/__init__.py,sha256=joPfCDsaK9lTyfUnWXmh7xQfsks5WmfOD1zhWwl9iKM,298
 not_again_ai/statistics/dependence.py,sha256=yZDk_e3ng96mp4hu8dDtQ0-uIn6KdSuGRS9uyM0O3x0,4429
@@ -15,10 +15,10 @@ not_again_ai/viz/__init__.py,sha256=dyMxlVQ-QxtenoJHQf9GRSVo_WInvlvIYH2VUkTTwYg,
 not_again_ai/viz/barplots.py,sha256=xhxEXf4mfaymxDQtpphg9auy03zQ9e29R6L9yHixW1Y,3382
 not_again_ai/viz/distributions.py,sha256=OyWwJaNI6lMRm_iSrhq-CORLNvXfeuLSgDtVo3umnzU,4354
 not_again_ai/viz/scatterplot.py,sha256=eBtIf0Tf_1EcN-akRNJgvwLU0zpRx1zOl0VF9QTnbZA,2290
-not_again_ai/viz/time_series.py,sha256=cqv509fbwneDYNEBd9JXzHcLNPpVGPRfogt4UzKMuk4,5188
+not_again_ai/viz/time_series.py,sha256=pOGZqXp_2nd6nKo-PUQNCtmMh__69jxQ6bQibTGLwZA,5212
 not_again_ai/viz/utils.py,sha256=hN7gwxtBt3U6jQni2K8j5m5pCXpaJDoNzGhBBikEU28,238
-not_again_ai-0.3.1.dist-info/LICENSE,sha256=cY8-HFTNHMfg-c_pcNKHZehkubXsHtjlO83jSl52HPI,1071
-not_again_ai-0.3.1.dist-info/METADATA,sha256=RuwDTtRkhWQ_zOdQi2GmhJrvfFKD_Uw3yB-e8eVpaLo,15707
-not_again_ai-0.3.1.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-not_again_ai-0.3.1.dist-info/entry_points.txt,sha256=EMJegugnmJUd-jMUA_qIRMIPAasbei8gP6O4-ER0BxQ,61
-not_again_ai-0.3.1.dist-info/RECORD,,
+not_again_ai-0.4.0.dist-info/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
+not_again_ai-0.4.0.dist-info/METADATA,sha256=PcTSC4S0rh6iHpLFndEruMmoC14Fgm9ZJ4CFn5oOnPw,15462
+not_again_ai-0.4.0.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+not_again_ai-0.4.0.dist-info/entry_points.txt,sha256=EMJegugnmJUd-jMUA_qIRMIPAasbei8gP6O4-ER0BxQ,61
+not_again_ai-0.4.0.dist-info/RECORD,,