not-again-ai 0.12.1__py3-none-any.whl → 0.13.0__py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
- not_again_ai/llm/gh_models/chat_completion.py +2 -2
- not_again_ai/llm/openai_api/chat_completion.py +38 -29
- not_again_ai/llm/openai_api/prompts.py +27 -0
- not_again_ai/llm/openai_api/tokens.py +72 -6
- {not_again_ai-0.12.1.dist-info → not_again_ai-0.13.0.dist-info}/METADATA +7 -7
- {not_again_ai-0.12.1.dist-info → not_again_ai-0.13.0.dist-info}/RECORD +9 -9
- {not_again_ai-0.12.1.dist-info → not_again_ai-0.13.0.dist-info}/LICENSE +0 -0
- {not_again_ai-0.12.1.dist-info → not_again_ai-0.13.0.dist-info}/WHEEL +0 -0
- {not_again_ai-0.12.1.dist-info → not_again_ai-0.13.0.dist-info}/entry_points.txt +0 -0
not_again_ai/llm/gh_models/chat_completion.py (whitespace-only change; leading whitespace is not preserved in this rendering)

```diff
@@ -64,8 +64,8 @@ def chat_completion(
         tool_names = []
         tool_args_list = []
         for tool_call in tool_calls:
-            tool_names.append(tool_call.function.name)
-            tool_args_list.append(json.loads(tool_call.function.arguments))
+            tool_names.append(tool_call.function.name)
+            tool_args_list.append(json.loads(tool_call.function.arguments))
         response_data["tool_names"] = tool_names
         response_data["tool_args_list"] = tool_args_list

```
not_again_ai/llm/openai_api/chat_completion.py

```diff
@@ -3,14 +3,13 @@ import json
 import time
 from typing import Any

-from openai import OpenAI
-from pydantic import BaseModel
+from openai import AzureOpenAI, OpenAI


 def chat_completion(
     messages: list[dict[str, Any]],
     model: str,
-    client: OpenAI,
+    client: OpenAI | AzureOpenAI | Any,
     tools: list[dict[str, Any]] | None = None,
     tool_choice: str = "auto",
     max_tokens: int | None = None,
```
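Both first-party SDK clients now satisfy the widened `client` parameter. A minimal sketch of constructing each (the endpoint, key, and API version below are placeholders, not values from this package):

```python
from openai import AzureOpenAI, OpenAI

# Reads OPENAI_API_KEY from the environment by default.
openai_client = OpenAI()

# Azure-hosted equivalent; all three arguments are placeholder values.
azure_client = AzureOpenAI(
    azure_endpoint="https://my-resource.openai.azure.com",
    api_key="<azure-openai-key>",
    api_version="2024-06-01",
)

# Either instance can be passed as `client: OpenAI | AzureOpenAI | Any`.
```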
````diff
@@ -33,7 +32,15 @@ def chat_completion(
         model (str): ID of the model to use. See the model endpoint compatibility table:
             https://platform.openai.com/docs/models/model-endpoint-compatibility
             for details on which models work with the Chat API.
-        client (OpenAI): An instance of the OpenAI client.
+        client (OpenAI): An instance of the OpenAI or AzureOpenAI client.
+            If anything else is provided, we assume that it follows the OpenAI spec and call it by passing kwargs directly.
+            For example you can provide something like:
+            ```
+            def custom_client(**kwargs):
+                client = openai_client()
+                completion = client.chat.completions.create(**kwargs)
+                return completion.to_dict()
+            ```
         tools (list[dict[str, Any]], optional):A list of tools the model may call.
             Use this to provide a list of functions the model may generate JSON inputs for. Defaults to None.
         tool_choice (str, optional): The tool choice to use. Can be "auto", "required", "none", or a specific function name.
````
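A self-contained version of the docstring's example, using the SDK's `OpenAI` constructor directly instead of the package's `openai_client` helper (that substitution is an assumption for the sketch):

```python
from typing import Any

from openai import OpenAI


def custom_client(**kwargs: Any) -> dict[str, Any]:
    # Any callable works as `client` as long as it accepts the Chat Completions
    # kwargs and returns a plain dict shaped like the OpenAI response.
    client = OpenAI()
    completion = client.chat.completions.create(**kwargs)
    return completion.to_dict()


# Hypothetical call site:
# response = chat_completion(messages, model="gpt-4o-mini", client=custom_client)
```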
```diff
@@ -88,8 +95,6 @@ def chat_completion(
     elif json_schema is not None:
         if isinstance(json_schema, dict):
             response_format = {"type": "json_schema", "json_schema": json_schema}
-        elif issubclass(json_schema, BaseModel):
-            response_format = json_schema
     else:
         response_format = {"type": "text"}

```
```diff
@@ -120,67 +125,71 @@ def chat_completion(
         kwargs["top_logprobs"] = logprobs[1]

     start_time = time.time()
-    response = client.chat.completions.create(**kwargs)
+    if isinstance(client, OpenAI | AzureOpenAI):
+        response = client.chat.completions.create(**kwargs)
+        response = response.to_dict()
+    else:
+        response = client(**kwargs)
     end_time = time.time()
     response_duration = end_time - start_time

     response_data: dict[str, Any] = {"choices": []}
-    for response_choice in response.choices:
+    for response_choice in response["choices"]:
         response_data_curr = {}
-        finish_reason = response_choice.finish_reason
+        finish_reason = response_choice["finish_reason"]
         response_data_curr["finish_reason"] = finish_reason

         # We first check for tool calls because even if the finish_reason is stop, the model may have called a tool
-        tool_calls = response_choice.message.tool_calls
+        tool_calls = response_choice["message"].get("tool_calls", None)
         if tool_calls:
             tool_names = []
             tool_args_list = []
             for tool_call in tool_calls:
-                tool_names.append(tool_call.function.name)
-                tool_args_list.append(json.loads(tool_call.function.arguments))
-            response_data_curr["message"] = response_choice.message.content
+                tool_names.append(tool_call["function"]["name"])
+                tool_args_list.append(json.loads(tool_call["function"]["arguments"]))
+            response_data_curr["message"] = response_choice["message"]["content"]
             response_data_curr["tool_names"] = tool_names
             response_data_curr["tool_args_list"] = tool_args_list
         elif finish_reason == "stop" or finish_reason == "length":
-            message = response_choice.message.content
+            message = response_choice["message"]["content"]
             if json_mode or json_schema is not None:
                 with contextlib.suppress(json.JSONDecodeError):
                     message = json.loads(message)
             response_data_curr["message"] = message

-        if response_choice.logprobs and response_choice.logprobs.content is not None:
+        if response_choice["logprobs"] and response_choice["logprobs"]["content"] is not None:
             logprobs_list: list[dict[str, Any] | list[dict[str, Any]]] = []
-            for logprob in response_choice.logprobs.content:
-                if logprob.top_logprobs:
+            for logprob in response_choice["logprobs"]["content"]:
+                if logprob["top_logprobs"]:
                     curr_logprob_infos = []
-                    for top_logprob in logprob.top_logprobs:
+                    for top_logprob in logprob["top_logprobs"]:
                         curr_logprob_infos.append(
                             {
-                                "token": top_logprob.token,
-                                "logprob": top_logprob.logprob,
-                                "bytes": top_logprob.bytes,
+                                "token": top_logprob["token"],
+                                "logprob": top_logprob["logprob"],
+                                "bytes": top_logprob["bytes"],
                             }
                         )
                     logprobs_list.append(curr_logprob_infos)
                 else:
                     logprobs_list.append(
                         {
-                            "token": logprob.token,
-                            "logprob": logprob.logprob,
-                            "bytes": logprob.bytes,
+                            "token": logprob["token"],
+                            "logprob": logprob["logprob"],
+                            "bytes": logprob["bytes"],
                         }
                     )

             response_data_curr["logprobs"] = logprobs_list
         response_data["choices"].append(response_data_curr)

-    usage = response.usage
+    usage = response["usage"]
     if usage is not None:
-        response_data["completion_tokens"] = usage.completion_tokens
-        response_data["prompt_tokens"] = usage.prompt_tokens
+        response_data["completion_tokens"] = usage["completion_tokens"]
+        response_data["prompt_tokens"] = usage["prompt_tokens"]

-    if seed is not None and response.system_fingerprint is not None:
-        response_data["system_fingerprint"] = response.system_fingerprint
+    if seed is not None and response["system_fingerprint"] is not None:
+        response_data["system_fingerprint"] = response["system_fingerprint"]

     response_data["response_duration"] = round(response_duration, 4)

```
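Whichever branch runs, the function now parses a plain dict, so every client type yields the same normalized structure. A hand-written sketch of that return shape (all values invented for illustration):

```python
# Illustrative shape of the dict built by the parsing loop above.
response_data = {
    "choices": [
        {
            "finish_reason": "stop",
            "message": "Hello!",  # str, or parsed JSON when json_mode/json_schema is used
            # Only present when the model called tools:
            # "tool_names": ["get_weather"],
            # "tool_args_list": [{"city": "Boston"}],
            # Only present when logprobs were requested:
            # "logprobs": [{"token": "Hello", "logprob": -0.01, "bytes": [72]}],
        }
    ],
    "completion_tokens": 2,
    "prompt_tokens": 9,
    # "system_fingerprint": "fp_abc123",  # only when a seed was passed
    "response_duration": 0.4321,
}
```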
not_again_ai/llm/openai_api/prompts.py

```diff
@@ -5,6 +5,8 @@ from pathlib import Path
 from typing import Any

 from liquid import Template
+from openai.lib._pydantic import to_strict_json_schema
+from pydantic import BaseModel


 def _validate_message_vision(message: dict[str, list[dict[str, Path | str]] | str]) -> bool:
```
```diff
@@ -162,3 +164,28 @@ def chat_prompt(messages_unformatted: list[dict[str, Any]], variables: dict[str,
         message["content"] = Template(message["content"]).render(**variables)

     return messages_formatted
+
+
+def pydantic_to_json_schema(
+    pydantic_model: type[BaseModel], schema_name: str, description: str | None = None
+) -> dict[str, Any]:
+    """Converts a Pydantic model to a JSON schema expected by Structured Outputs.
+    Must adhere to the supported schemas: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas
+
+    Args:
+        pydantic_model: The Pydantic model to convert.
+        schema_name: The name of the schema.
+        description: An optional description of the schema.
+
+    Returns:
+        A JSON schema dictionary representing the Pydantic model.
+    """
+    converted_pydantic = to_strict_json_schema(pydantic_model)
+    schema = {
+        "name": schema_name,
+        "strict": True,
+        "schema": converted_pydantic,
+    }
+    if description:
+        schema["description"] = description
+    return schema
```
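This new helper appears to replace the removed `issubclass(json_schema, BaseModel)` branch in `chat_completion`: a Pydantic model is converted up front and the resulting dict is passed as `json_schema`. A usage sketch (the `Weather` model is invented for illustration):

```python
from pydantic import BaseModel

from not_again_ai.llm.openai_api.prompts import pydantic_to_json_schema


class Weather(BaseModel):
    # Hypothetical model, not part of the package.
    city: str
    temperature_c: float


json_schema = pydantic_to_json_schema(
    pydantic_model=Weather,
    schema_name="weather",
    description="A city and its current temperature.",
)
# -> {"name": "weather", "strict": True, "schema": {...}, "description": "..."}
# which chat_completion wraps as {"type": "json_schema", "json_schema": json_schema}.
```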
not_again_ai/llm/openai_api/tokens.py

```diff
@@ -1,3 +1,6 @@
+from collections.abc import Collection, Set
+from typing import Literal
+
 import tiktoken


```
```diff
@@ -18,18 +21,38 @@ def load_tokenizer(model: str) -> tiktoken.Encoding:
     return encoding


-def truncate_str(text: str, max_len: int, tokenizer: tiktoken.Encoding) -> str:
+def truncate_str(
+    text: str,
+    max_len: int,
+    tokenizer: tiktoken.Encoding,
+    allowed_special: Literal["all"] | Set[str] = set(),
+    disallowed_special: Literal["all"] | Collection[str] = (),
+) -> str:
     """Truncates a string to a maximum token length.

+    Special tokens are artificial tokens used to unlock capabilities from a model,
+    such as fill-in-the-middle. So we want to be careful about accidentally encoding special
+    tokens, since they can be used to trick a model into doing something we don't want it to do.
+
+    Hence, by default, encode will raise an error if it encounters text that corresponds
+    to a special token. This can be controlled on a per-token level using the `allowed_special`
+    and `disallowed_special` parameters. In particular:
+    - Setting `disallowed_special` to () will prevent this function from raising errors and
+        cause all text corresponding to special tokens to be encoded as natural text.
+    - Setting `allowed_special` to "all" will cause this function to treat all text
+        corresponding to special tokens to be encoded as special tokens.
+
     Args:
         text (str): The string to truncate.
         max_len (int): The maximum number of tokens to keep.
         tokenizer (tiktoken.Encoding): A tiktoken encoding object
+        allowed_special (str | set[str]):
+        disallowed_special (str | set[str]):

     Returns:
         str: The truncated string.
     """
-    tokens = tokenizer.encode(text)
+    tokens = tokenizer.encode(text, allowed_special=allowed_special, disallowed_special=disallowed_special)
     if len(tokens) > max_len:
         tokens = tokens[:max_len]
         # Decode the tokens back to a string
```
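The two new flags are forwarded straight to tiktoken's `encode`. A quick sketch of what they control (the `cl100k_base` encoding and sample text are assumptions for illustration):

```python
import tiktoken

enc = tiktoken.get_encoding("cl100k_base")
text = "Summary: <|endoftext|>"

# Default flags: encode() raises ValueError because the text contains
# something that looks like a special token.
# enc.encode(text)

# disallowed_special=() encodes the special-token text as ordinary text.
plain_ids = enc.encode(text, disallowed_special=())

# allowed_special="all" encodes it as the real special token instead.
special_ids = enc.encode(text, allowed_special="all")
```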
```diff
@@ -39,33 +62,70 @@ def truncate_str(text: str, max_len: int, tokenizer: tiktoken.Encoding) -> str:
     return text


-def num_tokens_in_string(text: str, tokenizer: tiktoken.Encoding) -> int:
+def num_tokens_in_string(
+    text: str,
+    tokenizer: tiktoken.Encoding,
+    allowed_special: Literal["all"] | Set[str] = set(),
+    disallowed_special: Literal["all"] | Collection[str] = (),
+) -> int:
     """Return the number of tokens in a string.

+    Special tokens are artificial tokens used to unlock capabilities from a model,
+    such as fill-in-the-middle. So we want to be careful about accidentally encoding special
+    tokens, since they can be used to trick a model into doing something we don't want it to do.
+
+    Hence, by default, encode will raise an error if it encounters text that corresponds
+    to a special token. This can be controlled on a per-token level using the `allowed_special`
+    and `disallowed_special` parameters. In particular:
+    - Setting `disallowed_special` to () will prevent this function from raising errors and
+        cause all text corresponding to special tokens to be encoded as natural text.
+    - Setting `allowed_special` to "all" will cause this function to treat all text
+        corresponding to special tokens to be encoded as special tokens.
+
     Args:
         text (str): The string to count the tokens.
         tokenizer (tiktoken.Encoding): A tiktoken encoding object
+        allowed_special (str | set[str]):
+        disallowed_special (str | set[str]):

     Returns:
         int: The number of tokens in the string.
     """
-    return len(tokenizer.encode(text))
+    return len(tokenizer.encode(text, allowed_special=allowed_special, disallowed_special=disallowed_special))


 def num_tokens_from_messages(
-    messages: list[dict[str, str]], tokenizer: tiktoken.Encoding, model: str = "gpt-3.5-turbo-0125"
+    messages: list[dict[str, str]],
+    tokenizer: tiktoken.Encoding,
+    model: str = "gpt-3.5-turbo-0125",
+    allowed_special: Literal["all"] | Set[str] = set(),
+    disallowed_special: Literal["all"] | Collection[str] = (),
 ) -> int:
     """Return the number of tokens used by a list of messages.
     NOTE: Does not support counting tokens used by function calling or prompts with images.
     Reference: # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
     and https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb

+    Special tokens are artificial tokens used to unlock capabilities from a model,
+    such as fill-in-the-middle. So we want to be careful about accidentally encoding special
+    tokens, since they can be used to trick a model into doing something we don't want it to do.
+
+    Hence, by default, encode will raise an error if it encounters text that corresponds
+    to a special token. This can be controlled on a per-token level using the `allowed_special`
+    and `disallowed_special` parameters. In particular:
+    - Setting `disallowed_special` to () will prevent this function from raising errors and
+        cause all text corresponding to special tokens to be encoded as natural text.
+    - Setting `allowed_special` to "all" will cause this function to treat all text
+        corresponding to special tokens to be encoded as special tokens.
+
     Args:
         messages (list[dict[str, str]]): A list of messages to count the tokens
             should ideally be the result after calling llm.prompts.chat_prompt.
         tokenizer (tiktoken.Encoding): A tiktoken encoding object
         model (str): The model to use for tokenization. Defaults to "gpt-3.5-turbo-0125".
             See https://platform.openai.com/docs/models for a list of OpenAI models.
+        allowed_special (str | set[str]):
+        disallowed_special (str | set[str]):

     Returns:
         int: The number of tokens used by the messages.
```
```diff
@@ -111,7 +171,13 @@ See https://github.com/openai/openai-python/blob/main/chatml.md for information
     for message in messages:
         num_tokens += tokens_per_message
         for key, value in message.items():
-            num_tokens += len(tokenizer.encode(value))
+            num_tokens += len(
+                tokenizer.encode(
+                    value,
+                    allowed_special=allowed_special,
+                    disallowed_special=disallowed_special,
+                )
+            )
             if key == "name":
                 num_tokens += tokens_per_name
     num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
```
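Taken together, the token helpers now accept the same flags end to end. A hedged usage sketch (the model choice and message content are illustrative):

```python
from not_again_ai.llm.openai_api.tokens import (
    load_tokenizer,
    num_tokens_from_messages,
    num_tokens_in_string,
)

tokenizer = load_tokenizer("gpt-3.5-turbo-0125")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Pasted text may contain <|endoftext|> markers."},
]

# Without disallowed_special=(), the user message above would raise inside encode().
total = num_tokens_from_messages(messages, tokenizer, disallowed_special=())
assert num_tokens_in_string("hello world", tokenizer) > 0
```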
{not_again_ai-0.12.1.dist-info → not_again_ai-0.13.0.dist-info}/METADATA

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: not-again-ai
-Version: 0.12.1
+Version: 0.13.0
 Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
 Home-page: https://github.com/DaveCoDev/not-again-ai
 License: MIT
```
```diff
@@ -21,21 +21,21 @@ Provides-Extra: llm
 Provides-Extra: local-llm
 Provides-Extra: statistics
 Provides-Extra: viz
-Requires-Dist: azure-ai-inference (==1.0.
-Requires-Dist: azure-identity (>=1.
+Requires-Dist: azure-ai-inference (==1.0.0b4) ; extra == "llm"
+Requires-Dist: azure-identity (>=1.18,<2.0) ; extra == "llm"
 Requires-Dist: jinja2 (>=3.1,<4.0) ; extra == "local-llm"
 Requires-Dist: loguru (==0.7.2)
-Requires-Dist: numpy (>=1
+Requires-Dist: numpy (>=2.1,<3.0) ; extra == "statistics" or extra == "viz"
 Requires-Dist: ollama (>=0.3,<0.4) ; extra == "local-llm"
-Requires-Dist: openai (>=1.
+Requires-Dist: openai (>=1.51,<2.0) ; extra == "llm"
 Requires-Dist: pandas (>=2.2,<3.0) ; extra == "viz"
-Requires-Dist: pydantic (>=2.
+Requires-Dist: pydantic (>=2.9,<3.0) ; extra == "llm"
 Requires-Dist: python-liquid (>=1.12,<2.0) ; extra == "llm"
 Requires-Dist: scikit-learn (>=1.5,<2.0) ; extra == "statistics"
 Requires-Dist: scipy (>=1.14,<2.0) ; extra == "statistics"
 Requires-Dist: seaborn (>=0.13,<0.14) ; extra == "viz"
 Requires-Dist: tiktoken (>=0.7,<0.8) ; extra == "llm"
-Requires-Dist: transformers (>=4.
+Requires-Dist: transformers (>=4.45,<5.0) ; extra == "local-llm"
 Project-URL: Documentation, https://github.com/DaveCoDev/not-again-ai
 Project-URL: Repository, https://github.com/DaveCoDev/not-again-ai
 Description-Content-Type: text/markdown
```
{not_again_ai-0.12.1.dist-info → not_again_ai-0.13.0.dist-info}/RECORD

```diff
@@ -5,14 +5,14 @@ not_again_ai/base/parallel.py,sha256=fcYhKBYBWvob84iKp3O93wvFFdXeidljZsShgBLTNGA
 not_again_ai/llm/__init__.py,sha256=_wNUL6FDaT369Z8W48FsaC_NkcOZ-ib2MMUvnaLOS-0,451
 not_again_ai/llm/gh_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 not_again_ai/llm/gh_models/azure_ai_client.py,sha256=GkVn9ZwYbsLm3X0A3pGKKHuoqrxc-BZnZ4n9ExelRUQ,580
-not_again_ai/llm/gh_models/chat_completion.py,sha256=
+not_again_ai/llm/gh_models/chat_completion.py,sha256=zI6Kfqb9AW0t_Yd1ecaXy7q70gygJ_XKcFbtYrKIbn4,3599
 not_again_ai/llm/openai_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-not_again_ai/llm/openai_api/chat_completion.py,sha256=
+not_again_ai/llm/openai_api/chat_completion.py,sha256=6HS5v1tr-QgHRLSCDHHsJv7len6PaxGY2q9z6fGjhLY,10342
 not_again_ai/llm/openai_api/context_management.py,sha256=BJSG100_qw9MeTCZGztDV5CBXjVOxU4x7gyoRlLxWnI,3561
 not_again_ai/llm/openai_api/embeddings.py,sha256=4OBnxZicrY6q4dQhuPqMdAnifyjwrsKMTDj-kVre0yc,2500
 not_again_ai/llm/openai_api/openai_client.py,sha256=AK9SDBkpP94u5Q73-Q5i5HRPQh_D8cF8Dfl0IgPsJDQ,3816
-not_again_ai/llm/openai_api/prompts.py,sha256=
-not_again_ai/llm/openai_api/tokens.py,sha256=
+not_again_ai/llm/openai_api/prompts.py,sha256=lZYxgzoM2VqXWKUDToKWKR6w49KNYKu5TnqKLxG3TsM,8034
+not_again_ai/llm/openai_api/tokens.py,sha256=Q4xdCEPrmgDCNjmcB4rg6ipvo4_McwSjc-b9gAHjUJs,8024
 not_again_ai/local_llm/__init__.py,sha256=BsUn39U3QQaw6yomQHfp_HIPHRIBoMAgjcP3CDADx04,882
 not_again_ai/local_llm/chat_completion.py,sha256=PmICXrGZJXIuqY00ULBGi2bKnPG8ticqTXZHSTzZK9o,4828
 not_again_ai/local_llm/huggingface/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
```
```diff
@@ -35,8 +35,8 @@ not_again_ai/viz/distributions.py,sha256=OyWwJaNI6lMRm_iSrhq-CORLNvXfeuLSgDtVo3u
 not_again_ai/viz/scatterplot.py,sha256=5CUOWeknbBOaZPeX9oPin5sBkRKEwk8qeFH45R-9LlY,2292
 not_again_ai/viz/time_series.py,sha256=pOGZqXp_2nd6nKo-PUQNCtmMh__69jxQ6bQibTGLwZA,5212
 not_again_ai/viz/utils.py,sha256=hN7gwxtBt3U6jQni2K8j5m5pCXpaJDoNzGhBBikEU28,238
-not_again_ai-0.
-not_again_ai-0.
-not_again_ai-0.
-not_again_ai-0.
-not_again_ai-0.
+not_again_ai-0.13.0.dist-info/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
+not_again_ai-0.13.0.dist-info/METADATA,sha256=lh12ROekVGKDWn-4a0fmbxQHJGniQTq9E-PBk9lpqig,16388
+not_again_ai-0.13.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+not_again_ai-0.13.0.dist-info/entry_points.txt,sha256=EMJegugnmJUd-jMUA_qIRMIPAasbei8gP6O4-ER0BxQ,61
+not_again_ai-0.13.0.dist-info/RECORD,,
```
{not_again_ai-0.12.1.dist-info → not_again_ai-0.13.0.dist-info}/LICENSE: file without changes
{not_again_ai-0.12.1.dist-info → not_again_ai-0.13.0.dist-info}/WHEEL: file without changes
{not_again_ai-0.12.1.dist-info → not_again_ai-0.13.0.dist-info}/entry_points.txt: file without changes