not-again-ai 0.6.0__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- not_again_ai/base/file_system.py +26 -4
- not_again_ai/llm/chat_completion.py +55 -146
- not_again_ai/llm/ollama/__init__.py +0 -0
- not_again_ai/llm/ollama/chat_completion.py +95 -0
- not_again_ai/llm/ollama/ollama_client.py +24 -0
- not_again_ai/llm/ollama/service.py +81 -0
- not_again_ai/llm/openai_api/__init__.py +0 -0
- not_again_ai/llm/openai_api/chat_completion.py +167 -0
- not_again_ai/llm/{context_management.py → openai_api/context_management.py} +1 -1
- {not_again_ai-0.6.0.dist-info → not_again_ai-0.8.0.dist-info}/METADATA +20 -75
- not_again_ai-0.8.0.dist-info/RECORD +31 -0
- {not_again_ai-0.6.0.dist-info → not_again_ai-0.8.0.dist-info}/WHEEL +1 -1
- not_again_ai-0.6.0.dist-info/RECORD +0 -25
- /not_again_ai/llm/{embeddings.py → openai_api/embeddings.py} +0 -0
- /not_again_ai/llm/{openai_client.py → openai_api/openai_client.py} +0 -0
- /not_again_ai/llm/{prompts.py → openai_api/prompts.py} +0 -0
- /not_again_ai/llm/{tokens.py → openai_api/tokens.py} +0 -0
- {not_again_ai-0.6.0.dist-info → not_again_ai-0.8.0.dist-info}/LICENSE +0 -0
- {not_again_ai-0.6.0.dist-info → not_again_ai-0.8.0.dist-info}/entry_points.txt +0 -0
not_again_ai/base/file_system.py CHANGED

```diff
@@ -1,12 +1,34 @@
-import
+from pathlib import Path
 
 
-def create_file_dir(filepath: str) -> None:
+def create_file_dir(filepath: str | Path) -> None:
     """Creates the parent directories for the specified filepath.
     Does not throw any errors if the directories already exist.
 
     Args:
-        filepath (str): path to a file
+        filepath (str | Path): path to a file
     """
-    root_path =
+    root_path = Path(filepath).parent
     root_path.mkdir(parents=True, exist_ok=True)
+
+
+def readable_size(size: float) -> str:
+    """Convert a file size given in bytes to a human-readable format.
+
+    Args:
+        size (int): file size in bytes
+
+    Returns:
+        str: human-readable file size
+    """
+    # Define the suffixes for each size unit
+    suffixes = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
+
+    # Start with bytes
+    count = 0
+    while size >= 1024 and count < len(suffixes) - 1:
+        count += 1
+        size /= 1024
+
+    # Format the size to two decimal places and append the appropriate suffix
+    return f"{size:.2f} {suffixes[count]}"
```
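The new `readable_size` helper divides by 1024 until the value fits a suffix, and `create_file_dir` now accepts `Path` objects as well as strings. A quick usage sketch of the two helpers added above (the paths and sizes are illustrative):

```python
from pathlib import Path

from not_again_ai.base.file_system import create_file_dir, readable_size

# create_file_dir now accepts a Path as well as a str
create_file_dir(Path("output") / "results" / "data.json")

# readable_size divides by 1024 until the value drops below 1024,
# then formats with two decimals and the matching suffix
print(readable_size(512))          # "512.00 B"
print(readable_size(2048))         # "2.00 KB"
print(readable_size(5 * 1024**3))  # "5.00 GB"
```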
not_again_ai/llm/chat_completion.py CHANGED

```diff
@@ -1,167 +1,76 @@
-import contextlib
-import json
 from typing import Any
 
+from ollama import Client
 from openai import OpenAI
 
+from not_again_ai.llm.ollama import chat_completion as chat_completion_ollama
+from not_again_ai.llm.openai_api import chat_completion as chat_completion_openai
+
 
 def chat_completion(
-    messages: list[dict[str,
+    messages: list[dict[str, Any]],
     model: str,
-    client: OpenAI,
-    tools: list[dict[str, Any]] | None = None,
-    tool_choice: str = "auto",
+    client: OpenAI | Client,
     max_tokens: int | None = None,
     temperature: float = 0.7,
     json_mode: bool = False,
     seed: int | None = None,
-    logprobs: tuple[bool, int | None] | None = None,
-    n: int = 1,
     **kwargs: Any,
 ) -> dict[str, Any]:
-    """
-
-
-    particularly for older vision-enabled models like gpt-4-1106-vision-preview.
-    Be sure to check the documentation: https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
+    """Creates a common wrapper around chat completion models from different providers.
+    Currently supports the OpenAI API and Ollama local models.
+    All input parameters are supported by all providers in similar ways and the output is standardized.
 
     Args:
-        messages (list): A list of messages
-        model (str):
-
-
-
-
-
-        tool_choice (str, optional): The tool choice to use. Can be "auto", "none", or a specific function name.
-            Defaults to "auto".
-        max_tokens (int, optional): The maximum number of tokens to generate in the chat completion.
-            Defaults to None, which automatically limits to the model's maximum context length.
-        temperature (float, optional): What sampling temperature to use, between 0 and 2.
-            Higher values like 0.8 will make the output more random,
-            while lower values like 0.2 will make it more focused and deterministic. Defaults to 0.7.
-        json_mode (bool, optional): When JSON mode is enabled, the model is constrained to only
-            generate strings that parse into valid JSON object and will return a dictionary.
-            See https://platform.openai.com/docs/guides/text-generation/json-mode
-        seed (int, optional): If specified, OpenAI will make a best effort to sample deterministically,
-            such that repeated requests with the same `seed` and parameters should return the same result.
-            Determinism is not guaranteed, and you should refer to the `system_fingerprint` response
-            parameter to monitor changes in the backend.
-        logprobs (tuple[bool, int], optional): Whether to return log probabilities of the output tokens or not.
-            If `logprobs[0]` is true, returns the log probabilities of each output token returned in the content of message.
-            `logprobs[1]` is an integer between 0 and 5 specifying the number of most likely tokens to return at each token position,
-            each with an associated log probability. `logprobs[0]` must be set to true if this parameter is used.
-        n (int, optional): How many chat completion choices to generate for each input message.
-            Defaults to 1.
-        **kwargs: Additional keyword arguments to pass to the OpenAI client chat completion.
+        messages (list[dict[str, Any]]): A list of messages to send to the model.
+        model (str): The model name to use.
+        client (OpenAI | Client): The client object to use for chat completion.
+        max_tokens (int, optional): The maximum number of tokens to generate.
+        temperature (float, optional): The temperature of the model. Increasing the temperature will make the model answer more creatively.
+        json_mode (bool, optional): This will structure the response as a valid JSON object.
+        seed (int, optional): The seed to use for the model for reproducible outputs.
 
     Returns:
-        dict[str, Any]: A dictionary with the following keys
-
-            Can be 'stop', 'length', or 'tool_calls'.
-        'tool_names' (list[str], optional): The names of the tools called by the model.
-        'tool_args_list' (list[dict], optional): The arguments of the tools called by the model.
-        'message' (str | dict): The content of the generated assistant message.
+        dict[str, Any]: A dictionary with the following keys
+            message (str | dict): The content of the generated assistant message.
                 If json_mode is True, this will be a dictionary.
-
-
-            this will be a list of dictionaries containing the token, logprob, and bytes for each token in the message.
-        'choices' (list[dict], optional): A list of chat completion choices if n > 1 where each dict contains the above fields.
-        'completion_tokens' (int): The number of tokens used by the model to generate the completion.
-            NOTE: If n > 1 this is the sum of all completions.
-        'prompt_tokens' (int): The number of tokens in the messages sent to the model.
-        'system_fingerprint' (str, optional): If seed is set, a unique identifier for the model used to generate the response.
+            completion_tokens (int): The number of tokens used by the model to generate the completion.
+            extras (dict): This will contain any additional fields returned by corresponding provider.
     """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        # We first check for tool calls because even if the finish_reason is stop, the model may have called a tool
-        tool_calls = response_choice.message.tool_calls
-        if tool_calls:
-            tool_names = []
-            tool_args_list = []
-            for tool_call in tool_calls:
-                tool_names.append(tool_call.function.name)
-                tool_args_list.append(json.loads(tool_call.function.arguments))
-            response_data_curr["message"] = response_choice.message.content
-            response_data_curr["tool_names"] = tool_names
-            response_data_curr["tool_args_list"] = tool_args_list
-        elif finish_reason == "stop" or finish_reason == "length":
-            message = response_choice.message.content
-            if json_mode:
-                with contextlib.suppress(json.JSONDecodeError):
-                    message = json.loads(message)
-            response_data_curr["message"] = message
-
-        if response_choice.logprobs and response_choice.logprobs.content is not None:
-            logprobs_list: list[dict[str, Any] | list[dict[str, Any]]] = []
-            for logprob in response_choice.logprobs.content:
-                if logprob.top_logprobs:
-                    curr_logprob_infos = []
-                    for top_logprob in logprob.top_logprobs:
-                        curr_logprob_infos.append(
-                            {
-                                "token": top_logprob.token,
-                                "logprob": top_logprob.logprob,
-                                "bytes": top_logprob.bytes,
-                            }
-                        )
-                    logprobs_list.append(curr_logprob_infos)
-                else:
-                    logprobs_list.append(
-                        {
-                            "token": logprob.token,
-                            "logprob": logprob.logprob,
-                            "bytes": logprob.bytes,
-                        }
-                    )
-
-            response_data_curr["logprobs"] = logprobs_list
-        response_data["choices"].append(response_data_curr)
-
-    usage = response.usage
-    if usage is not None:
-        response_data["completion_tokens"] = usage.completion_tokens
-        response_data["prompt_tokens"] = usage.prompt_tokens
-
-    if seed is not None and response.system_fingerprint is not None:
-        response_data["system_fingerprint"] = response.system_fingerprint
-
-    if len(response_data["choices"]) == 1:
-        response_data.update(response_data["choices"][0])
-        del response_data["choices"]
+    # Determine which chat_completion function to call based on the client type
+    if isinstance(client, OpenAI):
+        response = chat_completion_openai.chat_completion(
+            messages=messages,
+            model=model,
+            client=client,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            json_mode=json_mode,
+            seed=seed,
+            **kwargs,
+        )
+    elif isinstance(client, Client):
+        response = chat_completion_ollama.chat_completion(
+            messages=messages,
+            model=model,
+            client=client,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            json_mode=json_mode,
+            seed=seed,
+            **kwargs,
+        )
+    else:
+        raise ValueError("Invalid client type")
+
+    # Parse the responses to be consistent
+    response_data = {}
+    response_data["message"] = response.get("message", None)
+    response_data["completion_tokens"] = response.get("completion_tokens", None)
+
+    # Return any additional fields from the response in an "extras" dictionary
+    extras = {k: v for k, v in response.items() if k not in response_data}
+    if extras:
+        response_data["extras"] = extras
 
     return response_data
```
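The rewritten wrapper now only dispatches on the client type and normalizes the output to `message`, `completion_tokens`, and `extras`. A usage sketch, assuming an OpenAI key is configured and a local Ollama server already has the named model pulled; the model names are examples, and the `openai_client`/`ollama_client` helpers come from the package's client modules:

```python
from not_again_ai.llm.chat_completion import chat_completion
from not_again_ai.llm.ollama.ollama_client import ollama_client
from not_again_ai.llm.openai_api.openai_client import openai_client

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# Same call, two providers: the wrapper inspects the client type.
openai_response = chat_completion(
    messages=messages,
    model="gpt-3.5-turbo",  # example model name
    client=openai_client(),
    max_tokens=100,
)

ollama_response = chat_completion(
    messages=messages,
    model="phi3",  # example model name; must already be pulled locally
    client=ollama_client(host="http://localhost:11434"),
    max_tokens=100,
)

# Both responses share the standardized keys.
print(openai_response["message"], openai_response["completion_tokens"])
print(ollama_response["message"], ollama_response.get("extras", {}))
```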
not_again_ai/llm/ollama/__init__.py: File without changes
not_again_ai/llm/ollama/chat_completion.py ADDED

```diff
@@ -0,0 +1,95 @@
+import contextlib
+import json
+import re
+from typing import Any
+
+from ollama import Client, ResponseError
+
+
+def _convert_duration(nanoseconds: int) -> float:
+    seconds = nanoseconds / 1_000_000_000
+    return round(seconds, 5)
+
+
+def chat_completion(
+    messages: list[dict[str, Any]],
+    model: str,
+    client: Client,
+    max_tokens: int | None = None,
+    context_window: int | None = None,
+    temperature: float = 0.8,
+    json_mode: bool = False,
+    seed: int | None = None,
+    **kwargs: Any,
+) -> dict[str, Any]:
+    """Gets a Ollama chat completion response, see https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion
+    For a full list of valid parameters: https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
+
+    Args:
+        messages (list[dict[str, Any]]): A list of messages to send to the model.
+        model (str): The model to use.
+        client (Client): The Ollama client.
+        max_tokens (int, optional): The maximum number of tokens to generate. Ollama calls this `num_predict`.
+        context_window (int, optional): The number of tokens to consider as context. Ollama calls this `num_ctx`.
+        temperature (float, optional): The temperature of the model. Increasing the temperature will make the model answer more creatively.
+        json_mode (bool, optional): This will structure the response as a valid JSON object.
+            It is important to instruct the model to use JSON in the prompt. Otherwise, the model may generate large amounts whitespace.
+        seed (int, optional): The seed to use for the model for reproducible outputs. Defaults to None.
+
+    Returns:
+        dict[str, Any]: A dictionary with the following keys
+            message (str | dict): The content of the generated assistant message.
+                If json_mode is True, this will be a dictionary.
+            completion_tokens (int): The number of tokens used by the model to generate the completion.
+            response_duration (float): The time taken to generate the response in seconds.
+    """
+
+    options = {
+        "num_predict": max_tokens,
+        "num_ctx": context_window,
+        "temperature": temperature,
+    }
+    if seed is not None:
+        options["seed"] = seed
+    options.update(kwargs)
+
+    all_args = {
+        "model": model,
+        "messages": messages,
+        "options": options,
+    }
+    if json_mode:
+        all_args["format"] = "json"
+
+    try:
+        response = client.chat(**all_args)
+    except ResponseError as e:
+        # If the error says "model 'model' not found" use regex then raise a more specific error
+        expected_pattern = f"model '{model}' not found"
+        if re.search(expected_pattern, e.error):
+            raise ResponseError(
+                f"Model '{model}' not found. Please use not_again_ai.llm.ollama.service.pull() first."
+            ) from e
+        else:
+            raise ResponseError(e.message) from e
+
+    response_data: dict[str, Any] = {}
+
+    # Handle getting the message returned by the model
+    message = response["message"].get("content", None)
+    if message and json_mode:
+        with contextlib.suppress(json.JSONDecodeError):
+            message = json.loads(message)
+    if message:
+        response_data["message"] = message
+
+    # Get the number of tokens generated
+    response_data["completion_tokens"] = response.get("eval_count", None)
+
+    # Get the latency of the response
+    if response.get("total_duration", None):
+        response_data["response_duration"] = _convert_duration(response["total_duration"])
+    else:
+        response_data["response_duration"] = None
+
+    return response_data
```
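Because `max_tokens` and `context_window` are mapped onto Ollama's `num_predict` and `num_ctx` options, the provider module can also be called directly. A sketch assuming a reachable Ollama server and an already-pulled model (the model name is an example):

```python
from not_again_ai.llm.ollama.chat_completion import chat_completion
from not_again_ai.llm.ollama.ollama_client import ollama_client

client = ollama_client(host="http://localhost:11434")

response = chat_completion(
    messages=[
        {"role": "system", "content": "Respond with a JSON object."},
        {"role": "user", "content": 'List three colors as {"colors": [...]}.'},
    ],
    model="phi3",          # example model name
    client=client,
    max_tokens=200,        # sent to Ollama as options["num_predict"]
    context_window=4096,   # sent to Ollama as options["num_ctx"]
    json_mode=True,        # adds format="json"; the prompt should ask for JSON
    seed=42,
)

print(response["message"])            # dict if the model returned valid JSON
print(response["completion_tokens"])  # from Ollama's eval_count
print(response["response_duration"])  # total_duration converted to seconds
```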
not_again_ai/llm/ollama/ollama_client.py ADDED

```diff
@@ -0,0 +1,24 @@
+import os
+
+from ollama import Client
+
+
+def ollama_client(host: str | None = None, timeout: float | None = None) -> Client:
+    """Create an Ollama client instance based on the specified host or will read from the OLLAMA_HOST environment variable.
+
+    Args:
+        host (str, optional): The host URL of the Ollama server.
+        timeout (float, optional): The timeout for requests
+
+    Returns:
+        Client: An instance of the Ollama client.
+
+    Examples:
+        >>> client = client(host="http://localhost:11434")
+    """
+    if host is None:
+        host = os.getenv("OLLAMA_HOST")
+        if host is None:
+            raise ValueError("Host must be provided or OLLAMA_HOST environment variable must be set.")
+
+    return Client(host=host, timeout=timeout)
```
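A short sketch of the two ways to construct the client described above; the host URL is the standard local Ollama address and is only an example:

```python
import os

from not_again_ai.llm.ollama.ollama_client import ollama_client

# Option 1: pass the host explicitly (with an optional request timeout in seconds).
client = ollama_client(host="http://localhost:11434", timeout=30)

# Option 2: rely on the OLLAMA_HOST environment variable instead.
os.environ["OLLAMA_HOST"] = "http://localhost:11434"
client = ollama_client()

# With neither a host argument nor OLLAMA_HOST set, a ValueError is raised.
```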
not_again_ai/llm/ollama/service.py ADDED

```diff
@@ -0,0 +1,81 @@
+from typing import Any
+
+from ollama import Client
+
+from not_again_ai.base.file_system import readable_size
+
+
+def list_models(client: Client) -> list[dict[str, Any]]:
+    """List models that are available locally.
+
+    Args:
+        client (Client): The Ollama client.
+
+    Returns:
+        list[dict[str, Any]]: A list of dictionaries (each corresponding to an available model) with the following keys:
+            name (str): Name of the model
+            model (str): Name of the model. This should be the same as the name.
+            modified_at (str): The date and time the model was last modified.
+            size (int): The size of the model in bytes.
+            size_readable (str): The size of the model in a human-readable format.
+            details (dict[str, Any]): Additional details about the model.
+    """
+    response = client.list().get("models", [])
+
+    response_data = []
+    for model_data in response:
+        curr_model_data = {}
+        curr_model_data["name"] = model_data["name"]
+        curr_model_data["model"] = model_data["model"]
+        curr_model_data["modified_at"] = model_data["modified_at"]
+        curr_model_data["size"] = model_data["size"]
+        curr_model_data["size_readable"] = readable_size(model_data["size"])
+        curr_model_data["details"] = model_data["details"]
+
+        response_data.append(curr_model_data)
+
+    return response_data
+
+
+def is_model_available(model_name: str, client: Client) -> bool:
+    """Check if a model is available locally.
+
+    Args:
+        model_name (str): The name of the model.
+        client (Client): The Ollama client.
+
+    Returns:
+        bool: True if the model is available locally, False otherwise.
+    """
+    # If model_name does not have a ":", append ":latest"
+    if ":" not in model_name:
+        model_name = f"{model_name}:latest"
+    models = list_models(client)
+    return any(model["name"] == model_name for model in models)
+
+
+def show(model_name: str, client: Client) -> dict[str, Any]:
+    """Show information about a model including the modelfile, available parameters, template, and additional details.
+
+    Args:
+        model_name (str): The name of the model.
+        client (Client): The Ollama client.
+    """
+    response = client.show(model_name)
+
+    response_data = {}
+    response_data["modelfile"] = response["modelfile"]
+    response_data["parameters"] = response["parameters"]
+    response_data["template"] = response["template"]
+    response_data["details"] = response["details"]
+    return response_data
+
+
+def pull(model_name: str, client: Client) -> Any:
+    """Pull a model from the Ollama server and returns the status of the pull operation."""
+    return client.pull(model_name)
+
+
+def delete(model_name: str, client: Client) -> Any:
+    """Delete a model from the local filesystem and returns the status of the delete operation."""
+    return client.delete(model_name)
```
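These helpers pair with the `chat_completion` error above that tells you to call `not_again_ai.llm.ollama.service.pull()` first. A sketch of that pull-if-missing workflow (the model name is an example):

```python
from not_again_ai.llm.ollama import service
from not_again_ai.llm.ollama.ollama_client import ollama_client

client = ollama_client(host="http://localhost:11434")
model_name = "phi3"  # example model name

# Pull the model only if it is not already available locally.
if not service.is_model_available(model_name, client):
    service.pull(model_name, client)

# List what is installed, with sizes made readable by base.file_system.readable_size.
for model in service.list_models(client):
    print(model["name"], model["size_readable"])

# Inspect the modelfile, parameters, and template for the model.
info = service.show(model_name, client)
print(info["parameters"])
```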
not_again_ai/llm/openai_api/__init__.py: File without changes
not_again_ai/llm/openai_api/chat_completion.py ADDED

```diff
@@ -0,0 +1,167 @@
+import contextlib
+import json
+from typing import Any
+
+from openai import OpenAI
+
+
+def chat_completion(
+    messages: list[dict[str, Any]],
+    model: str,
+    client: OpenAI,
+    tools: list[dict[str, Any]] | None = None,
+    tool_choice: str = "auto",
+    max_tokens: int | None = None,
+    temperature: float = 0.7,
+    json_mode: bool = False,
+    seed: int | None = None,
+    logprobs: tuple[bool, int | None] | None = None,
+    n: int = 1,
+    **kwargs: Any,
+) -> dict[str, Any]:
+    """Get an OpenAI chat completion response: https://platform.openai.com/docs/api-reference/chat/create
+
+    NOTE: Depending on the model, certain parameters may not be supported,
+    particularly for older vision-enabled models like gpt-4-1106-vision-preview.
+    Be sure to check the documentation: https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
+
+    Args:
+        messages (list): A list of messages comprising the conversation so far.
+        model (str): ID of the model to use. See the model endpoint compatibility table:
+            https://platform.openai.com/docs/models/model-endpoint-compatibility
+            for details on which models work with the Chat API.
+        client (OpenAI): An instance of the OpenAI client.
+        tools (list[dict[str, Any]], optional): A list of tools the model may generate JSON inputs for.
+            Defaults to None.
+        tool_choice (str, optional): The tool choice to use. Can be "auto", "required", "none", or a specific function name.
+            Note the function name cannot be any of "auto", "required", or "none". Defaults to "auto".
+        max_tokens (int, optional): The maximum number of tokens to generate in the chat completion.
+            Defaults to None, which automatically limits to the model's maximum context length.
+        temperature (float, optional): What sampling temperature to use, between 0 and 2.
+            Higher values like 0.8 will make the output more random,
+            while lower values like 0.2 will make it more focused and deterministic. Defaults to 0.7.
+        json_mode (bool, optional): When JSON mode is enabled, the model is constrained to only
+            generate strings that parse into valid JSON object and will return a dictionary.
+            See https://platform.openai.com/docs/guides/text-generation/json-mode
+        seed (int, optional): If specified, OpenAI will make a best effort to sample deterministically,
+            such that repeated requests with the same `seed` and parameters should return the same result.
+            Determinism is not guaranteed, and you should refer to the `system_fingerprint` response
+            parameter to monitor changes in the backend.
+        logprobs (tuple[bool, int], optional): Whether to return log probabilities of the output tokens or not.
+            If `logprobs[0]` is true, returns the log probabilities of each output token returned in the content of message.
+            `logprobs[1]` is an integer between 0 and 5 specifying the number of most likely tokens to return at each token position,
+            each with an associated log probability. `logprobs[0]` must be set to true if this parameter is used.
+        n (int, optional): How many chat completion choices to generate for each input message.
+            Defaults to 1.
+        **kwargs: Additional keyword arguments to pass to the OpenAI client chat completion.
+
+    Returns:
+        dict[str, Any]: A dictionary with the following keys:
+            'finish_reason' (str): The reason the model stopped generating further tokens.
+                Can be 'stop', 'length', or 'tool_calls'.
+            'tool_names' (list[str], optional): The names of the tools called by the model.
+            'tool_args_list' (list[dict], optional): The arguments of the tools called by the model.
+            'message' (str | dict): The content of the generated assistant message.
+                If json_mode is True, this will be a dictionary.
+            'logprobs' (list[dict[str, Any] | list[dict[str, Any]]]): If logprobs[1] is between 1 and 5, each element in the list
+                will be a list of dictionaries containing the token, logprob, and bytes for the top `logprobs[1]` logprobs. Otherwise,
+                this will be a list of dictionaries containing the token, logprob, and bytes for each token in the message.
+            'choices' (list[dict], optional): A list of chat completion choices if n > 1 where each dict contains the above fields.
+            'completion_tokens' (int): The number of tokens used by the model to generate the completion.
+                NOTE: If n > 1 this is the sum of all completions.
+            'prompt_tokens' (int): The number of tokens in the messages sent to the model.
+            'system_fingerprint' (str, optional): If seed is set, a unique identifier for the model used to generate the response.
+    """
+    response_format = {"type": "json_object"} if json_mode else None
+
+    kwargs.update(
+        {
+            "messages": messages,
+            "model": model,
+            "max_tokens": max_tokens,
+            "temperature": temperature,
+            "response_format": response_format,
+            "n": n,
+        }
+    )
+
+    if tools is not None:
+        kwargs["tools"] = tools
+        if tool_choice not in ["none", "auto", "required"]:
+            kwargs["tool_choice"] = {"type": "function", "function": {"name": tool_choice}}
+        else:
+            kwargs["tool_choice"] = tool_choice
+
+    if seed is not None:
+        kwargs["seed"] = seed
+
+    if logprobs is not None:
+        kwargs["logprobs"] = logprobs[0]
+        if logprobs[0] and logprobs[1] is not None:
+            kwargs["top_logprobs"] = logprobs[1]
+
+    response = client.chat.completions.create(**kwargs)
+
+    response_data: dict[str, Any] = {"choices": []}
+    for response_choice in response.choices:
+        response_data_curr = {}
+        finish_reason = response_choice.finish_reason
+        response_data_curr["finish_reason"] = finish_reason
+
+        # We first check for tool calls because even if the finish_reason is stop, the model may have called a tool
+        tool_calls = response_choice.message.tool_calls
+        if tool_calls:
+            tool_names = []
+            tool_args_list = []
+            for tool_call in tool_calls:
+                tool_names.append(tool_call.function.name)
+                tool_args_list.append(json.loads(tool_call.function.arguments))
+            response_data_curr["message"] = response_choice.message.content
+            response_data_curr["tool_names"] = tool_names
+            response_data_curr["tool_args_list"] = tool_args_list
+        elif finish_reason == "stop" or finish_reason == "length":
+            message = response_choice.message.content
+            if json_mode:
+                with contextlib.suppress(json.JSONDecodeError):
+                    message = json.loads(message)
+            response_data_curr["message"] = message
+
+        if response_choice.logprobs and response_choice.logprobs.content is not None:
+            logprobs_list: list[dict[str, Any] | list[dict[str, Any]]] = []
+            for logprob in response_choice.logprobs.content:
+                if logprob.top_logprobs:
+                    curr_logprob_infos = []
+                    for top_logprob in logprob.top_logprobs:
+                        curr_logprob_infos.append(
+                            {
+                                "token": top_logprob.token,
+                                "logprob": top_logprob.logprob,
+                                "bytes": top_logprob.bytes,
+                            }
+                        )
+                    logprobs_list.append(curr_logprob_infos)
+                else:
+                    logprobs_list.append(
+                        {
+                            "token": logprob.token,
+                            "logprob": logprob.logprob,
+                            "bytes": logprob.bytes,
+                        }
+                    )
+
+            response_data_curr["logprobs"] = logprobs_list
+        response_data["choices"].append(response_data_curr)
+
+    usage = response.usage
+    if usage is not None:
+        response_data["completion_tokens"] = usage.completion_tokens
+        response_data["prompt_tokens"] = usage.prompt_tokens
+
+    if seed is not None and response.system_fingerprint is not None:
+        response_data["system_fingerprint"] = response.system_fingerprint
+
+    if len(response_data["choices"]) == 1:
+        response_data.update(response_data["choices"][0])
+        del response_data["choices"]
+
+    return response_data
```
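The OpenAI-specific module keeps the tool calling, `n`, and logprobs features that the generic wrapper drops. A sketch of a direct call; the tool schema and model name here are illustrative, not part of the package:

```python
from not_again_ai.llm.openai_api.chat_completion import chat_completion
from not_again_ai.llm.openai_api.openai_client import openai_client

client = openai_client()

# An illustrative tool definition following the OpenAI function-tool schema.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
]

response = chat_completion(
    messages=[{"role": "user", "content": "What is the weather in Boston?"}],
    model="gpt-4o",       # example model name
    client=client,
    tools=tools,
    tool_choice="auto",   # or "required", "none", or a specific function name
    logprobs=(True, 3),   # return logprobs with the top 3 alternatives per token
    seed=42,
    max_tokens=200,
)

# If the model called the tool, the parsed names and arguments are returned directly.
print(response["finish_reason"])
print(response.get("tool_names"), response.get("tool_args_list"))
print(response.get("system_fingerprint"))
```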
{not_again_ai-0.6.0.dist-info → not_again_ai-0.8.0.dist-info}/METADATA CHANGED

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: not-again-ai
-Version: 0.6.0
+Version: 0.8.0
 Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
 Home-page: https://github.com/DaveCoDev/not-again-ai
 License: MIT
@@ -21,7 +21,8 @@ Provides-Extra: llm
 Provides-Extra: statistics
 Provides-Extra: viz
 Requires-Dist: numpy (>=1.26.4,<2.0.0) ; extra == "statistics" or extra == "viz"
-Requires-Dist:
+Requires-Dist: ollama (>=0.1.9,<0.2.0) ; extra == "llm"
+Requires-Dist: openai (>=1.25.1,<2.0.0) ; extra == "llm"
 Requires-Dist: pandas (>=2.2.2,<3.0.0) ; extra == "viz"
 Requires-Dist: python-liquid (>=1.12.1,<2.0.0) ; extra == "llm"
 Requires-Dist: scikit-learn (>=1.4.2,<2.0.0) ; extra == "statistics"
@@ -47,9 +48,9 @@ Description-Content-Type: text/markdown
 [ruff-badge]: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json
 [mypy-badge]: https://www.mypy-lang.org/static/mypy_badge.svg
 
-**not-again-ai** is a collection of various
+**not-again-ai** is a collection of various building blocks that come up over and over again when developing AI products. The key goals of this package are to have simple, but flexible interfaces and to minimize dependencies. Feel free to **a)** use this as a template for your own Python package. **b)** instead of installing the package, copy and paste functions into your own projects (this is made possible with the limited amount of dependencies and the MIT license).
 
-**Documentation** available within
+**Documentation** available within individual **[notebooks](notebooks)**, docstrings within the source, or auto-generated at [DaveCoDev.github.io/not-again-ai/](https://DaveCoDev.github.io/not-again-ai/).
 
 # Installation
 
@@ -61,82 +62,26 @@ Install the entire package from [PyPI](https://pypi.org/project/not-again-ai/) w
 $ pip install not_again_ai[llm,statistics,viz]
 ```
 
-The package is split into subpackages, so you can install only the parts you need.
+The package is split into subpackages, so you can install only the parts you need. See the **[notebooks](notebooks)** for examples.
 * **Base only**: `pip install not_again_ai`
-* **LLM
+* **LLM**: `pip install not_again_ai[llm]`
+  1. If you wish to use OpenAI
+     1. Go to https://platform.openai.com/settings/profile?tab=api-keys to get your API key.
+     1. (Optionally) Set the `OPENAI_API_KEY` and the `OPENAI_ORG_ID` environment variables.
+  1. If you wish to use Ollama:
+     1. follow the instructions to install ollama for your system: https://github.com/ollama/ollama
+     1. [Add Ollama as a startup service (recommended)](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended)
+     1. If you'd like to make the ollama service accessible on your local network and it is hosted on Linux, add the following to the `/etc/systemd/system/ollama.service` file:
+     ```bash
+     [Service]
+     ...
+     Environment="OLLAMA_HOST=0.0.0.0"
+     ```
+     Now ollama will be available at `http://<local_address>:11434`
 * **Statistics**: `pip install not_again_ai[statistics]`
 * **Visualization**: `pip install not_again_ai[viz]`
 
 
-# Quick Tour
-
-## Base
-[README](https://github.com/DaveCoDev/not-again-ai/blob/main/readmes/base.md)
-
-The base package includes only functions that have minimal external dependencies and are useful in a variety of situations such as parallelization and filesystem operations.
-
-## LLM (Large Language Model)
-[README](https://github.com/DaveCoDev/not-again-ai/blob/main/readmes/llm.md), [Example Notebooks](https://github.com/DaveCoDev/not-again-ai/blob/main/notebooks/llm/)
-
-Supports OpenAI chat completions and text embeddings. Includes functions for creating chat completion prompts, token management, and context management.
-
-One example:
-```python
-client = openai_client()
-messages = [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Hello!"}]
-response = chat_completion(messages=messages, model="gpt-3.5-turbo", max_tokens=100, client=client)["message"]
->>> "Hello! How can I help you today?"
-```
-
-## Statistics
-[README](https://github.com/DaveCoDev/not-again-ai/blob/main/readmes/statistics.md)
-
-We provide a few helpers for data analysis such as:
-
-```python
-from not_again_ai.statistics.dependence import pearson_correlation
-# quadratic dependence
->>> x = (rs.rand(500) * 4) - 2
->>> y = x**2 + (rs.randn(500) * 0.2)
->>> pearson_correlation(x, y)
-0.05
-```
-
-## Visualization
-[README](https://github.com/DaveCoDev/not-again-ai/blob/main/readmes/viz.md)
-
-We offer opinionated wrappers around seaborn to make common visualizations easier to create and customize.
-
-```python
->>> import numpy as np
->>> import pandas as pd
->>> from not_again_ai.viz.time_series import ts_lineplot
->>> from not_again_ai.viz.distributions import univariate_distplot
-
-# get some time series data
->>> rs = np.random.RandomState(365)
->>> values = rs.randn(365, 4).cumsum(axis=0)
->>> dates = pd.date_range('1 1 2021', periods=365, freq='D')
-# plot the time series and save it to a file
->>> ts_lineplot(ts_data=values, save_pathname='myplot.png', ts_x=dates, ts_names=['A', 'B', 'C', 'D'])
-
-# get a random distribution
->>> distrib = np.random.beta(a=0.5, b=0.5, size=1000)
-# plot the distribution and save it to a file
->>> univariate_distplot(
-...     data=distrib,
-...     save_pathname='mydistribution.svg',
-...     print_summary=False, bins=100,
-...     title=r'Beta Distribution $\alpha=0.5, \beta=0.5$'
-... )
-```
-
-<p float="center">
-    <img src="https://raw.githubusercontent.com/DaveCoDev/not-again-ai/44c53fb7fb07234aaceea40c90d8cb74e5fa6c15/assets/distributions_test4.svg" width="404" />
-    <img src="https://raw.githubusercontent.com/DaveCoDev/not-again-ai/44c53fb7fb07234aaceea40c90d8cb74e5fa6c15/assets/ts_lineplot5.svg" width="404" />
-</p>
-
-
 # Development Information
 
 The following information is relevant if you would like to contribute or use this package as a template for yourself.
````
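Tying the README's `OLLAMA_HOST=0.0.0.0` setup back to the new client code: once the service listens on all interfaces, a client on another machine can point at the server's address (the address below is a placeholder):

```python
from not_again_ai.llm.ollama import service
from not_again_ai.llm.ollama.ollama_client import ollama_client

# Placeholder address for a machine on the local network running the Ollama service.
client = ollama_client(host="http://192.168.1.50:11434")
print(service.list_models(client))
```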
not_again_ai-0.8.0.dist-info/RECORD ADDED

```diff
@@ -0,0 +1,31 @@
+not_again_ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+not_again_ai/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+not_again_ai/base/file_system.py,sha256=KNQmacO4Q__CQuq2oPzWrg3rQO48n3evglc9bNiP7KM,949
+not_again_ai/base/parallel.py,sha256=fcYhKBYBWvob84iKp3O93wvFFdXeidljZsShgBLTNGA,3448
+not_again_ai/llm/__init__.py,sha256=_wNUL6FDaT369Z8W48FsaC_NkcOZ-ib2MMUvnaLOS-0,451
+not_again_ai/llm/chat_completion.py,sha256=a5jqce5qQzwdDFK8W7XRqGjTdFHAqKcWDmrTDtKls9U,3070
+not_again_ai/llm/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+not_again_ai/llm/ollama/chat_completion.py,sha256=EKvqpHztsEISH9skYaLgKABEeoDhUlUyrakz_v6zvTw,3682
+not_again_ai/llm/ollama/ollama_client.py,sha256=dktyw7aKFq4EA3dU7Le5UpfsSq3Oh_POmYSrAI4qLi8,765
+not_again_ai/llm/ollama/service.py,sha256=XczbxISTAp4KHnIkqRZaMsfBohH-TAHrjZ8T9x3cRAY,2900
+not_again_ai/llm/openai_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+not_again_ai/llm/openai_api/chat_completion.py,sha256=UJljrAV1lS2UvA5Sjt6N7S-9DaAgKRBpswLNDDP9RBI,8623
+not_again_ai/llm/openai_api/context_management.py,sha256=SKksCHs-C-UXtSksvXSvWWR4eeUyuyVc6XIhGgqJUF4,3435
+not_again_ai/llm/openai_api/embeddings.py,sha256=4OBnxZicrY6q4dQhuPqMdAnifyjwrsKMTDj-kVre0yc,2500
+not_again_ai/llm/openai_api/openai_client.py,sha256=6pZw2xw9X-ceV22rhApwFJ2tAKCxi-SxkjxBsTBZ2Nw,2470
+not_again_ai/llm/openai_api/prompts.py,sha256=7cDfvIKCTYM0t5lK34FLLqYf-SR_cynDXIXw3zWDizA,7094
+not_again_ai/llm/openai_api/tokens.py,sha256=2WGHzZJ0mIFAGpkuS_otjFzwhqjaSMgLoP2FVMnJTiE,4301
+not_again_ai/py.typed,sha256=UaCuPFa3H8UAakbt-5G8SPacldTOGvJv18pPjUJ5gDY,93
+not_again_ai/statistics/__init__.py,sha256=gA8r9JQFbFSN0ykrHy4G1IQgcky4f2eM5Oo24oVI5Ik,466
+not_again_ai/statistics/dependence.py,sha256=yZDk_e3ng96mp4hu8dDtQ0-uIn6KdSuGRS9uyM0O3x0,4429
+not_again_ai/viz/__init__.py,sha256=MeaWae_QRbDEHJ4MWYoY1-Ad6S0FhSDaRhQncS2cpSc,447
+not_again_ai/viz/barplots.py,sha256=xhxEXf4mfaymxDQtpphg9auy03zQ9e29R6L9yHixW1Y,3382
+not_again_ai/viz/distributions.py,sha256=OyWwJaNI6lMRm_iSrhq-CORLNvXfeuLSgDtVo3umnzU,4354
+not_again_ai/viz/scatterplot.py,sha256=eBtIf0Tf_1EcN-akRNJgvwLU0zpRx1zOl0VF9QTnbZA,2290
+not_again_ai/viz/time_series.py,sha256=pOGZqXp_2nd6nKo-PUQNCtmMh__69jxQ6bQibTGLwZA,5212
+not_again_ai/viz/utils.py,sha256=hN7gwxtBt3U6jQni2K8j5m5pCXpaJDoNzGhBBikEU28,238
+not_again_ai-0.8.0.dist-info/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
+not_again_ai-0.8.0.dist-info/METADATA,sha256=xM1pNeu0diA32AzTmWV_u8KNXpqkdjRBzrzsfJ9VGr0,14203
+not_again_ai-0.8.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+not_again_ai-0.8.0.dist-info/entry_points.txt,sha256=EMJegugnmJUd-jMUA_qIRMIPAasbei8gP6O4-ER0BxQ,61
+not_again_ai-0.8.0.dist-info/RECORD,,
```
not_again_ai-0.6.0.dist-info/RECORD DELETED

```diff
@@ -1,25 +0,0 @@
-not_again_ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-not_again_ai/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-not_again_ai/base/file_system.py,sha256=SX1ab2igdcFGjvdh4rDJIYVXbYC-jsYnCiM4oZtutAU,344
-not_again_ai/base/parallel.py,sha256=fcYhKBYBWvob84iKp3O93wvFFdXeidljZsShgBLTNGA,3448
-not_again_ai/llm/__init__.py,sha256=_wNUL6FDaT369Z8W48FsaC_NkcOZ-ib2MMUvnaLOS-0,451
-not_again_ai/llm/chat_completion.py,sha256=rjgaswSX4lf5ufPsv5SKWsXvCglLMFbX4y6bUvueUNQ,8528
-not_again_ai/llm/context_management.py,sha256=LXNd_L-jxurIFR1bO7gWakEH8I-cZmvLW83dFtvfJa4,3424
-not_again_ai/llm/embeddings.py,sha256=4OBnxZicrY6q4dQhuPqMdAnifyjwrsKMTDj-kVre0yc,2500
-not_again_ai/llm/openai_client.py,sha256=6pZw2xw9X-ceV22rhApwFJ2tAKCxi-SxkjxBsTBZ2Nw,2470
-not_again_ai/llm/prompts.py,sha256=7cDfvIKCTYM0t5lK34FLLqYf-SR_cynDXIXw3zWDizA,7094
-not_again_ai/llm/tokens.py,sha256=2WGHzZJ0mIFAGpkuS_otjFzwhqjaSMgLoP2FVMnJTiE,4301
-not_again_ai/py.typed,sha256=UaCuPFa3H8UAakbt-5G8SPacldTOGvJv18pPjUJ5gDY,93
-not_again_ai/statistics/__init__.py,sha256=gA8r9JQFbFSN0ykrHy4G1IQgcky4f2eM5Oo24oVI5Ik,466
-not_again_ai/statistics/dependence.py,sha256=yZDk_e3ng96mp4hu8dDtQ0-uIn6KdSuGRS9uyM0O3x0,4429
-not_again_ai/viz/__init__.py,sha256=MeaWae_QRbDEHJ4MWYoY1-Ad6S0FhSDaRhQncS2cpSc,447
-not_again_ai/viz/barplots.py,sha256=xhxEXf4mfaymxDQtpphg9auy03zQ9e29R6L9yHixW1Y,3382
-not_again_ai/viz/distributions.py,sha256=OyWwJaNI6lMRm_iSrhq-CORLNvXfeuLSgDtVo3umnzU,4354
-not_again_ai/viz/scatterplot.py,sha256=eBtIf0Tf_1EcN-akRNJgvwLU0zpRx1zOl0VF9QTnbZA,2290
-not_again_ai/viz/time_series.py,sha256=pOGZqXp_2nd6nKo-PUQNCtmMh__69jxQ6bQibTGLwZA,5212
-not_again_ai/viz/utils.py,sha256=hN7gwxtBt3U6jQni2K8j5m5pCXpaJDoNzGhBBikEU28,238
-not_again_ai-0.6.0.dist-info/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
-not_again_ai-0.6.0.dist-info/METADATA,sha256=3OVUTI8dpkQ9qCxLt2r7nFHk8tXPWDh1jnGAhlHIOUQ,15966
-not_again_ai-0.6.0.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-not_again_ai-0.6.0.dist-info/entry_points.txt,sha256=EMJegugnmJUd-jMUA_qIRMIPAasbei8gP6O4-ER0BxQ,61
-not_again_ai-0.6.0.dist-info/RECORD,,
```
not_again_ai/llm/embeddings.py → not_again_ai/llm/openai_api/embeddings.py: File without changes
not_again_ai/llm/openai_client.py → not_again_ai/llm/openai_api/openai_client.py: File without changes
not_again_ai/llm/prompts.py → not_again_ai/llm/openai_api/prompts.py: File without changes
not_again_ai/llm/tokens.py → not_again_ai/llm/openai_api/tokens.py: File without changes
{not_again_ai-0.6.0.dist-info → not_again_ai-0.8.0.dist-info}/LICENSE: File without changes
{not_again_ai-0.6.0.dist-info → not_again_ai-0.8.0.dist-info}/entry_points.txt: File without changes