not-again-ai 0.4.4.tar.gz → 0.5.0.tar.gz
This diff shows the changes between publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/PKG-INFO +3 -4
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/pyproject.toml +6 -4
- not_again_ai-0.5.0/src/not_again_ai/llm/__init__.py +15 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/llm/chat_completion.py +1 -3
- not_again_ai-0.5.0/src/not_again_ai/llm/chat_completion_vision.py +88 -0
- not_again_ai-0.5.0/src/not_again_ai/llm/prompts.py +220 -0
- not_again_ai-0.5.0/src/not_again_ai/statistics/__init__.py +15 -0
- not_again_ai-0.5.0/src/not_again_ai/viz/__init__.py +15 -0
- not_again_ai-0.4.4/src/not_again_ai/llm/__init__.py +0 -9
- not_again_ai-0.4.4/src/not_again_ai/llm/prompts.py +0 -57
- not_again_ai-0.4.4/src/not_again_ai/statistics/__init__.py +0 -9
- not_again_ai-0.4.4/src/not_again_ai/viz/__init__.py +0 -9
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/LICENSE +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/README.md +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/__init__.py +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/base/__init__.py +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/base/file_system.py +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/base/parallel.py +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/llm/context_management.py +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/llm/embeddings.py +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/llm/openai_client.py +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/llm/tokens.py +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/py.typed +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/statistics/dependence.py +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/viz/barplots.py +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/viz/distributions.py +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/viz/scatterplot.py +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/viz/time_series.py +0 -0
- {not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/viz/utils.py +0 -0
{not_again_ai-0.4.4 → not_again_ai-0.5.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: not-again-ai
-Version: 0.4.4
+Version: 0.5.0
 Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
 Home-page: https://github.com/DaveCoDev/not-again-ai
 License: MIT
@@ -16,17 +16,16 @@ Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3 :: Only
 Classifier: Typing :: Typed
 Provides-Extra: llm
 Provides-Extra: statistics
 Provides-Extra: viz
 Requires-Dist: numpy (>=1.26.4,<2.0.0) ; extra == "statistics" or extra == "viz"
-Requires-Dist: openai (>=1.…
+Requires-Dist: openai (>=1.16.2,<2.0.0) ; extra == "llm"
 Requires-Dist: pandas (>=2.2.1,<3.0.0) ; extra == "viz"
 Requires-Dist: python-liquid (>=1.12.1,<2.0.0) ; extra == "llm"
 Requires-Dist: scikit-learn (>=1.4.1.post1,<2.0.0) ; extra == "statistics"
-Requires-Dist: scipy (>=1.…
+Requires-Dist: scipy (>=1.13.0,<2.0.0) ; extra == "statistics"
 Requires-Dist: seaborn (>=0.13.2,<0.14.0) ; extra == "viz"
 Requires-Dist: tiktoken (>=0.6.0,<0.7.0) ; extra == "llm"
 Project-URL: Documentation, https://github.com/DaveCoDev/not-again-ai
{not_again_ai-0.4.4 → not_again_ai-0.5.0}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "not-again-ai"
-version = "0.4.4"
+version = "0.5.0"
 description = "Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place."
 authors = ["DaveCoDev <dave.co.dev@gmail.com>"]
 license = "MIT"
@@ -15,7 +15,6 @@ classifiers = [
     "Operating System :: OS Independent",
     "Programming Language :: Python",
     "Programming Language :: Python :: 3",
-    "Programming Language :: Python :: 3 :: Only",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
     "Typing :: Typed",
@@ -29,10 +28,10 @@ python = "^3.11, <3.13"
 
 # Optional dependencies are defined here, and groupings are defined below.
 numpy = { version = "^1.26.4", optional = true }
-openai = { version = "^1.…
+openai = { version = "^1.16.2", optional = true }
 pandas = { version = "^2.2.1", optional = true }
 python-liquid = { version = "^1.12.1", optional = true }
-scipy = { version = "^1.…
+scipy = { version = "^1.13.0", optional = true }
 scikit-learn = { version = "^1.4.1.post1", optional = true }
 seaborn = { version = "^0.13.2", optional = true }
 tiktoken = { version = "^0.6.0", optional = true }
@@ -90,12 +89,15 @@ src = ["src", "tests"]
 
 [tool.ruff.lint]
 select = [
+    "F", # pyflakes
+    "E", # pycodestyle
     "I", # isort
     "N", # pep8-naming
     "UP", # pyupgrade
     "RUF", # ruff
     "B", # flake8-bugbear
     "C4", # flake8-comprehensions
+    "ISC", # flake8-implicit-str-concat
     "PTH", # flake8-use-pathlib
     "SIM", # flake8-simplify
     "TID", # flake8-tidy-imports
not_again_ai-0.5.0/src/not_again_ai/llm/__init__.py (new file)
@@ -0,0 +1,15 @@
+import importlib.util
+
+if (
+    importlib.util.find_spec("liquid") is None
+    or importlib.util.find_spec("openai") is None
+    or importlib.util.find_spec("tiktoken") is None
+):
+    raise ImportError(
+        "not_again_ai.llm requires the 'llm' extra to be installed. "
+        "You can install it using 'pip install not_again_ai[llm]'."
+    )
+else:
+    import liquid  # noqa: F401
+    import openai  # noqa: F401
+    import tiktoken  # noqa: F401
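Note: the new `__init__.py` guard above makes a missing optional dependency fail fast at import time with an actionable message, instead of surfacing a bare ModuleNotFoundError deeper in the call stack. A minimal sketch of the resulting behavior (the try/except is illustrative, not part of the package):

    # Hypothetical session in an environment without the 'llm' extra installed.
    try:
        import not_again_ai.llm  # noqa: F401
    except ImportError as err:
        print(err)  # "not_again_ai.llm requires the 'llm' extra to be installed. ..."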
{not_again_ai-0.4.4 → not_again_ai-0.5.0}/src/not_again_ai/llm/chat_completion.py
@@ -105,8 +105,7 @@ def chat_completion(
         finish_reason = response_choice.finish_reason
         response_data_curr["finish_reason"] = finish_reason
 
-        # …
-        # the finish reason is "stop", not "tool_calls"
+        # We first check for tool calls because even if the finish_reason is stop, the model may have called a tool
         tool_calls = response_choice.message.tool_calls
         if tool_calls:
            tool_names = []
@@ -159,7 +158,6 @@ def chat_completion(
         response_data["system_fingerprint"] = response.system_fingerprint
 
     if len(response_data["choices"]) == 1:
-        # Add all the fields in the first choice dict to the response_data dict
         response_data.update(response_data["choices"][0])
         del response_data["choices"]
not_again_ai-0.5.0/src/not_again_ai/llm/chat_completion_vision.py (new file)
@@ -0,0 +1,88 @@
+from typing import Any
+
+from openai import OpenAI
+
+
+def chat_completion_vision(
+    messages: list[dict[str, Any]],
+    model: str,
+    client: OpenAI,
+    max_tokens: int | None = None,
+    temperature: float = 0.7,
+    seed: int | None = None,
+    n: int = 1,
+    **kwargs: Any,
+) -> dict[str, Any]:
+    """Get an OpenAI chat completion response for vision models only: https://platform.openai.com/docs/guides/vision
+
+    Args:
+        messages (list): A list of messages comprising the conversation so far.
+            See https://platform.openai.com/docs/api-reference/chat/create for details on the format
+        model (str): ID of the model to use for generating chat completions. Refer to OpenAI's documentation
+            for details on available models.
+        client (OpenAI): An instance of the OpenAI client, used to make requests to the API.
+        max_tokens (int | None, optional): The maximum number of tokens to generate in the chat completion.
+            If None, defaults to the model's maximum context length. Defaults to None.
+        temperature (float, optional): Controls the randomness of the output. A higher temperature produces
+            more varied results, whereas a lower temperature results in more deterministic and predictable text.
+            Must be between 0 and 2. Defaults to 0.7.
+        seed (int | None, optional): A seed used for deterministic generation. Providing a seed ensures that
+            the same input will produce the same output across different runs. Defaults to None.
+        n (int, optional): The number of chat completion choices to generate for each input message.
+            Defaults to 1.
+        **kwargs (Any): Additional keyword arguments to pass to the OpenAI client chat completion method.
+
+    Returns:
+        dict[str, Any]: A dictionary containing the generated responses and metadata. Key components include:
+            'finish_reason' (str): The reason the model stopped generating further tokens.
+                Can be 'stop' or 'length'
+            'tool_names' (list[str], optional): The names of the tools called by the model.
+            'tool_args_list' (list[dict], optional): The arguments of the tools called by the model.
+            'message' (str | dict): The content of the generated assistant message.
+            'choices' (list[dict], optional): A list of chat completion choices if n > 1 where each dict contains the above fields.
+            'completion_tokens' (int): The number of tokens used by the model to generate the completion.
+                NOTE: If n > 1 this is the sum of all completions and thus will be same value in each dict.
+            'prompt_tokens' (int): The number of tokens in the generated response.
+                NOTE: If n > 1 this is the sum of all completions and thus will be same value in each dict.
+            'system_fingerprint' (str, optional): If seed is set, a unique identifier for the model used to generate the response.
+    """
+    kwargs.update(
+        {
+            "messages": messages,
+            "model": model,
+            "max_tokens": max_tokens,
+            "temperature": temperature,
+            "n": n,
+        }
+    )
+
+    if seed is not None:
+        kwargs["seed"] = seed
+
+    response = client.chat.completions.create(**kwargs)
+
+    response_data: dict[str, Any] = {"choices": []}
+    for response_choice in response.choices:
+        response_data_curr = {}
+        finish_reason = response_choice.finish_reason
+        response_data_curr["finish_reason"] = finish_reason
+
+        if finish_reason == "stop" or finish_reason == "length":
+            message = response_choice.message.content
+            response_data_curr["message"] = message
+
+        response_data["choices"].append(response_data_curr)
+
+    usage = response.usage
+    if usage is not None:
+        response_data["completion_tokens"] = usage.completion_tokens
+        response_data["prompt_tokens"] = usage.prompt_tokens
+
+    if seed is not None and response.system_fingerprint is not None:
+        response_data["system_fingerprint"] = response.system_fingerprint
+
+    if len(response_data["choices"]) == 1:
+        response_data.update(response_data["choices"][0])
+        del response_data["choices"]
+
+    return response_data
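Note: a minimal usage sketch of the new `chat_completion_vision` function, assuming `OPENAI_API_KEY` is set in the environment; the model name, image URL, and prompt are placeholder values, not taken from this diff:

    from openai import OpenAI

    from not_again_ai.llm.chat_completion_vision import chat_completion_vision

    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image in one sentence."},
                # Placeholder URL; a base64 data URL (see prompts.py below) also works.
                {"type": "image_url", "image_url": {"url": "https://example.com/cat.jpg", "detail": "low"}},
            ],
        }
    ]
    result = chat_completion_vision(messages=messages, model="gpt-4-vision-preview", client=client, max_tokens=200)
    print(result["finish_reason"], result["message"])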
not_again_ai-0.5.0/src/not_again_ai/llm/prompts.py (new file)
@@ -0,0 +1,220 @@
+import base64
+from copy import deepcopy
+import mimetypes
+from pathlib import Path
+from typing import Any
+
+from liquid import Template
+
+
+def _validate_message(message: dict[str, str]) -> bool:
+    """Valides that a message has valid fields and if the role is valid.
+    See https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages
+    """
+    valid_fields = ["role", "content", "name", "tool_call_id", "tool_calls"]
+    # Check if the only keys in the message are in valid_fields
+    if not all(key in valid_fields for key in message):
+        raise ValueError(f"Message contains invalid fields: {message.keys()}")
+
+    # Check if the only roles in the message are in valid_fields
+    valid_roles = ["system", "user", "assistant", "tool"]
+    if message["role"] not in valid_roles:
+        raise ValueError(f"Message contains invalid role: {message['role']}")
+
+    return True
+
+
+def chat_prompt(messages_unformatted: list[dict[str, str]], variables: dict[str, str]) -> list[dict[str, str]]:
+    """
+    Formats a list of messages for OpenAI's chat completion API using Liquid templating.
+
+    Args:
+        messages_unformatted: A list of dictionaries where each dictionary
+            represents a message. Each message must have 'role' and 'content'
+            keys with string values, where content is a Liquid template.
+        variables: A dictionary where each key-value pair represents a variable
+            name and its value for template rendering.
+
+    Returns:
+        A list of dictionaries with the same structure as `messages_unformatted`,
+        but with the 'content' of each message with the provided `variables`.
+
+    Examples:
+        >>> messages = [
+        ...     {"role": "system", "content": "You are a helpful assistant."},
+        ...     {"role": "user", "content": "Help me {{task}}"}
+        ... ]
+        >>> vars = {"task": "write Python code for the fibonnaci sequence"}
+        >>> chat_prompt(messages, vars)
+        [
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "Help me write Python code for the fibonnaci sequence"}
+        ]
+    """
+
+    messages_formatted = deepcopy(messages_unformatted)
+    for message in messages_formatted:
+        if not _validate_message(message):
+            raise ValueError()
+
+        liquid_template = Template(message["content"])
+        message["content"] = liquid_template.render(**variables)
+
+    return messages_formatted
+
+
+def _validate_message_vision(message: dict[str, list[dict[str, Path | str]] | str]) -> bool:
+    """Validates that a message for a vision model is valid"""
+    valid_fields = ["role", "content"]
+    if not all(key in valid_fields for key in message):
+        raise ValueError(f"Message contains invalid fields: {message.keys()}")
+
+    valid_roles = ["system", "user", "assistant"]
+    if message["role"] not in valid_roles:
+        raise ValueError(f"Message contains invalid role: {message['role']}")
+
+    if not isinstance(message["content"], list) and not isinstance(message["content"], str):
+        raise ValueError(f"content must be a list of dictionaries or a string: {message['content']}")
+
+    if isinstance(message["content"], list):
+        for content_part in message["content"]:
+            if isinstance(content_part, dict):
+                if "image" not in content_part:
+                    raise ValueError(f"Dictionary content part must contain 'image' key: {content_part}")
+                if "detail" in content_part and content_part["detail"] not in ["low", "high"]:
+                    raise ValueError(f"Optional 'detail' key must be 'low' or 'high': {content_part['detail']}")
+            elif not isinstance(content_part, str):
+                raise ValueError(f"content_part must be a dictionary or a string: {content_part}")
+
+    return True
+
+
+def encode_image(image_path: Path) -> str:
+    """Encodes an image file at the given Path to base64.
+
+    Args:
+        image_path: The path to the image file to encode.
+
+    Returns:
+        The base64 encoded image as a string.
+    """
+    with Path.open(image_path, "rb") as image_file:
+        return base64.b64encode(image_file.read()).decode("utf-8")
+
+
+def create_image_url(image_path: Path) -> str:
+    """Creates a data URL for an image file at the given Path.
+
+    Args:
+        image_path: The path to the image file to encode.
+
+    Returns:
+        The data URL for the image.
+    """
+    image_data = encode_image(image_path)
+
+    valid_mime_types = ["image/jpeg", "image/png", "image/webp", "image/gif"]
+
+    # Get the MIME type from the image file extension
+    mime_type = mimetypes.guess_type(image_path)[0]
+
+    # Check if the MIME type is valid
+    # List of valid types is here: https://platform.openai.com/docs/guides/vision/what-type-of-files-can-i-upload
+    if mime_type not in valid_mime_types:
+        raise ValueError(f"Invalid MIME type for image: {mime_type}")
+
+    return f"data:{mime_type};base64,{image_data}"
+
+
+def chat_prompt_vision(messages_unformatted: list[dict[str, Any]], variables: dict[str, str]) -> list[dict[str, Any]]:
+    """Formats a list of messages for OpenAI's chat completion API for vision models only using Liquid templating.
+
+    Args:
+        messages_unformatted (list[dict[str, list[dict[str, Path | str]] | str]]):
+            A list of dictionaries where each dictionary represents a message.
+            Each message must have 'role' and 'content' keys. `role` must be 'system', 'user', or 'assistant'.
+            `content` can be a Liquid template string or a list of dictionaries where each dictionary
+            represents a content part. Each content part can be a string or a dictionary with 'image' and 'detail' keys.
+            The 'image' key must be a Path or a string representing a URL. The 'detail' key is optional and must be 'low' or 'high'.
+        variables: A dictionary where each key-value pair represents a variable
+            name and its value for template rendering.
+
+    Returns:
+        A list which represents messages in the format that OpenAI expects for its chat completions API.
+        See here for details: https://platform.openai.com/docs/api-reference/chat/create
+
+    Examples:
+        >>> # Assume cat_image and dog_image are Path objects to image files
+        >>> messages = [
+        ...     {"role": "system", "content": "You are a helpful assistant."},
+        ...     {
+        ...         "role": "user",
+        ...         "content": ["Describe the animal in the image in one word.", {"image": cat_image, "detail": "low"}],
+        ...     }
+        ...     {"role": "assistant", "content": "{{ answer }}"},
+        ...     {
+        ...         "role": "user",
+        ...         "content": ["What about this animal?", {"image": dog_image, "detail": "high"}],
+        ...     }
+        ... ]
+        >>> vars = {"answer": "Cat"}
+        >>> chat_prompt(messages, vars)
+        [
+            {"role": "system", "content": "You are a helpful assistant."},
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "Describe the animal in the image in one word."},
+                    {
+                        "type": "image_url",
+                        "image_url": {"url": f"data:image/jpeg;base64,<encoding>", "detail": "low"},
+                    },
+                ],
+            },
+            {"role": "assistant", "content": "Cat"},
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "What about this animal?"},
+                    {
+                        "type": "image_url",
+                        "image_url": {"url": f"data:image/jpeg;base64,<encoding>", "detail": "high"},
+                    },
+                ],
+            },
+        ]
+    """
+    messages_formatted = deepcopy(messages_unformatted)
+    for message in messages_formatted:
+        if not _validate_message_vision(message):
+            raise ValueError()
+
+        if isinstance(message["content"], list):
+            for i in range(len(message["content"])):
+                content_part = message["content"][i]
+                if isinstance(content_part, dict):
+                    image_path = content_part["image"]
+                    if isinstance(image_path, Path):
+                        temp_content_part: dict[str, Any] = {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": create_image_url(image_path),
+                            },
+                        }
+                        if "detail" in content_part:
+                            temp_content_part["image_url"]["detail"] = content_part["detail"]
+                    elif isinstance(image_path, str):
+                        # Assume its a valid URL
+                        pass
+                    else:
+                        raise ValueError(f"Image path must be a Path or str: {image_path}")
+                    message["content"][i] = temp_content_part
+                elif isinstance(content_part, str):
+                    message["content"][i] = {
+                        "type": "text",
+                        "text": Template(content_part).render(**variables),
+                    }
+        elif isinstance(message["content"], str):
+            message["content"] = Template(message["content"]).render(**variables)
+
+    return messages_formatted
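Note: the new prompt helpers compose with `chat_completion_vision` above; `chat_prompt_vision` renders Liquid templates in string content parts and converts `Path` images to base64 data URLs. A minimal sketch, with the file name and variables purely illustrative:

    from pathlib import Path

    from openai import OpenAI

    from not_again_ai.llm.chat_completion_vision import chat_completion_vision
    from not_again_ai.llm.prompts import chat_prompt_vision

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {
            "role": "user",
            # String parts are Liquid templates; dict parts become base64 data URLs.
            "content": ["Describe the animal in one {{ length }}.", {"image": Path("cat.jpg"), "detail": "low"}],
        },
    ]
    formatted = chat_prompt_vision(messages, {"length": "word"})
    response = chat_completion_vision(messages=formatted, model="gpt-4-vision-preview", client=OpenAI())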
not_again_ai-0.5.0/src/not_again_ai/statistics/__init__.py (new file)
@@ -0,0 +1,15 @@
+import importlib.util
+
+if (
+    importlib.util.find_spec("numpy") is None
+    or importlib.util.find_spec("scipy") is None
+    or importlib.util.find_spec("sklearn") is None
+):
+    raise ImportError(
+        "not_again_ai.statistics requires the 'statistics' extra to be installed. "
+        "You can install it using 'pip install not_again_ai[statistics]'."
+    )
+else:
+    import numpy  # noqa: F401
+    import scipy  # noqa: F401
+    import sklearn  # noqa: F401
not_again_ai-0.5.0/src/not_again_ai/viz/__init__.py (new file)
@@ -0,0 +1,15 @@
+import importlib.util
+
+if (
+    importlib.util.find_spec("numpy") is None
+    or importlib.util.find_spec("pandas") is None
+    or importlib.util.find_spec("seaborn") is None
+):
+    raise ImportError(
+        "not_again_ai.viz requires the 'viz' extra to be installed. "
+        "You can install it using 'pip install not_again_ai[viz]'."
+    )
+else:
+    import numpy  # noqa: F401
+    import pandas  # noqa: F401
+    import seaborn  # noqa: F401
not_again_ai-0.4.4/src/not_again_ai/llm/prompts.py (removed)
@@ -1,57 +0,0 @@
-from liquid import Template
-
-
-def _validate_message(message: dict[str, str]) -> bool:
-    """Valides that a message has valid fields and if the role is valid.
-    See https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages
-    """
-    valid_fields = ["role", "content", "name", "tool_call_id", "tool_calls"]
-    # Check if the only keys in the message are in valid_fields
-    if not all(key in valid_fields for key in message):
-        return False
-
-    # Check if the only roles in the message are in valid_fields
-    valid_roles = ["system", "user", "assistant", "tool"]
-    if message["role"] not in valid_roles:
-        return False
-
-    return True
-
-
-def chat_prompt(messages_unformatted: list[dict[str, str]], variables: dict[str, str]) -> list[dict[str, str]]:
-    """
-    Formats a list of messages for OpenAI's chat completion API using Liquid templating.
-
-    Args:
-        messages_unformatted: A list of dictionaries where each dictionary
-            represents a message. Each message must have 'role' and 'content'
-            keys with string values, where content is a Liquid template.
-        variables: A dictionary where each key-value pair represents a variable
-            name and its value for template rendering.
-
-    Returns:
-        A list of dictionaries with the same structure as `messages_unformatted`,
-        but with the 'content' of each message with the provided `variables`.
-
-    Examples:
-        >>> messages = [
-        ...     {"role": "system", "content": "You are a helpful assistant."},
-        ...     {"role": "user", "content": "Help me {{task}}"}
-        ... ]
-        >>> vars = {"task": "write Python code for the fibonnaci sequence"}
-        >>> chat_prompt(messages, vars)
-        [
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": "Help me write Python code for the fibonnaci sequence"}
-        ]
-    """
-
-    messages_formatted = messages_unformatted.copy()
-    for message in messages_formatted:
-        if not _validate_message(message):
-            raise ValueError(f"Invalid message: {message}")
-
-        liquid_template = Template(message["content"])
-        message["content"] = liquid_template.render(**variables)
-
-    return messages_formatted
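Note: comparing the removed file above with its 0.5.0 replacement shows a behavioral fix beyond the relocation: 0.4.4 copied the message list with `list.copy()`, a shallow copy that shares the inner message dicts, so rendering templates mutated the caller's original messages; 0.5.0 switches to `deepcopy`. A short illustration of the aliasing problem (not package code):

    from copy import deepcopy

    messages = [{"role": "user", "content": "Help me {{task}}"}]

    shallow = messages.copy()
    shallow[0]["content"] = "rendered"            # mutates the shared inner dict
    assert messages[0]["content"] == "rendered"   # the caller's message changed too

    deep = deepcopy(messages)
    deep[0]["content"] = "rendered again"         # only the deep copy changes
    assert messages[0]["content"] == "rendered"   # original left untouched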
The remaining 17 files listed above (+0 -0) are unchanged between versions.