not-again-ai 0.17.0__py3-none-any.whl → 0.19.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- not_again_ai/llm/__init__.py +0 -15
- not_again_ai/llm/chat_completion/interface.py +8 -1
- not_again_ai/llm/chat_completion/providers/anthropic_api.py +180 -0
- not_again_ai/llm/chat_completion/providers/gemini_api.py +237 -0
- not_again_ai/llm/chat_completion/types.py +3 -2
- not_again_ai/llm/image_gen/__init__.py +4 -0
- not_again_ai/llm/image_gen/interface.py +24 -0
- not_again_ai/llm/image_gen/providers/__init__.py +0 -0
- not_again_ai/llm/image_gen/providers/openai_api.py +144 -0
- not_again_ai/llm/image_gen/types.py +24 -0
- not_again_ai/llm/prompting/compile_prompt.py +3 -3
- {not_again_ai-0.17.0.dist-info → not_again_ai-0.19.0.dist-info}/METADATA +68 -165
- {not_again_ai-0.17.0.dist-info → not_again_ai-0.19.0.dist-info}/RECORD +18 -11
- {not_again_ai-0.17.0.dist-info → not_again_ai-0.19.0.dist-info}/WHEEL +1 -1
- {not_again_ai-0.17.0.dist-info → not_again_ai-0.19.0.dist-info/licenses}/LICENSE +0 -0
not_again_ai/llm/__init__.py
CHANGED
@@ -1,15 +0,0 @@
-import importlib.util
-
-if (
-    importlib.util.find_spec("liquid") is None
-    or importlib.util.find_spec("openai") is None
-    or importlib.util.find_spec("tiktoken") is None
-):
-    raise ImportError(
-        "not_again_ai.llm requires the 'llm' extra to be installed. "
-        "You can install it using 'pip install not_again_ai[llm]'."
-    )
-else:
-    import liquid  # noqa: F401
-    import openai  # noqa: F401
-    import tiktoken  # noqa: F401
not_again_ai/llm/chat_completion/interface.py
CHANGED
@@ -1,6 +1,8 @@
 from collections.abc import AsyncGenerator, Callable
 from typing import Any
 
+from not_again_ai.llm.chat_completion.providers.anthropic_api import anthropic_chat_completion
+from not_again_ai.llm.chat_completion.providers.gemini_api import gemini_chat_completion
 from not_again_ai.llm.chat_completion.providers.ollama_api import ollama_chat_completion, ollama_chat_completion_stream
 from not_again_ai.llm.chat_completion.providers.openai_api import openai_chat_completion, openai_chat_completion_stream
 from not_again_ai.llm.chat_completion.types import ChatCompletionChunk, ChatCompletionRequest, ChatCompletionResponse
@@ -15,6 +17,8 @@ def chat_completion(
     - `openai` - OpenAI
     - `azure_openai` - Azure OpenAI
     - `ollama` - Ollama
+    - `anthropic` - Anthropic
+    - `gemini` - Gemini
 
     Args:
         request: Request parameter object
@@ -28,6 +32,10 @@ def chat_completion(
         return openai_chat_completion(request, client)
     elif provider == "ollama":
        return ollama_chat_completion(request, client)
+    elif provider == "anthropic":
+        return anthropic_chat_completion(request, client)
+    elif provider == "gemini":
+        return gemini_chat_completion(request, client)
     else:
         raise ValueError(f"Provider {provider} not supported")
 
@@ -40,7 +48,6 @@ async def chat_completion_stream(
     """Stream a chat completion response from the given provider. Currently supported providers:
     - `openai` - OpenAI
     - `azure_openai` - Azure OpenAI
-    - `ollama` - Ollama
 
     Args:
         request: Request parameter object
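For orientation, a minimal usage sketch of the extended dispatch. The `chat_completion` re-export from the subpackage `__init__`, the model string, and the plain-dict message are assumptions, not verified against the full 0.19.0 API surface:

```python
# Hypothetical usage sketch; message shape and model name are assumptions.
from not_again_ai.llm.chat_completion import chat_completion  # re-export assumed
from not_again_ai.llm.chat_completion.providers.anthropic_api import anthropic_client
from not_again_ai.llm.chat_completion.types import ChatCompletionRequest

request = ChatCompletionRequest(
    model="claude-3-5-haiku-latest",  # assumed model name
    messages=[{"role": "user", "content": "Say hello."}],  # assumed coercible by pydantic
    max_completion_tokens=128,  # mapped to Anthropic's max_tokens by ANTHROPIC_PARAMETER_MAP
)
client = anthropic_client()  # falls back to the ANTHROPIC_API_KEY environment variable
response = chat_completion(request, provider="anthropic", client=client)
print(response.choices[0].message.content)
```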
not_again_ai/llm/chat_completion/providers/anthropic_api.py
ADDED
@@ -0,0 +1,180 @@
+from collections.abc import Callable
+import os
+import time
+from typing import Any
+
+from anthropic import Anthropic
+from anthropic.types import Message
+
+from not_again_ai.llm.chat_completion.types import (
+    AssistantMessage,
+    ChatCompletionChoice,
+    ChatCompletionRequest,
+    ChatCompletionResponse,
+    Function,
+    ToolCall,
+)
+
+ANTHROPIC_PARAMETER_MAP = {
+    "max_completion_tokens": "max_tokens",
+}
+
+
+def anthropic_chat_completion(request: ChatCompletionRequest, client: Callable[..., Any]) -> ChatCompletionResponse:
+    """Anthropic chat completion function.
+
+    TODO
+    - Image messages
+    - Thinking
+    - Citations
+    - Stop sequences
+    - Documents
+    """
+    kwargs = request.model_dump(mode="json", exclude_none=True)
+
+    # For each key in ANTHROPIC_PARAMETER_MAP:
+    # if the mapped value is not None, rename the key in kwargs to that value;
+    # if the mapped value is None, remove that key from kwargs.
+    for key, value in ANTHROPIC_PARAMETER_MAP.items():
+        if value is not None and key in kwargs:
+            kwargs[value] = kwargs.pop(key)
+        elif value is None and key in kwargs:
+            del kwargs[key]
+
+    # Handle messages:
+    # any system messages need to be removed from messages and concatenated into a single string (in order),
+    # any tool messages need to be converted to a special user message,
+    # and any assistant messages with tool calls need to be converted.
+    system = ""
+    new_messages = []
+    for message in kwargs["messages"]:
+        if message["role"] == "system":
+            system += message["content"] + "\n"
+        elif message["role"] == "tool":
+            new_messages.append(
+                {
+                    "role": "user",
+                    "content": [
+                        {
+                            "type": "tool_result",
+                            "tool_use_id": message["name"],
+                            "content": message["content"],
+                        }
+                    ],
+                }
+            )
+        elif message["role"] == "assistant":
+            content = []
+            if message.get("content", None):
+                content.append(
+                    {
+                        "type": "text",
+                        "content": message["content"],
+                    }
+                )
+            for tool_call in message.get("tool_calls", []):
+                content.append(
+                    {
+                        "type": "tool_use",
+                        "id": tool_call["id"],
+                        "name": tool_call["function"]["name"],
+                        "input": tool_call["function"]["arguments"],
+                    }
+                )
+            new_messages.append(
+                {
+                    "role": "assistant",
+                    "content": content,
+                }
+            )
+        else:
+            new_messages.append(message)
+    kwargs["messages"] = new_messages
+    system = system.strip()
+    if system:
+        kwargs["system"] = system
+
+    # Handle tool choice and parallel tool calls
+    if kwargs.get("tool_choice") is not None:
+        tool_choice_value = kwargs.pop("tool_choice")
+        tool_choice = {}
+        if tool_choice_value == "none":
+            tool_choice["type"] = "none"
+        elif tool_choice_value in ["auto", "any"]:
+            tool_choice["type"] = "auto"
+            if kwargs.get("parallel_tool_calls") is not None:
+                tool_choice["disable_parallel_tool_use"] = not kwargs["parallel_tool_calls"]  # type: ignore
+        else:
+            tool_choice["name"] = tool_choice_value
+            tool_choice["type"] = "tool"
+            if kwargs.get("parallel_tool_calls") is not None:
+                tool_choice["disable_parallel_tool_use"] = not kwargs["parallel_tool_calls"]  # type: ignore
+        kwargs["tool_choice"] = tool_choice
+    kwargs.pop("parallel_tool_calls", None)
+
+    start_time = time.time()
+    response: Message = client(**kwargs)
+    end_time = time.time()
+    response_duration = round(end_time - start_time, 4)
+
+    tool_calls: list[ToolCall] = []
+    assistant_message = ""
+    for block in response.content:
+        if block.type == "text":
+            assistant_message += block.text
+        elif block.type == "tool_use":
+            tool_calls.append(
+                ToolCall(
+                    id=block.id,
+                    function=Function(
+                        name=block.name,
+                        arguments=block.input,  # type: ignore
+                    ),
+                )
+            )
+
+    choice = ChatCompletionChoice(
+        message=AssistantMessage(
+            content=assistant_message,
+            tool_calls=tool_calls,
+        ),
+        finish_reason=response.stop_reason or "stop",
+    )
+
+    chat_completion_response = ChatCompletionResponse(
+        choices=[choice],
+        errors="",
+        completion_tokens=response.usage.output_tokens,
+        prompt_tokens=response.usage.input_tokens,
+        cache_read_input_tokens=response.usage.cache_read_input_tokens,
+        cache_creation_input_tokens=response.usage.cache_creation_input_tokens,
+        response_duration=response_duration,
+    )
+    return chat_completion_response
+
+
+def create_client_callable(client_class: type[Anthropic], **client_args: Any) -> Callable[..., Any]:
+    """Creates a callable that instantiates and uses an Anthropic client.
+
+    Args:
+        client_class: The Anthropic client class to instantiate
+        **client_args: Arguments to pass to the client constructor
+
+    Returns:
+        A callable that creates a client and returns completion results
+    """
+    filtered_args = {k: v for k, v in client_args.items() if v is not None}
+
+    def client_callable(**kwargs: Any) -> Any:
+        client = client_class(**filtered_args)
+        completion = client.beta.messages.create(**kwargs)
+        return completion
+
+    return client_callable
+
+
+def anthropic_client(api_key: str | None = None) -> Callable[..., Any]:
+    if not api_key:
+        api_key = os.environ.get("ANTHROPIC_API_KEY")
+    client_callable = create_client_callable(Anthropic, api_key=api_key)
+    return client_callable
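The file above follows a client-callable pattern: the interface layer receives a plain callable and never touches the SDK directly. A standalone sketch of the same pattern against the stable (non-beta) Messages endpoint, with the model name as a placeholder:

```python
import os
from collections.abc import Callable
from typing import Any

from anthropic import Anthropic


def make_messages_callable(api_key: str | None = None) -> Callable[..., Any]:
    """Return a callable that builds a client per call and forwards **kwargs."""
    resolved_key = api_key or os.environ.get("ANTHROPIC_API_KEY")

    def call(**kwargs: Any) -> Any:
        client = Anthropic(api_key=resolved_key)
        return client.messages.create(**kwargs)  # stable Messages API

    return call


# send = make_messages_callable()
# msg = send(model="claude-3-5-haiku-latest",  # placeholder model name
#            max_tokens=64,
#            messages=[{"role": "user", "content": "ping"}])
```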
not_again_ai/llm/chat_completion/providers/gemini_api.py
ADDED
@@ -0,0 +1,237 @@
+import base64
+from collections.abc import Callable
+import os
+import time
+from typing import Any
+
+from google import genai
+from google.genai import types
+from google.genai.types import FunctionCall, FunctionCallingConfigMode, GenerateContentResponse
+
+from not_again_ai.llm.chat_completion.types import (
+    AssistantMessage,
+    ChatCompletionChoice,
+    ChatCompletionRequest,
+    ChatCompletionResponse,
+    Function,
+    ImageContent,
+    Role,
+    TextContent,
+    ToolCall,
+)
+
+# This should be all of the options we want to support in types.GenerateContentConfig, that are not handled otherwise
+GEMINI_PARAMETER_MAP = {
+    "max_completion_tokens": "max_output_tokens",
+    "temperature": "temperature",
+    "top_p": "top_p",
+    "top_k": "top_k",
+}
+
+GEMINI_FINISH_REASON_MAP = {
+    "STOP": "stop",
+    "MAX_TOKENS": "max_tokens",
+    "SAFETY": "safety",
+    "RECITATION": "recitation",
+    "LANGUAGE": "language",
+    "OTHER": "other",
+    "BLOCKLIST": "blocklist",
+    "PROHIBITED_CONTENT": "prohibited_content",
+    "SPII": "spii",
+    "MALFORMED_FUNCTION_CALL": "malformed_function_call",
+    "IMAGE_SAFETY": "image_safety",
+}
+
+
+def gemini_chat_completion(request: ChatCompletionRequest, client: Callable[..., Any]) -> ChatCompletionResponse:
+    """Experimental Gemini chat completion function."""
+    # Handle messages:
+    # any system messages need to be removed from messages and concatenated into a single string (in order).
+    system = ""
+    contents = []
+    for message in request.messages:
+        if message.role == "system":
+            # Handle both string content and structured content
+            if isinstance(message.content, str):
+                system += message.content + "\n"
+            else:
+                # If it's a list of content parts, extract the text content
+                for part in message.content:
+                    if hasattr(part, "text"):
+                        system += part.text + "\n"
+        elif message.role == "tool":
+            tool_name = message.name if message.name is not None else ""
+            function_response_part = types.Part.from_function_response(
+                name=tool_name,
+                response={"result": message.content},
+            )
+            contents.append(
+                types.Content(
+                    role="user",
+                    parts=[function_response_part],
+                )
+            )
+        elif message.role == "assistant":
+            if message.content and isinstance(message.content, str):
+                contents.append(types.Content(role="model", parts=[types.Part(text=message.content)]))
+            function_parts = []
+            if isinstance(message, AssistantMessage) and message.tool_calls:
+                for tool_call in message.tool_calls:
+                    function_call_part = types.Part(
+                        function_call=FunctionCall(
+                            id=tool_call.id,
+                            name=tool_call.function.name,
+                            args=tool_call.function.arguments,
+                        )
+                    )
+                    function_parts.append(function_call_part)
+            if function_parts:
+                contents.append(types.Content(role="model", parts=function_parts))
+        elif message.role == "user":
+            if isinstance(message.content, str):
+                contents.append(types.Content(role="user", parts=[types.Part(text=message.content)]))
+            elif isinstance(message.content, list):
+                parts = []
+                for part in message.content:
+                    if isinstance(part, TextContent):
+                        parts.append(types.Part(text=part.text))
+                    elif isinstance(part, ImageContent):
+                        # Extract MIME type and data from the data URI
+                        uri_parts = part.image_url.url.split(",", 1)
+                        if len(uri_parts) == 2:
+                            mime_type = uri_parts[0].split(":")[1].split(";")[0]
+                            base64_data = uri_parts[1]
+                            image_data = base64.b64decode(base64_data)
+                            parts.append(types.Part.from_bytes(mime_type=mime_type, data=image_data))
+                contents.append(types.Content(role="user", parts=parts))
+
+    kwargs: dict[str, Any] = {}
+    kwargs["contents"] = contents
+    kwargs["model"] = request.model
+    config: dict[str, Any] = {}
+    config["system_instruction"] = system.rstrip()
+    config["automatic_function_calling"] = {"disable": True}
+
+    # Handle the possible tool choice options
+    if request.tool_choice:
+        tool_choice = request.tool_choice
+        if tool_choice == "auto":
+            config["tool_config"] = types.FunctionCallingConfig(mode=FunctionCallingConfigMode.AUTO)
+        elif tool_choice == "any":
+            config["tool_config"] = types.FunctionCallingConfig(mode=FunctionCallingConfigMode.ANY)
+        elif tool_choice == "none":
+            config["tool_config"] = types.FunctionCallingConfig(mode=FunctionCallingConfigMode.NONE)
+        elif isinstance(tool_choice, list):
+            config["tool_config"] = types.FunctionCallingConfig(
+                mode=FunctionCallingConfigMode.ANY, allowed_function_names=tool_choice
+            )
+        elif tool_choice not in (None, "auto", "any", "none"):
+            config["tool_config"] = types.FunctionCallingConfig(
+                mode=FunctionCallingConfigMode.ANY, allowed_function_names=[tool_choice]
+            )
+
+    # Handle tools
+    tools = []
+    for tool in request.tools or []:
+        tools.append(types.Tool(function_declarations=[tool]))  # type: ignore
+    if tools:
+        config["tools"] = tools
+
+    # Everything else defined in GEMINI_PARAMETER_MAP goes into kwargs["config"]
+    request_kwargs = request.model_dump(mode="json", exclude_none=True)
+    for key, value in GEMINI_PARAMETER_MAP.items():
+        if value is not None and key in request_kwargs:
+            config[value] = request_kwargs.pop(key)
+
+    kwargs["config"] = types.GenerateContentConfig(**config)
+
+    start_time = time.time()
+    response: GenerateContentResponse = client(**kwargs)
+    end_time = time.time()
+    response_duration = round(end_time - start_time, 4)
+
+    finish_reason = "other"
+    if response.candidates and response.candidates[0].finish_reason:
+        finish_reason_str = str(response.candidates[0].finish_reason)
+        finish_reason = GEMINI_FINISH_REASON_MAP.get(finish_reason_str, "other")
+
+    tool_calls: list[ToolCall] = []
+    tool_call_objs = response.function_calls
+    if tool_call_objs:
+        for tool_call_obj in tool_call_objs:
+            tool_call_id = tool_call_obj.id if tool_call_obj.id else ""
+            tool_calls.append(
+                ToolCall(
+                    id=tool_call_id,
+                    function=Function(
+                        name=tool_call_obj.name if tool_call_obj.name is not None else "",
+                        arguments=tool_call_obj.args if tool_call_obj.args is not None else {},
+                    ),
+                )
+            )
+
+    assistant_message = ""
+    if (
+        response.candidates
+        and response.candidates[0].content
+        and response.candidates[0].content.parts
+        and response.candidates[0].content.parts[0].text
+    ):
+        assistant_message = response.candidates[0].content.parts[0].text
+
+    choice = ChatCompletionChoice(
+        message=AssistantMessage(
+            role=Role.ASSISTANT,
+            content=assistant_message,
+            tool_calls=tool_calls,
+        ),
+        finish_reason=finish_reason,
+    )
+
+    completion_tokens = 0
+    # Add null check for usage_metadata
+    if response.usage_metadata is not None:
+        if response.usage_metadata.thoughts_token_count:
+            completion_tokens = response.usage_metadata.thoughts_token_count
+        if response.usage_metadata.candidates_token_count:
+            completion_tokens += response.usage_metadata.candidates_token_count
+
+    # Set a safe default for prompt_tokens
+    prompt_tokens = 0
+    if response.usage_metadata is not None and response.usage_metadata.prompt_token_count:
+        prompt_tokens = response.usage_metadata.prompt_token_count
+
+    chat_completion_response = ChatCompletionResponse(
+        choices=[choice],
+        completion_tokens=completion_tokens,
+        prompt_tokens=prompt_tokens,
+        response_duration=response_duration,
+    )
+    return chat_completion_response
+
+
+def create_client_callable(client_class: type[genai.Client], **client_args: Any) -> Callable[..., Any]:
+    """Creates a callable that instantiates and uses a Google genai client.
+
+    Args:
+        client_class: The Google genai client class to instantiate
+        **client_args: Arguments to pass to the client constructor
+
+    Returns:
+        A callable that creates a client and returns completion results
+    """
+    filtered_args = {k: v for k, v in client_args.items() if v is not None}
+
+    def client_callable(**kwargs: Any) -> Any:
+        client = client_class(**filtered_args)
+        completion = client.models.generate_content(**kwargs)
+        return completion
+
+    return client_callable
+
+
+def gemini_client(api_key: str | None = None) -> Callable[..., Any]:
+    if not api_key:
+        api_key = os.environ.get("GEMINI_API_KEY")
+    client_callable = create_client_callable(genai.Client, api_key=api_key)
+    return client_callable
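User image parts arrive as base64 data URIs and are unpacked before being handed to `types.Part.from_bytes`; the parsing step in isolation:

```python
import base64


def split_data_uri(uri: str) -> tuple[str, bytes]:
    """Split 'data:image/png;base64,<payload>' into (mime_type, raw_bytes)."""
    header, b64_payload = uri.split(",", 1)
    mime_type = header.split(":")[1].split(";")[0]
    return mime_type, base64.b64decode(b64_payload)


# Round-trip check with a tiny fake payload (illustrative only).
payload = base64.b64encode(b"\x89PNG...").decode()
mime, data = split_data_uri(f"data:image/png;base64,{payload}")
assert mime == "image/png" and data.startswith(b"\x89PNG")
```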
not_again_ai/llm/chat_completion/types.py
CHANGED
@@ -138,8 +138,7 @@ class ChatCompletionRequest(BaseModel):
 
 class ChatCompletionChoice(BaseModel):
     message: AssistantMessage
-    finish_reason:
-
+    finish_reason: str
     json_message: dict[str, Any] | None = Field(default=None)
     logprobs: list[dict[str, Any] | list[dict[str, Any]]] | None = Field(default=None)
 
@@ -155,6 +154,8 @@ class ChatCompletionResponse(BaseModel):
     prompt_tokens: int
     completion_detailed_tokens: dict[str, int] | None = Field(default=None)
     prompt_detailed_tokens: dict[str, int] | None = Field(default=None)
+    cache_read_input_tokens: int | None = Field(default=None)
+    cache_creation_input_tokens: int | None = Field(default=None)
     response_duration: float
 
     system_fingerprint: str | None = Field(default=None)
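Since only the Anthropic path above populates the two new cache fields, downstream code should treat `None` as "not reported" rather than zero. A small sketch of that distinction:

```python
from not_again_ai.llm.chat_completion.types import ChatCompletionResponse


def cache_summary(response: ChatCompletionResponse) -> str:
    """Distinguish 'provider reported nothing' (None) from 'reported zero' (0)."""
    read = response.cache_read_input_tokens
    created = response.cache_creation_input_tokens
    if read is None and created is None:
        return "no cache accounting reported"
    return f"cache read: {read or 0} tokens, cache created: {created or 0} tokens"
```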
not_again_ai/llm/image_gen/interface.py
ADDED
@@ -0,0 +1,24 @@
+from collections.abc import Callable
+from typing import Any
+
+from not_again_ai.llm.image_gen.providers.openai_api import openai_create_image
+from not_again_ai.llm.image_gen.types import ImageGenRequest, ImageGenResponse
+
+
+def create_image(request: ImageGenRequest, provider: str, client: Callable[..., Any]) -> ImageGenResponse:
+    """Get an image response from the given provider. Currently supported providers:
+    - `openai` - OpenAI
+    - `azure_openai` - Azure OpenAI
+
+    Args:
+        request: Request parameter object
+        provider: The supported provider name
+        client: Client information, see the provider's implementation for what can be provided
+
+    Returns:
+        ImageGenResponse: The image generation response.
+    """
+    if provider == "openai" or provider == "azure_openai":
+        return openai_create_image(request, client)
+    else:
+        raise ValueError(f"Provider {provider} not supported")
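A hypothetical end-to-end sketch of the new interface. The model name is a placeholder, and the request fields are taken from the `ImageGenRequest` definition later in this diff:

```python
from pathlib import Path

from not_again_ai.llm.image_gen.interface import create_image
from not_again_ai.llm.image_gen.providers.openai_api import openai_client
from not_again_ai.llm.image_gen.types import ImageGenRequest

request = ImageGenRequest(
    prompt="a watercolor fox in the snow",
    model="gpt-image-1",  # placeholder model name
    n=1,
    size="1024x1024",
)
client = openai_client()  # key resolved from OPENAI_API_KEY by the OpenAI SDK
response = create_image(request, provider="openai", client=client)
Path("fox.png").write_bytes(response.images[0])  # images are already-decoded bytes
```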
not_again_ai/llm/image_gen/providers/__init__.py
File without changes
not_again_ai/llm/image_gen/providers/openai_api.py
ADDED
@@ -0,0 +1,144 @@
+import base64
+from collections.abc import Callable
+import time
+from typing import Any, Literal
+
+from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+from openai import AzureOpenAI, OpenAI
+from openai.types.images_response import ImagesResponse
+
+from not_again_ai.llm.image_gen.types import ImageGenRequest, ImageGenResponse
+
+
+def openai_create_image(request: ImageGenRequest, client: Callable[..., Any]) -> ImageGenResponse:
+    """Create an image using the OpenAI API.
+
+    Args:
+        request (ImageGenRequest): The request object containing parameters for image generation.
+        client (Callable[..., Any]): The OpenAI client callable.
+
+    Returns:
+        ImageGenResponse: The response object containing the generated image and metadata.
+    """
+    kwargs = request.model_dump(exclude_none=True)
+    if kwargs.get("images"):
+        kwargs["image"] = kwargs.pop("images", None)
+
+    start_time = time.time()
+    response: ImagesResponse = client(**kwargs)
+    end_time = time.time()
+    response_duration = round(end_time - start_time, 4)
+
+    images: list[bytes] = []
+    if response.data:
+        for data in response.data:
+            images.append(base64.b64decode(data.b64_json or ""))
+
+    input_tokens = response.usage.input_tokens if response.usage else -1
+    output_tokens = response.usage.output_tokens if response.usage else -1
+    input_tokens_details = response.usage.input_tokens_details.to_dict() if response.usage else {}
+    image_gen_response = ImageGenResponse(
+        images=images,
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
+        input_tokens_details=input_tokens_details,
+        response_duration=response_duration,
+    )
+    return image_gen_response
+
+
+def create_client_callable(client_class: type[OpenAI | AzureOpenAI], **client_args: Any) -> Callable[..., Any]:
+    """
+    Creates the correct callable depending on the parameters provided.
+    """
+    filtered_args = {k: v for k, v in client_args.items() if v is not None}
+
+    def client_callable(**kwargs: Any) -> Any:
+        client = client_class(**filtered_args)
+        # If mask or image is not None, use client.images.edit instead of client.images.generate
+        if kwargs.get("mask") or kwargs.get("image"):
+            completion = client.images.edit(**kwargs)
+        else:
+            completion = client.images.generate(**kwargs)
+        return completion
+
+    return client_callable
+
+
+class InvalidOAIAPITypeError(Exception):
+    """Raised when an invalid OAIAPIType string is provided."""
+
+
+def openai_client(
+    api_type: Literal["openai", "azure_openai"] = "openai",
+    api_key: str | None = None,
+    organization: str | None = None,
+    aoai_api_version: str = "2024-06-01",
+    azure_endpoint: str | None = None,
+    timeout: float | None = None,
+    max_retries: int | None = None,
+) -> Callable[..., Any]:
+    """Create an OpenAI or Azure OpenAI client instance based on the specified API type and other provided parameters.
+
+    It is preferred to use RBAC authentication for Azure OpenAI. You must be signed in with the Azure CLI and have the correct role assigned.
+    See https://techcommunity.microsoft.com/t5/microsoft-developer-community/using-keyless-authentication-with-azure-openai/ba-p/4111521
+
+    Args:
+        api_type (str, optional): Type of the API to be used. Accepted values are 'openai' or 'azure_openai'.
+            Defaults to 'openai'.
+        api_key (str, optional): The API key to authenticate the client. If not provided,
+            OpenAI automatically uses `OPENAI_API_KEY` from the environment.
+            If provided for Azure OpenAI, it will be used for authentication instead of the Azure AD token provider.
+        organization (str, optional): The ID of the organization. If not provided,
+            OpenAI automatically uses `OPENAI_ORG_ID` from the environment.
+        aoai_api_version (str, optional): Only applicable if using Azure OpenAI https://learn.microsoft.com/azure/ai-services/openai/reference#rest-api-versioning
+        azure_endpoint (str, optional): The endpoint to use for Azure OpenAI.
+        timeout (float, optional): By default requests time out after 10 minutes.
+        max_retries (int, optional): Certain errors are automatically retried 2 times by default,
+            with a short exponential backoff. Connection errors (for example, due to a network connectivity problem),
+            408 Request Timeout, 409 Conflict, 429 Rate Limit, and >=500 Internal errors are all retried by default.
+
+    Returns:
+        Callable[..., Any]: A callable that creates a client and returns completion results
+
+
+    Raises:
+        InvalidOAIAPITypeError: If an invalid API type string is provided.
+        NotImplementedError: If the specified API type is recognized but not yet supported (e.g., 'azure_openai').
+    """
+    if api_type not in ["openai", "azure_openai"]:
+        raise InvalidOAIAPITypeError(f"Invalid OAIAPIType: {api_type}. Must be 'openai' or 'azure_openai'.")
+
+    if api_type == "openai":
+        return create_client_callable(
+            OpenAI,
+            api_key=api_key,
+            organization=organization,
+            timeout=timeout,
+            max_retries=max_retries,
+        )
+    elif api_type == "azure_openai":
+        if api_key:
+            return create_client_callable(
+                AzureOpenAI,
+                api_version=aoai_api_version,
+                azure_endpoint=azure_endpoint,
+                api_key=api_key,
+                timeout=timeout,
+                max_retries=max_retries,
+            )
+        else:
+            azure_credential = DefaultAzureCredential()
+            ad_token_provider = get_bearer_token_provider(
+                azure_credential, "https://cognitiveservices.azure.com/.default"
+            )
+            return create_client_callable(
+                AzureOpenAI,
+                api_version=aoai_api_version,
+                azure_endpoint=azure_endpoint,
+                azure_ad_token_provider=ad_token_provider,
+                timeout=timeout,
+                max_retries=max_retries,
+            )
+    else:
+        raise NotImplementedError(f"API type '{api_type}' is invalid.")
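The docstring above recommends keyless RBAC auth for Azure; in isolation that path looks roughly like this (the endpoint is a placeholder, and you must be signed in via the Azure CLI with an appropriate role assignment):

```python
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from openai import AzureOpenAI

# The token provider exchanges your Azure CLI / managed identity credential
# for a Cognitive Services bearer token on each request.
token_provider = get_bearer_token_provider(
    DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
)
client = AzureOpenAI(
    api_version="2024-06-01",
    azure_endpoint="https://YOUR-RESOURCE.openai.azure.com",  # placeholder
    azure_ad_token_provider=token_provider,
)
```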
not_again_ai/llm/image_gen/types.py
ADDED
@@ -0,0 +1,24 @@
+from pathlib import Path
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+
+class ImageGenRequest(BaseModel):
+    prompt: str
+    model: str
+    images: list[Path] | None = Field(default=None)
+    mask: Path | None = Field(default=None)
+    n: int = Field(default=1)
+    quality: str | None = Field(default=None)
+    size: str | None = Field(default=None)
+    background: str | None = Field(default=None)
+    moderation: str | None = Field(default=None)
+
+
+class ImageGenResponse(BaseModel):
+    images: list[bytes]
+    input_tokens: int
+    output_tokens: int
+    response_duration: float
+    input_tokens_details: dict[str, Any] | None = Field(default=None)
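Supplying `images`/`mask` is what routes a request to the edit endpoint in the provider above; a sketch with placeholder paths and an assumed model name:

```python
from pathlib import Path

from not_again_ai.llm.image_gen.types import ImageGenRequest

edit_request = ImageGenRequest(
    prompt="replace the sky with a pink sunset",
    model="gpt-image-1",         # assumed model name
    images=[Path("photo.png")],  # placeholder source image
    mask=Path("sky_mask.png"),   # placeholder mask; per OpenAI's convention,
    n=2,                         # transparent areas mark the editable region
)
```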
not_again_ai/llm/prompting/compile_prompt.py
CHANGED
@@ -5,7 +5,7 @@ import mimetypes
 from pathlib import Path
 from typing import Any
 
-from liquid import
+from liquid import render
 from openai.lib._pydantic import to_strict_json_schema
 from pydantic import BaseModel
 
@@ -15,7 +15,7 @@ from not_again_ai.llm.chat_completion.types import MessageT
 def _apply_templates(value: Any, variables: dict[str, str]) -> Any:
     """Recursively applies Liquid templating to all string fields within the given value."""
     if isinstance(value, str):
-        return
+        return render(value, **variables)
     elif isinstance(value, list):
         return [_apply_templates(item, variables) for item in value]
     elif isinstance(value, dict):
@@ -31,7 +31,7 @@ def _apply_templates(value: Any, variables: dict[str, str]) -> Any:
 
 def compile_messages(messages: Sequence[MessageT], variables: dict[str, str]) -> Sequence[MessageT]:
     """Compiles messages using Liquid templating and the provided variables.
-    Calls
+    Calls render(content_part, **variables) on each text content part.
 
     Args:
        messages: List of MessageT where content can contain Liquid templates.
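The restored `render` call is python-liquid's top-level convenience function, imported directly in the diff above; a quick illustration of what `_apply_templates` now does per string:

```python
from liquid import render

variables = {"name": "Ada", "topic": "prompt templating"}
print(render("Hello {{ name }}, let's talk about {{ topic }}.", **variables))
# Hello Ada, let's talk about prompt templating.
```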
{not_again_ai-0.17.0.dist-info → not_again_ai-0.19.0.dist-info}/METADATA
CHANGED
@@ -1,11 +1,13 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: not-again-ai
-Version: 0.
+Version: 0.19.0
 Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
-
-
-
-
+Project-URL: Homepage, https://github.com/DaveCoDev/not-again-ai
+Project-URL: Documentation, https://davecodev.github.io/not-again-ai/
+Project-URL: Repository, https://github.com/DaveCoDev/not-again-ai
+Author-email: DaveCoDev <dave.co.dev@gmail.com>
+License-Expression: MIT
+License-File: LICENSE
 Classifier: Development Status :: 3 - Alpha
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Science/Research
@@ -16,52 +18,56 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Typing :: Typed
+Requires-Python: >=3.11
+Requires-Dist: loguru<1.0,>=0.7
+Requires-Dist: pydantic<3.0,>=2.11
 Provides-Extra: data
+Requires-Dist: playwright<2.0,>=1.51; extra == 'data'
+Requires-Dist: pytest-playwright<1.0,>=0.7; extra == 'data'
 Provides-Extra: llm
+Requires-Dist: anthropic<1.0,>=0.50; extra == 'llm'
+Requires-Dist: azure-identity<2.0,>=1.21; extra == 'llm'
+Requires-Dist: google-genai<2.0,>1.12; extra == 'llm'
+Requires-Dist: ollama<1.0,>=0.4; extra == 'llm'
+Requires-Dist: openai<2.0,>=1.76; extra == 'llm'
+Requires-Dist: python-liquid<3.0,>=2.0; extra == 'llm'
+Requires-Dist: tiktoken<1.0,>=0.9; extra == 'llm'
 Provides-Extra: statistics
+Requires-Dist: numpy<3.0,>=2.2; extra == 'statistics'
+Requires-Dist: scikit-learn<2.0,>=1.6; extra == 'statistics'
+Requires-Dist: scipy>=1.15; extra == 'statistics'
 Provides-Extra: viz
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist: numpy (>=2.2) ; extra == "viz"
-Requires-Dist: ollama (>=0.4) ; extra == "llm"
-Requires-Dist: openai (>=1) ; extra == "llm"
-Requires-Dist: pandas (>=2.2) ; extra == "viz"
-Requires-Dist: playwright (>=1.50) ; extra == "data"
-Requires-Dist: pydantic (>=2.10)
-Requires-Dist: pytest-playwright (>=0.7) ; extra == "data"
-Requires-Dist: python-liquid (>=1.12) ; extra == "llm"
-Requires-Dist: scikit-learn (>=1.6) ; extra == "statistics"
-Requires-Dist: scipy (>=1.15) ; extra == "statistics"
-Requires-Dist: seaborn (>=0.13) ; extra == "viz"
-Requires-Dist: tiktoken (>=0.8) ; extra == "llm"
-Project-URL: Documentation, https://davecodev.github.io/not-again-ai/
-Project-URL: Homepage, https://github.com/DaveCoDev/not-again-ai
-Project-URL: Repository, https://github.com/DaveCoDev/not-again-ai
+Requires-Dist: numpy<3.0,>=2.2; extra == 'viz'
+Requires-Dist: pandas<3.0,>=2.2; extra == 'viz'
+Requires-Dist: seaborn<1.0,>=0.13; extra == 'viz'
 Description-Content-Type: text/markdown
 
 # not-again-ai
 
 [![GitHub Actions][github-actions-badge]](https://github.com/johnthagen/python-blueprint/actions)
-[![
+[![uv][uv-badge]](https://github.com/astral-sh/uv)
 [![Nox][nox-badge]](https://github.com/wntrblm/nox)
 [![Ruff][ruff-badge]](https://github.com/astral-sh/ruff)
 [![Type checked with mypy][mypy-badge]](https://mypy-lang.org/)
 
 [github-actions-badge]: https://github.com/johnthagen/python-blueprint/workflows/python/badge.svg
-[
+[uv-badge]: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/uv/main/assets/badge/v0.json
 [nox-badge]: https://img.shields.io/badge/%F0%9F%A6%8A-Nox-D85E00.svg
 [black-badge]: https://img.shields.io/badge/code%20style-black-000000.svg
 [ruff-badge]: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json
 [mypy-badge]: https://www.mypy-lang.org/static/mypy_badge.svg
 
-**not-again-ai** is a collection of various building blocks that come up over and over again when developing AI products.
+**not-again-ai** is a collection of various building blocks that come up over and over again when developing AI products.
+The key goals of this package are to have simple, yet flexible interfaces and to minimize dependencies.
+It is encouraged to also **a)** use this as a template for your own Python package.
+**b)** instead of installing the package, copy and paste functions into your own projects.
+We make this easier by limiting the number of dependencies and use an MIT license.
 
-**Documentation** available within individual **[notebooks](notebooks)
+**Documentation** available within individual **[notebooks](notebooks)** or docstrings within the source code.
 
 # Installation
 
-Requires: Python 3.11, or 3.12
+Requires: Python 3.11, or 3.12 which can be installed with [uv](https://docs.astral.sh/uv/getting-started/installation/) by running the command `uv python install 3.12`
 
 Install the entire package from [PyPI](https://pypi.org/project/not-again-ai/) with:
@@ -111,52 +117,35 @@ The package is split into subpackages, so you can install only the parts you nee
 
 # Development Information
 
-
+This package uses [uv](https://docs.astral.sh/uv/) to manage dependencies and
+isolated [Python virtual environments](https://docs.python.org/3/library/venv.html).
 
-
-
-
+To proceed,
+[install uv globally](https://docs.astral.sh/uv/getting-started/installation/)
+onto your system.
 
-
+To install a specific version of Python:
 
-```
-
-```
-
-(Optional) configure Poetry to use an in-project virtual environment.
-```bash
-$ poetry config virtualenvs.in-project true
+```shell
+uv python install 3.12
 ```
 
 ## Dependencies
 
 Dependencies are defined in [`pyproject.toml`](./pyproject.toml) and specific versions are locked
-into [`
+into [`uv.lock`](./uv.lock). This allows for exact reproducible environments across
 all machines that use the project, both during development and in production.
 
-To
-
-```bash
-$ poetry update
-```
-
-To install all dependencies (with all extra dependencies) into an isolated virtual environment:
-
-```bash
-$ poetry sync --all-extras
-```
+To install all dependencies into an isolated virtual environment:
 
-
-
-
-```bash
-$ poetry shell
+```shell
+uv sync --all-extras --all-groups
 ```
 
-To
+To upgrade all dependencies to their latest versions:
 
-```
-
+```shell
+uv lock --upgrade
 ```
 
 ## Packaging
@@ -164,48 +153,40 @@ To deactivate the environment:
 This project is designed as a Python package, meaning that it can be bundled up and redistributed
 as a single compressed file.
 
-Packaging is configured by
-
-- [`pyproject.toml`](./pyproject.toml)
+Packaging is configured by the [`pyproject.toml`](./pyproject.toml).
 
 To package the project as both a
 [source distribution](https://packaging.python.org/en/latest/flow/#the-source-distribution-sdist) and
 a [wheel](https://packaging.python.org/en/latest/specifications/binary-distribution-format/):
 
 ```bash
-$
+$ uv build
 ```
 
 This will generate `dist/not-again-ai-<version>.tar.gz` and `dist/not_again_ai-<version>-py3-none-any.whl`.
 
-Read more about the [advantages of wheels](https://pythonwheels.com/) to understand why generating
-wheel distributions are important.
 
 ## Publish Distributions to PyPI
 
 Source and wheel redistributable packages can
-be [published to PyPI](https://
+be [published to PyPI](https://docs.astral.sh/uv/guides/package/) or installed
 directly from the filesystem using `pip`.
 
-```
-
+```shell
+uv publish
 ```
 
 # Enforcing Code Quality
 
-Automated code quality checks are performed using
-
-[`
-
-guide checking, type checking and documentation generation.
-
-> Note: `nox` is installed into the virtual environment automatically by the `poetry sync`
-> command above. Run `poetry shell` to activate the virtual environment.
+Automated code quality checks are performed using [Nox](https://nox.thea.codes/en/stable/). Nox
+will automatically create virtual environments and run commands based on
+[`noxfile.py`](./noxfile.py) for unit testing, PEP 8 style guide checking, type checking and
+documentation generation.
 
 To run all default sessions:
 
-```
-
+```shell
+uv run nox
 ```
 
 ## Unit Testing
@@ -237,7 +218,7 @@ pytest and code coverage are configured in [`pyproject.toml`](./pyproject.toml).
 To run selected tests:
 
 ```bash
-(.venv) $ nox -s test -- -k "test_web"
+(.venv) $ uv run nox -s test -- -k "test_web"
 ```
 
 ## Code Style Checking
@@ -251,13 +232,13 @@ code. PEP 8 code compliance is verified using [Ruff][Ruff]. Ruff is configured i
 To lint code, run:
 
 ```bash
-(.venv) $ nox -s lint
+(.venv) $ uv run nox -s lint
 ```
 
 To automatically fix fixable lint errors, run:
 
 ```bash
-(.venv) $ nox -s lint_fix
+(.venv) $ uv run nox -s lint_fix
 ```
 
 ## Automated Code Formatting
@@ -267,13 +248,13 @@ To automatically fix fixable lint errors, run:
 To automatically format code, run:
 
 ```bash
-(.venv) $ nox -s fmt
+(.venv) $ uv run nox -s fmt
 ```
 
 To verify code has been formatted, such as in a CI job:
 
 ```bash
-(.venv) $ nox -s fmt_check
+(.venv) $ uv run nox -s fmt_check
 ```
 
 ## Type Checking
@@ -293,11 +274,9 @@ def factorial(n: int) -> int:
 mypy is configured in [`pyproject.toml`](./pyproject.toml). To type check code, run:
 
 ```bash
-(.venv) $ nox -s type_check
+(.venv) $ uv run nox -s type_check
 ```
 
-See also [awesome-python-typing](https://github.com/typeddjango/awesome-python-typing).
-
 ### Distributing Type Annotations
 
 [PEP 561](https://www.python.org/dev/peps/pep-0561/) defines how a Python package should
@@ -313,7 +292,7 @@ installed package to indicate that inline type annotations should be checked.
 Check for typos using [typos](https://github.com/crate-ci/typos)
 
 ```bash
-(.venv) $ nox -s typos
+(.venv) $ uv run nox -s typos
 ```
 
 ## Continuous Integration
@@ -331,81 +310,5 @@ Install the [Ruff extension](https://marketplace.visualstudio.com/items?itemName
 
 Default settings are configured in [`.vscode/settings.json`](./.vscode/settings.json) which will enable Ruff with consistent settings.
 
-# Generating Documentation
-
-## Generating a User Guide
-
-[Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) is a powerful static site
-generator that combines easy-to-write Markdown, with a number of Markdown extensions that increase
-the power of Markdown. This makes it a great fit for user guides and other technical documentation.
-
-The example MkDocs project included in this project is configured to allow the built documentation
-to be hosted at any URL or viewed offline from the file system.
-
-To build the user guide, run,
-
-```bash
-(.venv) $ nox -s docs
-```
-
-and open `docs/user_guide/site/index.html` using a web browser.
-
-To build the user guide, additionally validating external URLs, run:
-
-```bash
-(.venv) $ nox -s docs_check_urls
-```
-
-To build the user guide in a format suitable for viewing directly from the file system, run:
-
-```bash
-(.venv) $ nox -s docs_offline
-```
-
-To build and serve the user guide with automatic rebuilding as you change the contents,
-run:
-
-```bash
-(.venv) $ nox -s docs_serve
-```
-
-and open <http://127.0.0.1:8000> in a browser.
-
-Each time the `main` Git branch is updated, the
-[`.github/workflows/pages.yml`](.github/workflows/pages.yml) GitHub Action will
-automatically build the user guide and publish it to [GitHub Pages](https://pages.github.com/).
-This is configured in the `docs_github_pages` Nox session.
-
-## Generating API Documentation
-
-This project uses [mkdocstrings](https://github.com/mkdocstrings/mkdocstrings) plugin for
-MkDocs, which renders
-[Google-style docstrings](https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html)
-into an MkDocs project. Google-style docstrings provide a good mix of easy-to-read docstrings in
-code as well as nicely-rendered output.
-
-```python
-"""Computes the factorial through a recursive algorithm.
-
-Args:
-    n: A positive input value.
-
-Raises:
-    InvalidFactorialError: If n is less than 0.
-
-Returns:
-    Computed factorial.
-"""
-```
-
-## Misc
-
-If you get a `Failed to create the collection: Prompt dismissed..` error when running `poetry update` on Ubuntu, try setting the following environment variable:
-
-```bash
-export PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring
-```
-
 # Attributions
 [python-blueprint](https://github.com/johnthagen/python-blueprint) for the Python package skeleton.
-
{not_again_ai-0.17.0.dist-info → not_again_ai-0.19.0.dist-info}/RECORD
CHANGED
@@ -1,29 +1,36 @@
 not_again_ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+not_again_ai/py.typed,sha256=UaCuPFa3H8UAakbt-5G8SPacldTOGvJv18pPjUJ5gDY,93
 not_again_ai/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 not_again_ai/base/file_system.py,sha256=KNQmacO4Q__CQuq2oPzWrg3rQO48n3evglc9bNiP7KM,949
 not_again_ai/base/parallel.py,sha256=fcYhKBYBWvob84iKp3O93wvFFdXeidljZsShgBLTNGA,3448
 not_again_ai/data/__init__.py,sha256=1jF6mwvtB2PT7IEc3xpbRtZm3g3Lyf8zUqH4AEE4qlQ,244
 not_again_ai/data/web.py,sha256=wjx9cc33jcoJBGonYCIpwygPBFOwz7F-dx_ominmbnI,1838
-not_again_ai/llm/__init__.py,sha256=
+not_again_ai/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 not_again_ai/llm/chat_completion/__init__.py,sha256=HozawvdRkTFgq8XR16GJUHN1ukEa4Ya68wVPVrl-afs,250
-not_again_ai/llm/chat_completion/interface.py,sha256=
+not_again_ai/llm/chat_completion/interface.py,sha256=OU6ghG7RlveahkHZWdRHFg0uzbSrSh2Dz7u5-4rrypA,2700
+not_again_ai/llm/chat_completion/types.py,sha256=0pBo1Fgm__JU3NyMShGouAIolcANPpTfXn8WJHODlQw,5472
 not_again_ai/llm/chat_completion/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+not_again_ai/llm/chat_completion/providers/anthropic_api.py,sha256=Eix3_GgQvDyPr6pKfSDrfqRg_bTb0pqMI8fdQrx9e84,6211
+not_again_ai/llm/chat_completion/providers/gemini_api.py,sha256=ovyTssfN3achMr2Laa2Hu557CjYN6o7UkO5IGXg6lzk,9461
 not_again_ai/llm/chat_completion/providers/ollama_api.py,sha256=Puo2eE2VynvZOoqrUlNYtPgRGCRMVa8syc3TfBxS1hs,10661
 not_again_ai/llm/chat_completion/providers/openai_api.py,sha256=1wdeV50KYX_KIf2uofsICKYoHVSvj4kTRpS1Vuw3NSQ,17887
-not_again_ai/llm/chat_completion/types.py,sha256=yjSrcR9N5hrrMQAjzNvRIfQXQ-lVRgZfrIoKuhMbmjo,5399
 not_again_ai/llm/embedding/__init__.py,sha256=wscUfROukvw0M0vYccfaVTdXV0P-eICAT5mqM0LaHHc,182
 not_again_ai/llm/embedding/interface.py,sha256=Hj3UiktXEeCUeMwpIDtRkwBfKgaJSnJvclLNyjwUAtE,1144
+not_again_ai/llm/embedding/types.py,sha256=J4FFLx35Aow2kOaafDReeY9cUNqhWMjaAk5gXkX7SVk,506
 not_again_ai/llm/embedding/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 not_again_ai/llm/embedding/providers/ollama_api.py,sha256=m-OCis9WAUT2baGsGVPzejlive40eSNyO6tHmPh6joM,3201
 not_again_ai/llm/embedding/providers/openai_api.py,sha256=JFFqbq0O5snIEnr9VESdp5xehikQBPbs7nwyE6acFsY,5441
-not_again_ai/llm/
+not_again_ai/llm/image_gen/__init__.py,sha256=v31PgYdTxMQRRxXPFl40BW5Y8RSHrZuwabuD-yC9gfI,170
+not_again_ai/llm/image_gen/interface.py,sha256=XGE0aDvQwe-aWRuGNLMECO6KnMiK8qLv2APvr0hZ0tY,930
+not_again_ai/llm/image_gen/types.py,sha256=Qhdov5azWwmmbqqE3Ln7t-Fb_Ipyp8r3z0_80omyASc,672
+not_again_ai/llm/image_gen/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+not_again_ai/llm/image_gen/providers/openai_api.py,sha256=3IEhdId1UU_imvDebytTt0dSCjEEyPHf2o4vVt6RqSE,6198
 not_again_ai/llm/prompting/__init__.py,sha256=7YnHro1yH01FLGnao27WyqQDFjNYf9npE5UxoR9YrUU,84
-not_again_ai/llm/prompting/compile_prompt.py,sha256=
+not_again_ai/llm/prompting/compile_prompt.py,sha256=uBn655yTiQ325z1CUgnkU2k7ICIvaYRJOm50B7w2lSs,4683
 not_again_ai/llm/prompting/interface.py,sha256=SMKYabmu3zTWbEDukU6aLU_JQ88apeBWWOF_qZ0s3ww,1783
+not_again_ai/llm/prompting/types.py,sha256=xz70dnawL9rji7Zr1_mOekY-uUlvKJJf7k9nXJsOXc4,1219
 not_again_ai/llm/prompting/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 not_again_ai/llm/prompting/providers/openai_tiktoken.py,sha256=8YrEiK3ZHyKVGiVsJ_Rd6eVdISIvcub7ooj-HB7Prsc,4536
-not_again_ai/llm/prompting/types.py,sha256=xz70dnawL9rji7Zr1_mOekY-uUlvKJJf7k9nXJsOXc4,1219
-not_again_ai/py.typed,sha256=UaCuPFa3H8UAakbt-5G8SPacldTOGvJv18pPjUJ5gDY,93
 not_again_ai/statistics/__init__.py,sha256=gA8r9JQFbFSN0ykrHy4G1IQgcky4f2eM5Oo24oVI5Ik,466
 not_again_ai/statistics/dependence.py,sha256=4xaniMkLlTjdXcNVXdwepEAiZ-WaaGYfR9haJC1lU2Q,4434
 not_again_ai/viz/__init__.py,sha256=MeaWae_QRbDEHJ4MWYoY1-Ad6S0FhSDaRhQncS2cpSc,447
@@ -32,7 +39,7 @@ not_again_ai/viz/distributions.py,sha256=OyWwJaNI6lMRm_iSrhq-CORLNvXfeuLSgDtVo3u
 not_again_ai/viz/scatterplot.py,sha256=5CUOWeknbBOaZPeX9oPin5sBkRKEwk8qeFH45R-9LlY,2292
 not_again_ai/viz/time_series.py,sha256=pOGZqXp_2nd6nKo-PUQNCtmMh__69jxQ6bQibTGLwZA,5212
 not_again_ai/viz/utils.py,sha256=hN7gwxtBt3U6jQni2K8j5m5pCXpaJDoNzGhBBikEU28,238
-not_again_ai-0.
-not_again_ai-0.
-not_again_ai-0.
-not_again_ai-0.
+not_again_ai-0.19.0.dist-info/METADATA,sha256=LeCIas912YMtvKEJcChoPqYrM3ay_EurUZehvlQ9t8o,12004
+not_again_ai-0.19.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+not_again_ai-0.19.0.dist-info/licenses/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
+not_again_ai-0.19.0.dist-info/RECORD,,

{not_again_ai-0.17.0.dist-info → not_again_ai-0.19.0.dist-info/licenses}/LICENSE
File without changes