not-again-ai: not_again_ai-0.14.0-py3-none-any.whl → not_again_ai-0.16.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- not_again_ai/llm/chat_completion/__init__.py +4 -0
- not_again_ai/llm/chat_completion/interface.py +32 -0
- not_again_ai/llm/chat_completion/providers/ollama_api.py +227 -0
- not_again_ai/llm/chat_completion/providers/openai_api.py +290 -0
- not_again_ai/llm/chat_completion/types.py +145 -0
- not_again_ai/llm/embedding/__init__.py +4 -0
- not_again_ai/llm/embedding/interface.py +28 -0
- not_again_ai/llm/embedding/providers/ollama_api.py +87 -0
- not_again_ai/llm/embedding/providers/openai_api.py +126 -0
- not_again_ai/llm/embedding/types.py +23 -0
- not_again_ai/llm/prompting/__init__.py +3 -0
- not_again_ai/llm/prompting/compile_prompt.py +125 -0
- not_again_ai/llm/prompting/interface.py +46 -0
- not_again_ai/llm/prompting/providers/openai_tiktoken.py +122 -0
- not_again_ai/llm/prompting/types.py +43 -0
- {not_again_ai-0.14.0.dist-info → not_again_ai-0.16.0.dist-info}/METADATA +24 -40
- not_again_ai-0.16.0.dist-info/RECORD +38 -0
- {not_again_ai-0.14.0.dist-info → not_again_ai-0.16.0.dist-info}/WHEEL +1 -1
- not_again_ai/llm/gh_models/azure_ai_client.py +0 -20
- not_again_ai/llm/gh_models/chat_completion.py +0 -81
- not_again_ai/llm/openai_api/chat_completion.py +0 -339
- not_again_ai/llm/openai_api/context_management.py +0 -70
- not_again_ai/llm/openai_api/embeddings.py +0 -62
- not_again_ai/llm/openai_api/openai_client.py +0 -78
- not_again_ai/llm/openai_api/prompts.py +0 -191
- not_again_ai/llm/openai_api/tokens.py +0 -184
- not_again_ai/local_llm/__init__.py +0 -27
- not_again_ai/local_llm/chat_completion.py +0 -105
- not_again_ai/local_llm/huggingface/chat_completion.py +0 -59
- not_again_ai/local_llm/huggingface/helpers.py +0 -23
- not_again_ai/local_llm/ollama/__init__.py +0 -0
- not_again_ai/local_llm/ollama/chat_completion.py +0 -111
- not_again_ai/local_llm/ollama/model_mapping.py +0 -17
- not_again_ai/local_llm/ollama/ollama_client.py +0 -24
- not_again_ai/local_llm/ollama/service.py +0 -81
- not_again_ai/local_llm/ollama/tokens.py +0 -104
- not_again_ai/local_llm/prompts.py +0 -38
- not_again_ai/local_llm/tokens.py +0 -90
- not_again_ai-0.14.0.dist-info/RECORD +0 -44
- not_again_ai-0.14.0.dist-info/entry_points.txt +0 -3
- /not_again_ai/llm/{gh_models → chat_completion/providers}/__init__.py +0 -0
- /not_again_ai/llm/{openai_api → embedding/providers}/__init__.py +0 -0
- /not_again_ai/{local_llm/huggingface → llm/prompting/providers}/__init__.py +0 -0
- {not_again_ai-0.14.0.dist-info → not_again_ai-0.16.0.dist-info}/LICENSE +0 -0
not_again_ai/llm/prompting/providers/openai_tiktoken.py
@@ -0,0 +1,122 @@
+from collections.abc import Collection, Set
+from typing import Literal
+
+from loguru import logger
+import tiktoken
+
+from not_again_ai.llm.chat_completion.types import MessageT
+from not_again_ai.llm.prompting.types import BaseTokenizer
+
+
+class TokenizerOpenAI(BaseTokenizer):
+    def __init__(
+        self,
+        model: str,
+        provider: str = "openai",
+        allowed_special: Literal["all"] | Set[str] | None = None,
+        disallowed_special: Literal["all"] | Collection[str] | None = None,
+    ):
+        self.model = model
+        self.provider = provider
+        self.allowed_special = allowed_special
+        self.disallowed_special = disallowed_special
+
+        self.init_tokenizer(model, provider, allowed_special, disallowed_special)
+
+    def init_tokenizer(
+        self,
+        model: str,
+        provider: str = "openai",
+        allowed_special: Literal["all"] | Set[str] | None = None,
+        disallowed_special: Literal["all"] | Collection[str] | None = None,
+    ) -> None:
+        try:
+            self.encoding = tiktoken.encoding_for_model(model)
+        except KeyError:
+            default_encoding = "o200k_base"
+            logger.warning(f"Model {model} not found. Using {default_encoding} encoding.")
+            self.encoding = tiktoken.get_encoding(default_encoding)
+
+        # Set defaults if not provided
+        if not allowed_special:
+            self.allowed_special = set()
+        if not disallowed_special:
+            self.disallowed_special = ()
+
+    def truncate_str(self, text: str, max_len: int) -> str:
+        tokens = self.encoding.encode(
+            text,
+            allowed_special=self.allowed_special if self.allowed_special is not None else set(),
+            disallowed_special=self.disallowed_special if self.disallowed_special is not None else (),
+        )
+        if len(tokens) > max_len:
+            tokens = tokens[:max_len]
+            truncated_text = self.encoding.decode(tokens)
+            return truncated_text
+        else:
+            return text
+
+    def num_tokens_in_str(self, text: str) -> int:
+        return len(
+            self.encoding.encode(
+                text,
+                allowed_special=self.allowed_special if self.allowed_special is not None else set(),
+                disallowed_special=self.disallowed_special if self.disallowed_special is not None else (),
+            )
+        )
+
+    def num_tokens_in_messages(self, messages: list[MessageT]) -> int:
+        if self.model in {
+            "gpt-3.5-turbo-0613",
+            "gpt-3.5-turbo-16k-0613",
+            "gpt-3.5-turbo-1106",
+            "gpt-3.5-turbo-0125",
+            "gpt-4-0314",
+            "gpt-4-32k-0314",
+            "gpt-4-0613",
+            "gpt-4-32k-0613",
+            "gpt-4-1106-preview",
+            "gpt-4-turbo-preview",
+            "gpt-4-0125-preview",
+            "gpt-4-turbo",
+            "gpt-4-turbo-2024-04-09",
+            "gpt-4o",
+            "gpt-4o-2024-05-13",
+            "gpt-4o-2024-08-06",
+            "gpt-4o-2024-11-20",
+            "gpt-4o-mini",
+            "gpt-4o-mini-2024-07-18",
+            "o1",
+            "o1-2024-12-17",
+            "o1-mini",
+            "o1-mini-2024-09-12",
+            "o1-preview",
+            "o1-preview-2024-09-12",
+        }:
+            tokens_per_message = 3  # every message follows <|start|>{role/name}\n{content}<|end|>\n
+            tokens_per_name = 1  # if there's a name, the role is omitted
+        elif self.model == "gpt-3.5-turbo-0301":
+            tokens_per_message = 4
+            tokens_per_name = -1
+        else:
+            logger.warning(f"Model {self.model} not supported. Assuming gpt-4o encoding.")
+            tokens_per_message = 3
+            tokens_per_name = 1
+
+        num_tokens = 0
+        for message in messages:
+            num_tokens += tokens_per_message
+            message_dict = message.model_dump(exclude_none=True)
+            for key, value in message_dict.items():
+                if isinstance(value, str):
+                    num_tokens += len(
+                        self.encoding.encode(
+                            value,
+                            allowed_special=self.allowed_special if self.allowed_special is not None else set(),
+                            disallowed_special=self.disallowed_special if self.disallowed_special is not None else (),
+                        )
+                    )
+                if key == "name":
+                    num_tokens += tokens_per_name
+            num_tokens += 3
+        return num_tokens
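The new `TokenizerOpenAI` above consolidates the token counting previously spread across `openai_api/tokens.py` and `local_llm/tokens.py` (both deleted in this release). A minimal usage sketch, based only on the methods visible in this hunk (assumes the `llm` extra is installed; printed values are illustrative):

```python
from not_again_ai.llm.prompting.providers.openai_tiktoken import TokenizerOpenAI

# Known models resolve via tiktoken.encoding_for_model; unknown models fall
# back to the o200k_base encoding and log a warning instead of raising.
tokenizer = TokenizerOpenAI(model="gpt-4o")

text = "Designed to collect all the little things that come up over and over again."
print(tokenizer.num_tokens_in_str(text))  # token count under the resolved encoding

# truncate_str keeps at most max_len tokens, then decodes back to a string.
print(tokenizer.truncate_str(text, max_len=5))
```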
not_again_ai/llm/prompting/types.py
@@ -0,0 +1,43 @@
+from abc import ABC, abstractmethod
+from collections.abc import Collection, Set
+from typing import Literal
+
+from not_again_ai.llm.chat_completion.types import MessageT
+
+
+class BaseTokenizer(ABC):
+    def __init__(
+        self,
+        model: str,
+        provider: str,
+        allowed_special: Literal["all"] | Set[str] | None = None,
+        disallowed_special: Literal["all"] | Collection[str] | None = None,
+    ):
+        self.model = model
+        self.provider = provider
+        self.allowed_special = allowed_special
+        self.disallowed_special = disallowed_special
+
+        self.init_tokenizer(model, provider, allowed_special, disallowed_special)
+
+    @abstractmethod
+    def init_tokenizer(
+        self,
+        model: str,
+        provider: str,
+        allowed_special: Literal["all"] | Set[str] | None = None,
+        disallowed_special: Literal["all"] | Collection[str] | None = None,
+    ) -> None:
+        pass
+
+    @abstractmethod
+    def truncate_str(self, text: str, max_len: int) -> str:
+        pass
+
+    @abstractmethod
+    def num_tokens_in_str(self, text: str) -> int:
+        pass
+
+    @abstractmethod
+    def num_tokens_in_messages(self, messages: list[MessageT]) -> int:
+        pass
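Provider tokenizers plug in by subclassing this ABC; `TokenizerOpenAI` above is the only implementation shipped in this release. As a purely hypothetical illustration of the contract (not code from the package), a whitespace "tokenizer" would look like:

```python
from collections.abc import Collection, Set
from typing import Literal

from not_again_ai.llm.chat_completion.types import MessageT
from not_again_ai.llm.prompting.types import BaseTokenizer


class TokenizerWhitespace(BaseTokenizer):
    """Hypothetical example: treats whitespace-separated words as tokens."""

    def init_tokenizer(
        self,
        model: str,
        provider: str,
        allowed_special: Literal["all"] | Set[str] | None = None,
        disallowed_special: Literal["all"] | Collection[str] | None = None,
    ) -> None:
        pass  # no encoding tables to load

    def truncate_str(self, text: str, max_len: int) -> str:
        return " ".join(text.split()[:max_len])

    def num_tokens_in_str(self, text: str) -> int:
        return len(text.split())

    def num_tokens_in_messages(self, messages: list[MessageT]) -> int:
        # Mirrors TokenizerOpenAI: count every string field of each message.
        return sum(
            self.num_tokens_in_str(value)
            for message in messages
            for value in message.model_dump(exclude_none=True).values()
            if isinstance(value, str)
        )
```

Because `BaseTokenizer.__init__` stores the fields and calls `init_tokenizer` itself, a subclass only supplies the four overrides; construction is `TokenizerWhitespace(model="any", provider="local")`.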
{not_again_ai-0.14.0.dist-info → not_again_ai-0.16.0.dist-info}/METADATA
@@ -1,12 +1,11 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.3
 Name: not-again-ai
-Version: 0.14.0
+Version: 0.16.0
 Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
-Home-page: https://github.com/DaveCoDev/not-again-ai
 License: MIT
 Author: DaveCoDev
 Author-email: dave.co.dev@gmail.com
-Requires-Python: >=3.11
+Requires-Python: >=3.11, <3.13
 Classifier: Development Status :: 3 - Alpha
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Science/Research
@@ -19,26 +18,25 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Typing :: Typed
 Provides-Extra: data
 Provides-Extra: llm
-Provides-Extra: local-llm
 Provides-Extra: statistics
 Provides-Extra: viz
-Requires-Dist: azure-
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist: pydantic (>=2.
-Requires-Dist: pytest-playwright (>=0.
-Requires-Dist: python-liquid (>=1.12
-Requires-Dist: scikit-learn (>=1.
-Requires-Dist: scipy (>=1.
-Requires-Dist: seaborn (>=0.13
-Requires-Dist: tiktoken (>=0.8
-
-Project-URL:
+Requires-Dist: azure-identity (>=1.19) ; extra == "llm"
+Requires-Dist: loguru (>=0.7)
+Requires-Dist: numpy (>=2.2) ; extra == "statistics"
+Requires-Dist: numpy (>=2.2) ; extra == "viz"
+Requires-Dist: ollama (>=0.4) ; extra == "llm"
+Requires-Dist: openai (>=1) ; extra == "llm"
+Requires-Dist: pandas (>=2.2) ; extra == "viz"
+Requires-Dist: playwright (>=1.49) ; extra == "data"
+Requires-Dist: pydantic (>=2.10)
+Requires-Dist: pytest-playwright (>=0.7) ; extra == "data"
+Requires-Dist: python-liquid (>=1.12) ; extra == "llm"
+Requires-Dist: scikit-learn (>=1.6) ; extra == "statistics"
+Requires-Dist: scipy (>=1.15) ; extra == "statistics"
+Requires-Dist: seaborn (>=0.13) ; extra == "viz"
+Requires-Dist: tiktoken (>=0.8) ; extra == "llm"
+Project-URL: Documentation, https://davecodev.github.io/not-again-ai/
+Project-URL: Homepage, https://github.com/DaveCoDev/not-again-ai
 Project-URL: Repository, https://github.com/DaveCoDev/not-again-ai
 Description-Content-Type: text/markdown
 
@@ -68,11 +66,9 @@ Requires: Python 3.11, or 3.12
 Install the entire package from [PyPI](https://pypi.org/project/not-again-ai/) with:
 
 ```bash
-$ pip install not_again_ai[llm,
+$ pip install not_again_ai[data,llm,statistics,viz]
 ```
 
-Note that local LLM requires separate installations and will not work out of the box due to how hardware dependent it is. Be sure to check the [notebooks](notebooks/local_llm/) for more details.
-
 The package is split into subpackages, so you can install only the parts you need.
 
 ### Base
@@ -93,16 +89,7 @@ The package is split into subpackages, so you can install only the parts you need.
 1. Using AOAI requires using Entra ID authentication. See https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity for how to set this up for your AOAI deployment.
    * Requires the correct role assigned to your user account and being signed into the Azure CLI.
 1. (Optional) Set the `AZURE_OPENAI_ENDPOINT` environment variable.
-1.
-1. Get a Personal Access Token from https://github.com/settings/tokens and set the `GITHUB_TOKEN` environment variable. The token does not need any permissions.
-1. Check the [Github Marketplace](https://github.com/marketplace/models) to see which models are available.
-
-
-### Local LLM
-1. `pip install not_again_ai[llm,local_llm]`
-1. Some HuggingFace transformers tokenizers are gated behind access requests. If you wish to use these, you will need to request access from HuggingFace on the model card.
-   * Then set the `HF_TOKEN` environment variable to your HuggingFace API token which can be found here: https://huggingface.co/settings/tokens
-1. If you wish to use Ollama:
+1. If you wish to use Ollama:
 1. Follow the instructions at https://github.com/ollama/ollama to install Ollama for your system.
 1. (Optional) [Add Ollama as a startup service (recommended)](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended)
 1. (Optional) To make the Ollama service accessible on your local network from a Linux server, add the following to the `/etc/systemd/system/ollama.service` file which will make Ollama available at `http://<local_address>:11434`:
@@ -112,7 +99,6 @@ The package is split into subpackages, so you can install only the parts you need.
 Environment="OLLAMA_HOST=0.0.0.0"
 ```
 1. It is recommended to always have the latest version of Ollama. To update Ollama check the [docs](https://github.com/ollama/ollama/blob/main/docs/). The command for Linux is: `curl -fsSL https://ollama.com/install.sh | sh`
-1. HuggingFace transformers and other requirements are hardware dependent so for providers other than Ollama, this only installs some generic dependencies. Check the [notebooks](notebooks/local_llm/) for more details on what is available and how to install it.
 
 
 ### Statistics
@@ -156,10 +142,8 @@ $ poetry update
 
 To install all dependencies (with all extra dependencies) into an isolated virtual environment:
 
-> Append `--sync` to uninstall dependencies that are no longer in use from the virtual environment.
-
 ```bash
-$ poetry install --all-extras
+$ poetry sync --all-extras
 ```
 
 To [activate](https://python-poetry.org/docs/basic-usage#activating-the-virtual-environment) the
@@ -215,7 +199,7 @@ Automated code quality checks are performed using
 environments and run commands based on [`noxfile.py`](./noxfile.py) for unit testing, PEP 8 style
 guide checking, type checking and documentation generation.
 
-> Note: `nox` is installed into the virtual environment automatically by the `poetry install`
+> Note: `nox` is installed into the virtual environment automatically by the `poetry sync`
 > command above. Run `poetry shell` to activate the virtual environment.
 
 To run all default sessions:
not_again_ai-0.16.0.dist-info/RECORD
@@ -0,0 +1,38 @@
+not_again_ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+not_again_ai/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+not_again_ai/base/file_system.py,sha256=KNQmacO4Q__CQuq2oPzWrg3rQO48n3evglc9bNiP7KM,949
+not_again_ai/base/parallel.py,sha256=fcYhKBYBWvob84iKp3O93wvFFdXeidljZsShgBLTNGA,3448
+not_again_ai/data/__init__.py,sha256=1jF6mwvtB2PT7IEc3xpbRtZm3g3Lyf8zUqH4AEE4qlQ,244
+not_again_ai/data/web.py,sha256=wjx9cc33jcoJBGonYCIpwygPBFOwz7F-dx_ominmbnI,1838
+not_again_ai/llm/__init__.py,sha256=_wNUL6FDaT369Z8W48FsaC_NkcOZ-ib2MMUvnaLOS-0,451
+not_again_ai/llm/chat_completion/__init__.py,sha256=a2qmmmrXjMKyHGZDjt_xdqYbSrEOBea_VvZArzMboe0,200
+not_again_ai/llm/chat_completion/interface.py,sha256=FCyE-1gLdhwuS0Lv8iTbZvraa4iZjnKB8qb31WF53uk,1204
+not_again_ai/llm/chat_completion/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+not_again_ai/llm/chat_completion/providers/ollama_api.py,sha256=iBTMyF8edo8uxxrorNPtShzmCXG7m0RlEBunWLSO4Mo,7999
+not_again_ai/llm/chat_completion/providers/openai_api.py,sha256=S7TZhDIQ_xpp3JakRVcd3Gpw2UjeHCETdA9MfRKUjCU,12294
+not_again_ai/llm/chat_completion/types.py,sha256=q8APUWWzwCKL0Rs_zEFfph9uBcwh5nAT0f0rp4crvk0,4039
+not_again_ai/llm/embedding/__init__.py,sha256=wscUfROukvw0M0vYccfaVTdXV0P-eICAT5mqM0LaHHc,182
+not_again_ai/llm/embedding/interface.py,sha256=Hj3UiktXEeCUeMwpIDtRkwBfKgaJSnJvclLNyjwUAtE,1144
+not_again_ai/llm/embedding/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+not_again_ai/llm/embedding/providers/ollama_api.py,sha256=m-OCis9WAUT2baGsGVPzejlive40eSNyO6tHmPh6joM,3201
+not_again_ai/llm/embedding/providers/openai_api.py,sha256=JFFqbq0O5snIEnr9VESdp5xehikQBPbs7nwyE6acFsY,5441
+not_again_ai/llm/embedding/types.py,sha256=J4FFLx35Aow2kOaafDReeY9cUNqhWMjaAk5gXkX7SVk,506
+not_again_ai/llm/prompting/__init__.py,sha256=7YnHro1yH01FLGnao27WyqQDFjNYf9npE5UxoR9YrUU,84
+not_again_ai/llm/prompting/compile_prompt.py,sha256=lnbTOoTc7PumyP_GhfHaLZHp3UUpSB7VAeWOilS1wpI,4703
+not_again_ai/llm/prompting/interface.py,sha256=SMKYabmu3zTWbEDukU6aLU_JQ88apeBWWOF_qZ0s3ww,1783
+not_again_ai/llm/prompting/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+not_again_ai/llm/prompting/providers/openai_tiktoken.py,sha256=8YrEiK3ZHyKVGiVsJ_Rd6eVdISIvcub7ooj-HB7Prsc,4536
+not_again_ai/llm/prompting/types.py,sha256=xz70dnawL9rji7Zr1_mOekY-uUlvKJJf7k9nXJsOXc4,1219
+not_again_ai/py.typed,sha256=UaCuPFa3H8UAakbt-5G8SPacldTOGvJv18pPjUJ5gDY,93
+not_again_ai/statistics/__init__.py,sha256=gA8r9JQFbFSN0ykrHy4G1IQgcky4f2eM5Oo24oVI5Ik,466
+not_again_ai/statistics/dependence.py,sha256=4xaniMkLlTjdXcNVXdwepEAiZ-WaaGYfR9haJC1lU2Q,4434
+not_again_ai/viz/__init__.py,sha256=MeaWae_QRbDEHJ4MWYoY1-Ad6S0FhSDaRhQncS2cpSc,447
+not_again_ai/viz/barplots.py,sha256=rr_2phZgDaqcF5Ve7mBZrVvNXVzEt84RQPIyyeJxsMo,3384
+not_again_ai/viz/distributions.py,sha256=OyWwJaNI6lMRm_iSrhq-CORLNvXfeuLSgDtVo3umnzU,4354
+not_again_ai/viz/scatterplot.py,sha256=5CUOWeknbBOaZPeX9oPin5sBkRKEwk8qeFH45R-9LlY,2292
+not_again_ai/viz/time_series.py,sha256=pOGZqXp_2nd6nKo-PUQNCtmMh__69jxQ6bQibTGLwZA,5212
+not_again_ai/viz/utils.py,sha256=hN7gwxtBt3U6jQni2K8j5m5pCXpaJDoNzGhBBikEU28,238
+not_again_ai-0.16.0.dist-info/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
+not_again_ai-0.16.0.dist-info/METADATA,sha256=kvwxTcEi-elRl-LuHyh2QtFLrpYHd-U6HjyuAkHYvWQ,15035
+not_again_ai-0.16.0.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+not_again_ai-0.16.0.dist-info/RECORD,,
not_again_ai/llm/gh_models/azure_ai_client.py
@@ -1,20 +0,0 @@
-import os
-
-from azure.ai.inference import ChatCompletionsClient
-from azure.core.credentials import AzureKeyCredential
-
-
-def azure_ai_client(
-    token: str | None = None,
-    endpoint: str = "https://models.inference.ai.azure.com",
-) -> ChatCompletionsClient:
-    if not token:
-        token = os.getenv("GITHUB_TOKEN")
-    if not token:
-        raise ValueError("Token must be provided or GITHUB_TOKEN environment variable must be set")
-
-    client = ChatCompletionsClient(
-        endpoint=endpoint,
-        credential=AzureKeyCredential(token),
-    )
-    return client
not_again_ai/llm/gh_models/chat_completion.py
@@ -1,81 +0,0 @@
-import contextlib
-import json
-import time
-from typing import Any
-
-from azure.ai.inference import ChatCompletionsClient
-from azure.ai.inference.models import ChatCompletionsToolDefinition, ChatRequestMessage
-
-
-def chat_completion(
-    messages: list[ChatRequestMessage],
-    model: str,
-    client: ChatCompletionsClient,
-    tools: list[ChatCompletionsToolDefinition] | None = None,
-    max_tokens: int | None = None,
-    temperature: float | None = None,
-    json_mode: bool = False,
-    seed: int | None = None,
-) -> dict[str, Any]:
-    """Gets a response from GitHub Models using the Azure AI Inference SDK.
-    See the available models at https://github.com/marketplace/models
-    Full documentation of the SDK is at: https://learn.microsoft.com/en-us/azure/ai-studio/reference/reference-model-inference-chat-completions
-    And samples at: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples
-
-    Returns:
-        dict[str, Any]: A dictionary with the following keys
-            message (str | dict): The content of the generated assistant message.
-                If json_mode is True, this will be a dictionary.
-            tool_names (list[str], optional): The names of the tools called by the model.
-                If the model does not support tools, a ResponseError is raised.
-            tool_args_list (list[dict], optional): The arguments of the tools called by the model.
-            prompt_tokens (int): The number of tokens in the messages sent to the model.
-            completion_tokens (int): The number of tokens used by the model to generate the completion.
-            response_duration (float): The time, in seconds, taken to generate the response by using the model.
-            system_fingerprint (str, optional): If seed is set, a unique identifier for the model used to generate the response.
-    """
-    response_format = {"type": "json_object"} if json_mode else None
-    start_time = time.time()
-    response = client.complete(  # type: ignore
-        messages=messages,
-        model=model,
-        response_format=response_format,  # type: ignore
-        max_tokens=max_tokens,
-        temperature=temperature,
-        tools=tools,
-        seed=seed,
-    )
-    end_time = time.time()
-    response_duration = end_time - start_time
-
-    response_data = {}
-    finish_reason = response.choices[0].finish_reason
-    response_data["finish_reason"] = finish_reason.value  # type: ignore
-
-    message = response.choices[0].message.content
-    if message and json_mode:
-        with contextlib.suppress(json.JSONDecodeError):
-            message = json.loads(message)
-    response_data["message"] = message
-
-    # Check for tool calls because even if the finish_reason is stop, the model may have called a tool
-    tool_calls = response.choices[0].message.tool_calls
-    if tool_calls:
-        tool_names = []
-        tool_args_list = []
-        for tool_call in tool_calls:
-            tool_names.append(tool_call.function.name)
-            tool_args_list.append(json.loads(tool_call.function.arguments))
-        response_data["tool_names"] = tool_names
-        response_data["tool_args_list"] = tool_args_list
-
-    if seed is not None and hasattr(response, "system_fingerprint"):
-        response_data["system_fingerprint"] = response.system_fingerprint
-
-    usage = response.usage
-    if usage is not None:
-        response_data["completion_tokens"] = usage.completion_tokens
-        response_data["prompt_tokens"] = usage.prompt_tokens
-    response_data["response_duration"] = round(response_duration, 4)
-
-    return response_data
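The `gh_models` subpackage is removed entirely in 0.16.0. For code that depended on the two helpers above, the same calls can be made directly against the Azure AI Inference SDK they wrapped. A minimal standalone sketch assembled from the deleted code (the model name is a placeholder, and this is not a 0.16.0 API):

```python
import os

from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import UserMessage
from azure.core.credentials import AzureKeyCredential

# Same defaults the removed azure_ai_client used: a GitHub PAT against the
# GitHub Models endpoint.
client = ChatCompletionsClient(
    endpoint="https://models.inference.ai.azure.com",
    credential=AzureKeyCredential(os.environ["GITHUB_TOKEN"]),
)

# client.complete accepts the same kwargs the removed chat_completion forwarded.
response = client.complete(
    messages=[UserMessage(content="Say hello.")],
    model="gpt-4o-mini",  # placeholder; see https://github.com/marketplace/models
    temperature=0.7,
    max_tokens=100,
)
print(response.choices[0].message.content)
```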