not-again-ai 0.13.0__tar.gz → 0.15.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/PKG-INFO +63 -58
  2. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/README.md +42 -37
  3. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/pyproject.toml +51 -39
  4. not_again_ai-0.15.0/src/not_again_ai/data/__init__.py +7 -0
  5. not_again_ai-0.15.0/src/not_again_ai/data/web.py +56 -0
  6. not_again_ai-0.15.0/src/not_again_ai/llm/chat_completion/__init__.py +4 -0
  7. not_again_ai-0.15.0/src/not_again_ai/llm/chat_completion/interface.py +32 -0
  8. not_again_ai-0.15.0/src/not_again_ai/llm/chat_completion/providers/ollama_api.py +227 -0
  9. not_again_ai-0.15.0/src/not_again_ai/llm/chat_completion/providers/openai_api.py +290 -0
  10. not_again_ai-0.15.0/src/not_again_ai/llm/chat_completion/types.py +145 -0
  11. not_again_ai-0.15.0/src/not_again_ai/llm/prompting/__init__.py +3 -0
  12. not_again_ai-0.15.0/src/not_again_ai/llm/prompting/compile_messages.py +98 -0
  13. not_again_ai-0.15.0/src/not_again_ai/llm/prompting/interface.py +46 -0
  14. not_again_ai-0.15.0/src/not_again_ai/llm/prompting/providers/openai_tiktoken.py +122 -0
  15. not_again_ai-0.15.0/src/not_again_ai/llm/prompting/types.py +43 -0
  16. not_again_ai-0.13.0/src/not_again_ai/llm/gh_models/azure_ai_client.py +0 -20
  17. not_again_ai-0.13.0/src/not_again_ai/llm/gh_models/chat_completion.py +0 -81
  18. not_again_ai-0.13.0/src/not_again_ai/llm/openai_api/chat_completion.py +0 -200
  19. not_again_ai-0.13.0/src/not_again_ai/llm/openai_api/context_management.py +0 -70
  20. not_again_ai-0.13.0/src/not_again_ai/llm/openai_api/embeddings.py +0 -62
  21. not_again_ai-0.13.0/src/not_again_ai/llm/openai_api/openai_client.py +0 -78
  22. not_again_ai-0.13.0/src/not_again_ai/llm/openai_api/prompts.py +0 -191
  23. not_again_ai-0.13.0/src/not_again_ai/llm/openai_api/tokens.py +0 -184
  24. not_again_ai-0.13.0/src/not_again_ai/local_llm/__init__.py +0 -27
  25. not_again_ai-0.13.0/src/not_again_ai/local_llm/chat_completion.py +0 -105
  26. not_again_ai-0.13.0/src/not_again_ai/local_llm/huggingface/__init__.py +0 -0
  27. not_again_ai-0.13.0/src/not_again_ai/local_llm/huggingface/chat_completion.py +0 -59
  28. not_again_ai-0.13.0/src/not_again_ai/local_llm/huggingface/helpers.py +0 -23
  29. not_again_ai-0.13.0/src/not_again_ai/local_llm/ollama/__init__.py +0 -0
  30. not_again_ai-0.13.0/src/not_again_ai/local_llm/ollama/chat_completion.py +0 -111
  31. not_again_ai-0.13.0/src/not_again_ai/local_llm/ollama/model_mapping.py +0 -17
  32. not_again_ai-0.13.0/src/not_again_ai/local_llm/ollama/ollama_client.py +0 -24
  33. not_again_ai-0.13.0/src/not_again_ai/local_llm/ollama/service.py +0 -81
  34. not_again_ai-0.13.0/src/not_again_ai/local_llm/ollama/tokens.py +0 -104
  35. not_again_ai-0.13.0/src/not_again_ai/local_llm/prompts.py +0 -38
  36. not_again_ai-0.13.0/src/not_again_ai/local_llm/tokens.py +0 -90
  37. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/LICENSE +0 -0
  38. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/src/not_again_ai/__init__.py +0 -0
  39. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/src/not_again_ai/base/__init__.py +0 -0
  40. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/src/not_again_ai/base/file_system.py +0 -0
  41. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/src/not_again_ai/base/parallel.py +0 -0
  42. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/src/not_again_ai/llm/__init__.py +0 -0
  43. {not_again_ai-0.13.0/src/not_again_ai/llm/gh_models → not_again_ai-0.15.0/src/not_again_ai/llm/chat_completion/providers}/__init__.py +0 -0
  44. {not_again_ai-0.13.0/src/not_again_ai/llm/openai_api → not_again_ai-0.15.0/src/not_again_ai/llm/prompting/providers}/__init__.py +0 -0
  45. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/src/not_again_ai/py.typed +0 -0
  46. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/src/not_again_ai/statistics/__init__.py +0 -0
  47. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/src/not_again_ai/statistics/dependence.py +0 -0
  48. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/src/not_again_ai/viz/__init__.py +0 -0
  49. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/src/not_again_ai/viz/barplots.py +0 -0
  50. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/src/not_again_ai/viz/distributions.py +0 -0
  51. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/src/not_again_ai/viz/scatterplot.py +0 -0
  52. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/src/not_again_ai/viz/time_series.py +0 -0
  53. {not_again_ai-0.13.0 → not_again_ai-0.15.0}/src/not_again_ai/viz/utils.py +0 -0
@@ -1,12 +1,11 @@
1
- Metadata-Version: 2.1
1
+ Metadata-Version: 2.3
2
2
  Name: not-again-ai
3
- Version: 0.13.0
3
+ Version: 0.15.0
4
4
  Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
5
- Home-page: https://github.com/DaveCoDev/not-again-ai
6
5
  License: MIT
7
6
  Author: DaveCoDev
8
7
  Author-email: dave.co.dev@gmail.com
9
- Requires-Python: >=3.11,<4.0
8
+ Requires-Python: >=3.11, <3.13
10
9
  Classifier: Development Status :: 3 - Alpha
11
10
  Classifier: Intended Audience :: Developers
12
11
  Classifier: Intended Audience :: Science/Research
@@ -17,26 +16,27 @@ Classifier: Programming Language :: Python :: 3
17
16
  Classifier: Programming Language :: Python :: 3.11
18
17
  Classifier: Programming Language :: Python :: 3.12
19
18
  Classifier: Typing :: Typed
19
+ Provides-Extra: data
20
20
  Provides-Extra: llm
21
- Provides-Extra: local-llm
22
21
  Provides-Extra: statistics
23
22
  Provides-Extra: viz
24
- Requires-Dist: azure-ai-inference (==1.0.0b4) ; extra == "llm"
25
- Requires-Dist: azure-identity (>=1.18,<2.0) ; extra == "llm"
26
- Requires-Dist: jinja2 (>=3.1,<4.0) ; extra == "local-llm"
27
- Requires-Dist: loguru (==0.7.2)
28
- Requires-Dist: numpy (>=2.1,<3.0) ; extra == "statistics" or extra == "viz"
29
- Requires-Dist: ollama (>=0.3,<0.4) ; extra == "local-llm"
30
- Requires-Dist: openai (>=1.51,<2.0) ; extra == "llm"
31
- Requires-Dist: pandas (>=2.2,<3.0) ; extra == "viz"
32
- Requires-Dist: pydantic (>=2.9,<3.0) ; extra == "llm"
33
- Requires-Dist: python-liquid (>=1.12,<2.0) ; extra == "llm"
34
- Requires-Dist: scikit-learn (>=1.5,<2.0) ; extra == "statistics"
35
- Requires-Dist: scipy (>=1.14,<2.0) ; extra == "statistics"
36
- Requires-Dist: seaborn (>=0.13,<0.14) ; extra == "viz"
37
- Requires-Dist: tiktoken (>=0.7,<0.8) ; extra == "llm"
38
- Requires-Dist: transformers (>=4.45,<5.0) ; extra == "local-llm"
39
- Project-URL: Documentation, https://github.com/DaveCoDev/not-again-ai
23
+ Requires-Dist: azure-identity (>=1.19) ; extra == "llm"
24
+ Requires-Dist: loguru (>=0.7)
25
+ Requires-Dist: numpy (>=2.2) ; extra == "statistics"
26
+ Requires-Dist: numpy (>=2.2) ; extra == "viz"
27
+ Requires-Dist: ollama (>=0.4) ; extra == "llm"
28
+ Requires-Dist: openai (>=1.60) ; extra == "llm"
29
+ Requires-Dist: pandas (>=2.2) ; extra == "viz"
30
+ Requires-Dist: playwright (>=1.49) ; extra == "data"
31
+ Requires-Dist: pydantic (>=2.10)
32
+ Requires-Dist: pytest-playwright (>=0.6) ; extra == "data"
33
+ Requires-Dist: python-liquid (>=1.12) ; extra == "llm"
34
+ Requires-Dist: scikit-learn (>=1.6) ; extra == "statistics"
35
+ Requires-Dist: scipy (>=1.15) ; extra == "statistics"
36
+ Requires-Dist: seaborn (>=0.13) ; extra == "viz"
37
+ Requires-Dist: tiktoken (>=0.8) ; extra == "llm"
38
+ Project-URL: Documentation, https://davecodev.github.io/not-again-ai/
39
+ Project-URL: Homepage, https://github.com/DaveCoDev/not-again-ai
40
40
  Project-URL: Repository, https://github.com/DaveCoDev/not-again-ai
41
41
  Description-Content-Type: text/markdown
42
42
 
@@ -66,40 +66,47 @@ Requires: Python 3.11, or 3.12
66
66
  Install the entire package from [PyPI](https://pypi.org/project/not-again-ai/) with:
67
67
 
68
68
  ```bash
69
- $ pip install not_again_ai[llm,local_llm,statistics,viz]
69
+ $ pip install not_again_ai[data,llm,statistics,viz]
70
70
  ```
71
71
 
72
- Note that local LLM requires separate installations and will not work out of the box due to how hardware dependent it is. Be sure to check the [notebooks](notebooks/local_llm/) for more details.
73
-
74
72
  The package is split into subpackages, so you can install only the parts you need.
75
- * **Base only**: `pip install not_again_ai`
76
- * **LLM**: `pip install not_again_ai[llm]`
77
- 1. OpenAI API
78
- 1. Go to https://platform.openai.com/settings/profile?tab=api-keys to get your API key.
79
- 1. (Optional) Set the `OPENAI_API_KEY` and the `OPENAI_ORG_ID` environment variables.
80
- 1. Azure OpenAI (AOAI)
81
- 1. Using AOAI requires using Entra ID authentication. See https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity for how to set this up for your AOAI deployment.
82
- 1. Requires the correct role assigned to your user account and being signed into the Azure CLI.
83
- 1. (Optional) Set the `AZURE_OPENAI_ENDPOINT` environment variable.
84
- 1. GitHub Models
85
- 1. Get a Personal Access Token from https://github.com/settings/tokens and set the `GITHUB_TOKEN` environment variable. The token does not need any permissions.
86
- 1. Check the [Github Marketplace](https://github.com/marketplace/models) to see which models are available.
87
- * **Local LLM**: `pip install not_again_ai[llm,local_llm]`
88
- 1. Some HuggingFace transformers tokenizers are gated behind access requests. If you wish to use these, you will need to request access from HuggingFace on the model card.
89
- 1. Then set the `HF_TOKEN` environment variable to your HuggingFace API token which can be found here: https://huggingface.co/settings/tokens
90
- 2. If you wish to use Ollama:
91
- 1. Follow the instructions at https://github.com/ollama/ollama to install Ollama for your system.
92
- 2. (Optional) [Add Ollama as a startup service (recommended)](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended)
93
- 3. (Optional) To make the Ollama service accessible on your local network from a Linux server, add the following to the `/etc/systemd/system/ollama.service` file which will make Ollama available at `http://<local_address>:11434`:
94
- ```bash
95
- [Service]
96
- ...
97
- Environment="OLLAMA_HOST=0.0.0.0"
98
- ```
99
- 4. It is recommended to always have the latest version of Ollama. To update Ollama check the [docs](https://github.com/ollama/ollama/blob/main/docs/). The command for Linux is: `curl -fsSL https://ollama.com/install.sh | sh`
100
- 3. HuggingFace transformers and other requirements are hardware dependent so for providers other than Ollama, this only installs some generic dependencies. Check the [notebooks](notebooks/local_llm/) for more details on what is available and how to install it.
101
- * **Statistics**: `pip install not_again_ai[statistics]`
102
- * **Visualization**: `pip install not_again_ai[viz]`
73
+
74
+ ### Base
75
+ 1. `pip install not_again_ai`
76
+
77
+
78
+ ### Data
79
+ 1. `pip install not_again_ai[data]`
80
+ 1. `playwright install` to download the browser binaries.
81
+
82
+
83
+ ### LLM
84
+ 1. `pip install not_again_ai[llm]`
85
+ 1. Setup OpenAI API
86
+ 1. Go to https://platform.openai.com/settings/profile?tab=api-keys to get your API key.
87
+ 1. (Optional) Set the `OPENAI_API_KEY` and the `OPENAI_ORG_ID` environment variables.
88
+ 1. Setup Azure OpenAI (AOAI)
89
+ 1. Using AOAI requires using Entra ID authentication. See https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity for how to set this up for your AOAI deployment.
90
+ * Requires the correct role assigned to your user account and being signed into the Azure CLI.
91
+ 1. (Optional) Set the `AZURE_OPENAI_ENDPOINT` environment variable.
92
+ 1. If you wish to use Ollama:
93
+ 1. Follow the instructions at https://github.com/ollama/ollama to install Ollama for your system.
94
+ 1. (Optional) [Add Ollama as a startup service (recommended)](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended)
95
+ 1. (Optional) To make the Ollama service accessible on your local network from a Linux server, add the following to the `/etc/systemd/system/ollama.service` file which will make Ollama available at `http://<local_address>:11434`:
96
+ ```bash
97
+ [Service]
98
+ ...
99
+ Environment="OLLAMA_HOST=0.0.0.0"
100
+ ```
101
+ 1. It is recommended to always have the latest version of Ollama. To update Ollama check the [docs](https://github.com/ollama/ollama/blob/main/docs/). The command for Linux is: `curl -fsSL https://ollama.com/install.sh | sh`
102
+
103
+
104
+ ### Statistics
105
+ 1. `pip install not_again_ai[statistics]`
106
+
107
+
108
+ ### Visualization
109
+ 1. `pip install not_again_ai[viz]`
103
110
 
104
111
 
105
112
  # Development Information
@@ -135,10 +142,8 @@ $ poetry update
135
142
 
136
143
  To install all dependencies (with all extra dependencies) into an isolated virtual environment:
137
144
 
138
- > Append `--sync` to uninstall dependencies that are no longer in use from the virtual environment.
139
-
140
145
  ```bash
141
- $ poetry install --all-extras
146
+ $ poetry sync --all-extras
142
147
  ```
143
148
 
144
149
  To [activate](https://python-poetry.org/docs/basic-usage#activating-the-virtual-environment) the
@@ -194,7 +199,7 @@ Automated code quality checks are performed using
194
199
  environments and run commands based on [`noxfile.py`](./noxfile.py) for unit testing, PEP 8 style
195
200
  guide checking, type checking and documentation generation.
196
201
 
197
- > Note: `nox` is installed into the virtual environment automatically by the `poetry install`
202
+ > Note: `nox` is installed into the virtual environment automatically by the `poetry sync`
198
203
  > command above. Run `poetry shell` to activate the virtual environment.
199
204
 
200
205
  To run all default sessions:
@@ -229,10 +234,10 @@ areas of the project that are currently not tested.
229
234
 
230
235
  pytest and code coverage are configured in [`pyproject.toml`](./pyproject.toml).
231
236
 
232
- To pass arguments to `pytest` through `nox`:
237
+ To run selected tests:
233
238
 
234
239
  ```bash
235
- (.venv) $ nox -s test -- -k invalid_factorial
240
+ (.venv) $ nox -s test -- -k "test_web"
236
241
  ```
237
242
 
238
243
  ## Code Style Checking
@@ -24,40 +24,47 @@ Requires: Python 3.11, or 3.12
24
24
  Install the entire package from [PyPI](https://pypi.org/project/not-again-ai/) with:
25
25
 
26
26
  ```bash
27
- $ pip install not_again_ai[llm,local_llm,statistics,viz]
27
+ $ pip install not_again_ai[data,llm,statistics,viz]
28
28
  ```
29
29
 
30
- Note that local LLM requires separate installations and will not work out of the box due to how hardware dependent it is. Be sure to check the [notebooks](notebooks/local_llm/) for more details.
31
-
32
30
  The package is split into subpackages, so you can install only the parts you need.
33
- * **Base only**: `pip install not_again_ai`
34
- * **LLM**: `pip install not_again_ai[llm]`
35
- 1. OpenAI API
36
- 1. Go to https://platform.openai.com/settings/profile?tab=api-keys to get your API key.
37
- 1. (Optional) Set the `OPENAI_API_KEY` and the `OPENAI_ORG_ID` environment variables.
38
- 1. Azure OpenAI (AOAI)
39
- 1. Using AOAI requires using Entra ID authentication. See https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity for how to set this up for your AOAI deployment.
40
- 1. Requires the correct role assigned to your user account and being signed into the Azure CLI.
41
- 1. (Optional) Set the `AZURE_OPENAI_ENDPOINT` environment variable.
42
- 1. GitHub Models
43
- 1. Get a Personal Access Token from https://github.com/settings/tokens and set the `GITHUB_TOKEN` environment variable. The token does not need any permissions.
44
- 1. Check the [Github Marketplace](https://github.com/marketplace/models) to see which models are available.
45
- * **Local LLM**: `pip install not_again_ai[llm,local_llm]`
46
- 1. Some HuggingFace transformers tokenizers are gated behind access requests. If you wish to use these, you will need to request access from HuggingFace on the model card.
47
- 1. Then set the `HF_TOKEN` environment variable to your HuggingFace API token which can be found here: https://huggingface.co/settings/tokens
48
- 2. If you wish to use Ollama:
49
- 1. Follow the instructions at https://github.com/ollama/ollama to install Ollama for your system.
50
- 2. (Optional) [Add Ollama as a startup service (recommended)](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended)
51
- 3. (Optional) To make the Ollama service accessible on your local network from a Linux server, add the following to the `/etc/systemd/system/ollama.service` file which will make Ollama available at `http://<local_address>:11434`:
52
- ```bash
53
- [Service]
54
- ...
55
- Environment="OLLAMA_HOST=0.0.0.0"
56
- ```
57
- 4. It is recommended to always have the latest version of Ollama. To update Ollama check the [docs](https://github.com/ollama/ollama/blob/main/docs/). The command for Linux is: `curl -fsSL https://ollama.com/install.sh | sh`
58
- 3. HuggingFace transformers and other requirements are hardware dependent so for providers other than Ollama, this only installs some generic dependencies. Check the [notebooks](notebooks/local_llm/) for more details on what is available and how to install it.
59
- * **Statistics**: `pip install not_again_ai[statistics]`
60
- * **Visualization**: `pip install not_again_ai[viz]`
31
+
32
+ ### Base
33
+ 1. `pip install not_again_ai`
34
+
35
+
36
+ ### Data
37
+ 1. `pip install not_again_ai[data]`
38
+ 1. `playwright install` to download the browser binaries.
39
+
40
+
41
+ ### LLM
42
+ 1. `pip install not_again_ai[llm]`
43
+ 1. Setup OpenAI API
44
+ 1. Go to https://platform.openai.com/settings/profile?tab=api-keys to get your API key.
45
+ 1. (Optional) Set the `OPENAI_API_KEY` and the `OPENAI_ORG_ID` environment variables.
46
+ 1. Setup Azure OpenAI (AOAI)
47
+ 1. Using AOAI requires using Entra ID authentication. See https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity for how to set this up for your AOAI deployment.
48
+ * Requires the correct role assigned to your user account and being signed into the Azure CLI.
49
+ 1. (Optional) Set the `AZURE_OPENAI_ENDPOINT` environment variable.
50
+ 1. If you wish to use Ollama:
51
+ 1. Follow the instructions at https://github.com/ollama/ollama to install Ollama for your system.
52
+ 1. (Optional) [Add Ollama as a startup service (recommended)](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended)
53
+ 1. (Optional) To make the Ollama service accessible on your local network from a Linux server, add the following to the `/etc/systemd/system/ollama.service` file which will make Ollama available at `http://<local_address>:11434`:
54
+ ```bash
55
+ [Service]
56
+ ...
57
+ Environment="OLLAMA_HOST=0.0.0.0"
58
+ ```
59
+ 1. It is recommended to always have the latest version of Ollama. To update Ollama check the [docs](https://github.com/ollama/ollama/blob/main/docs/). The command for Linux is: `curl -fsSL https://ollama.com/install.sh | sh`
60
+
61
+
62
+ ### Statistics
63
+ 1. `pip install not_again_ai[statistics]`
64
+
65
+
66
+ ### Visualization
67
+ 1. `pip install not_again_ai[viz]`
61
68
 
62
69
 
63
70
  # Development Information
@@ -93,10 +100,8 @@ $ poetry update
93
100
 
94
101
  To install all dependencies (with all extra dependencies) into an isolated virtual environment:
95
102
 
96
- > Append `--sync` to uninstall dependencies that are no longer in use from the virtual environment.
97
-
98
103
  ```bash
99
- $ poetry install --all-extras
104
+ $ poetry sync --all-extras
100
105
  ```
101
106
 
102
107
  To [activate](https://python-poetry.org/docs/basic-usage#activating-the-virtual-environment) the
@@ -152,7 +157,7 @@ Automated code quality checks are performed using
152
157
  environments and run commands based on [`noxfile.py`](./noxfile.py) for unit testing, PEP 8 style
153
158
  guide checking, type checking and documentation generation.
154
159
 
155
- > Note: `nox` is installed into the virtual environment automatically by the `poetry install`
160
+ > Note: `nox` is installed into the virtual environment automatically by the `poetry sync`
156
161
  > command above. Run `poetry shell` to activate the virtual environment.
157
162
 
158
163
  To run all default sessions:
@@ -187,10 +192,10 @@ areas of the project that are currently not tested.
187
192
 
188
193
  pytest and code coverage are configured in [`pyproject.toml`](./pyproject.toml).
189
194
 
190
- To pass arguments to `pytest` through `nox`:
195
+ To run selected tests:
191
196
 
192
197
  ```bash
193
- (.venv) $ nox -s test -- -k invalid_factorial
198
+ (.venv) $ nox -s test -- -k "test_web"
194
199
  ```
195
200
 
196
201
  ## Code Style Checking
@@ -1,8 +1,10 @@
1
- [tool.poetry]
1
+ [project]
2
2
  name = "not-again-ai"
3
- version = "0.13.0"
3
+ version = "0.15.0"
4
4
  description = "Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place."
5
- authors = ["DaveCoDev <dave.co.dev@gmail.com>"]
5
+ authors = [
6
+ { name = "DaveCoDev", email = "dave.co.dev@gmail.com" }
7
+ ]
6
8
  license = "MIT"
7
9
  readme = "README.md"
8
10
  repository = "https://github.com/DaveCoDev/not-again-ai"
@@ -19,38 +21,49 @@ classifiers = [
19
21
  "Programming Language :: Python :: 3.12",
20
22
  "Typing :: Typed",
21
23
  ]
24
+ requires-python = ">=3.11, <3.13"
25
+ # Lets us define dependencies completely in tool.poetry.dependencies
26
+ dynamic = [ "dependencies" ]
27
+ dependencies = [
28
+ "loguru>=0.7",
29
+ "pydantic>=2.10"
30
+ ]
31
+
32
+ [project.urls]
33
+ Homepage = "https://github.com/DaveCoDev/not-again-ai"
34
+ Documentation = "https://davecodev.github.io/not-again-ai/"
35
+ Repository = "https://github.com/DaveCoDev/not-again-ai"
36
+
37
+ [tool.poetry]
38
+ requires-poetry = ">=2.0.1"
39
+
40
+ [tool.poetry.requires-plugins]
41
+ poetry-plugin-export = ">=1.8"
22
42
 
23
- [tool.poetry.dependencies]
24
- # Some packages, such as scipy, constrain their upper bound of Python versions they support.
25
- # Without also constraining the upper bound here, Poetry will not select those versions and will
26
- # result in an old version being resolved/locked.
27
- python = "^3.11 || ^3.12"
28
-
29
- loguru = { version = "==0.7.2" }
30
-
31
- # Optional dependencies are defined here, and groupings are defined below.
32
- azure-ai-inference = { version = "==1.0.0b4", optional = true }
33
- azure-identity = { version = "^1.18", optional = true }
34
- jinja2 = { version = "^3.1", optional = true }
35
- numpy = { version = "^2.1", optional = true }
36
- ollama = { version = "^0.3", optional = true }
37
- openai = { version = "^1.51", optional = true }
38
- pandas = { version = "^2.2", optional = true }
39
- pydantic = { version = "^2.9", optional = true }
40
- python-liquid = { version = "^1.12", optional = true }
41
- scipy = { version = "^1.14", optional = true }
42
- scikit-learn = { version = "^1.5", optional = true }
43
- seaborn = { version = "^0.13", optional = true }
44
- tiktoken = { version = "^0.7", optional = true }
45
- transformers = { version = "^4.45", optional = true }
46
-
47
- [tool.poetry.extras]
48
- llm = ["azure-ai-inference", "azure-identity", "openai", "pydantic", "python-liquid", "tiktoken"]
49
- local_llm = ["jinja2", "ollama", "transformers"]
50
- statistics = ["numpy", "scikit-learn", "scipy"]
51
- viz = ["numpy", "pandas", "seaborn"]
52
-
53
- [tool.poetry.dev-dependencies]
43
+ [project.optional-dependencies]
44
+ data = [
45
+ "playwright>=1.49",
46
+ "pytest-playwright>=0.6"
47
+ ]
48
+ llm = [
49
+ "azure-identity>=1.19",
50
+ "ollama>=0.4",
51
+ "openai>=1.60",
52
+ "python-liquid>=1.12",
53
+ "tiktoken>=0.8"
54
+ ]
55
+ statistics = [
56
+ "numpy>=2.2",
57
+ "scikit-learn>=1.6",
58
+ "scipy>=1.15"
59
+ ]
60
+ viz = [
61
+ "numpy>=2.2",
62
+ "pandas>=2.2",
63
+ "seaborn>=0.13"
64
+ ]
65
+
66
+ [tool.poetry.group.dev.dependencies]
54
67
  ipykernel = "*"
55
68
  ipywidgets = "*"
56
69
 
@@ -84,11 +97,8 @@ mkdocs-literate-nav = "*"
84
97
  [tool.poetry.group.typos.dependencies]
85
98
  typos = "*"
86
99
 
87
- [tool.poetry.scripts]
88
- not-again-ai = "not_again_ai.cli:entry_point"
89
-
90
100
  [build-system]
91
- requires = ["poetry-core"]
101
+ requires = ["poetry-core>=2.0.0,<3.0.0"]
92
102
  build-backend = "poetry.core.masonry.api"
93
103
 
94
104
  [tool.mypy]
@@ -114,6 +124,8 @@ select = [
114
124
  "B", # flake8-bugbear
115
125
  "C4", # flake8-comprehensions
116
126
  "ISC", # flake8-implicit-str-concat
127
+ "PIE", # flake8-pie
128
+ "PT", # flake-pytest-style
117
129
  "PTH", # flake8-use-pathlib
118
130
  "SIM", # flake8-simplify
119
131
  "TID", # flake8-tidy-imports
@@ -138,7 +150,7 @@ filterwarnings = [
138
150
  # When running tests, treat warnings as errors (e.g. -Werror).
139
151
  # See: https://docs.pytest.org/en/latest/reference/reference.html#confval-filterwarnings
140
152
  "error",
141
- # Add additional warning supressions as needed here. For example, if a third-party library
153
+ # Add additional warning suppressions as needed here. For example, if a third-party library
142
154
  # is throwing a deprecation warning that needs to be fixed upstream:
143
155
  # "ignore::DeprecationWarning:typer",
144
156
  "ignore::pytest.PytestUnraisableExceptionWarning"
@@ -0,0 +1,7 @@
1
+ import importlib.util
2
+
3
+ if importlib.util.find_spec("playwright") is None:
4
+ raise ImportError(
5
+ "not_again_ai.data requires the 'data' extra to be installed. "
6
+ "You can install it using 'pip install not_again_ai[data]'."
7
+ )
@@ -0,0 +1,56 @@
1
+ from loguru import logger
2
+ from playwright.sync_api import Browser, Playwright, sync_playwright
3
+
4
+
5
+ def create_browser(headless: bool = True) -> tuple[Playwright, Browser]:
6
+ """Creates and returns a new Playwright instance and browser.
7
+
8
+ Args:
9
+ headless (bool, optional): Whether to run the browser in headless mode. Defaults to True.
10
+
11
+ Returns:
12
+ tuple[Playwright, Browser]: A tuple containing the Playwright instance and browser.
13
+ """
14
+ pwright = sync_playwright().start()
15
+ browser = pwright.chromium.launch(
16
+ headless=headless,
17
+ chromium_sandbox=False,
18
+ timeout=15000,
19
+ )
20
+ return pwright, browser
21
+
22
+
23
+ def get_raw_web_content(url: str, browser: Browser | None = None, headless: bool = True) -> str:
24
+ """Fetches raw web content from a given URL using Playwright.
25
+
26
+ Args:
27
+ url (str): The URL to fetch content from.
28
+ browser (Browser | None, optional): An existing browser instance to use. Defaults to None.
29
+ headless (bool, optional): Whether to run the browser in headless mode. Defaults to True.
30
+
31
+ Returns:
32
+ str: The raw web content.
33
+ """
34
+ p = None
35
+ try:
36
+ if browser is None:
37
+ p, browser = create_browser(headless)
38
+
39
+ page = browser.new_page(
40
+ accept_downloads=False,
41
+ java_script_enabled=True,
42
+ viewport={"width": 1366, "height": 768},
43
+ user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.3",
44
+ )
45
+ page.goto(url)
46
+ content = page.content()
47
+ page.close()
48
+ return content
49
+ except Exception as e:
50
+ logger.error(f"Failed to get web content: {e}")
51
+ return ""
52
+ finally:
53
+ if browser:
54
+ browser.close()
55
+ if p:
56
+ p.stop()
@@ -0,0 +1,4 @@
1
+ from not_again_ai.llm.chat_completion.interface import chat_completion
2
+ from not_again_ai.llm.chat_completion.types import ChatCompletionRequest
3
+
4
+ __all__ = ["ChatCompletionRequest", "chat_completion"]
@@ -0,0 +1,32 @@
1
+ from collections.abc import Callable
2
+ from typing import Any
3
+
4
+ from not_again_ai.llm.chat_completion.providers.ollama_api import ollama_chat_completion
5
+ from not_again_ai.llm.chat_completion.providers.openai_api import openai_chat_completion
6
+ from not_again_ai.llm.chat_completion.types import ChatCompletionRequest, ChatCompletionResponse
7
+
8
+
9
+ def chat_completion(
10
+ request: ChatCompletionRequest,
11
+ provider: str,
12
+ client: Callable[..., Any],
13
+ ) -> ChatCompletionResponse:
14
+ """Get a chat completion response from the given provider. Currently supported providers:
15
+ - `openai` - OpenAI
16
+ - `azure_openai` - Azure OpenAI
17
+ - `ollama` - Ollama
18
+
19
+ Args:
20
+ request: Request parameter object
21
+ provider: The supported provider name
22
+ client: Client information, see the provider's implementation for what can be provided
23
+
24
+ Returns:
25
+ ChatCompletionResponse: The chat completion response.
26
+ """
27
+ if provider == "openai" or provider == "azure_openai":
28
+ return openai_chat_completion(request, client)
29
+ elif provider == "ollama":
30
+ return ollama_chat_completion(request, client)
31
+ else:
32
+ raise ValueError(f"Provider {provider} not supported")