not-again-ai 0.13.0.tar.gz → 0.14.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/PKG-INFO +58 -37
  2. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/README.md +49 -30
  3. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/pyproject.toml +11 -8
  4. not_again_ai-0.14.0/src/not_again_ai/data/__init__.py +7 -0
  5. not_again_ai-0.14.0/src/not_again_ai/data/web.py +56 -0
  6. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/llm/openai_api/chat_completion.py +140 -1
  7. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/LICENSE +0 -0
  8. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/__init__.py +0 -0
  9. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/base/__init__.py +0 -0
  10. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/base/file_system.py +0 -0
  11. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/base/parallel.py +0 -0
  12. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/llm/__init__.py +0 -0
  13. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/llm/gh_models/__init__.py +0 -0
  14. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/llm/gh_models/azure_ai_client.py +0 -0
  15. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/llm/gh_models/chat_completion.py +0 -0
  16. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/llm/openai_api/__init__.py +0 -0
  17. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/llm/openai_api/context_management.py +0 -0
  18. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/llm/openai_api/embeddings.py +0 -0
  19. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/llm/openai_api/openai_client.py +0 -0
  20. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/llm/openai_api/prompts.py +0 -0
  21. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/llm/openai_api/tokens.py +0 -0
  22. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/local_llm/__init__.py +0 -0
  23. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/local_llm/chat_completion.py +0 -0
  24. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/local_llm/huggingface/__init__.py +0 -0
  25. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/local_llm/huggingface/chat_completion.py +0 -0
  26. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/local_llm/huggingface/helpers.py +0 -0
  27. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/local_llm/ollama/__init__.py +0 -0
  28. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/local_llm/ollama/chat_completion.py +0 -0
  29. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/local_llm/ollama/model_mapping.py +0 -0
  30. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/local_llm/ollama/ollama_client.py +0 -0
  31. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/local_llm/ollama/service.py +0 -0
  32. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/local_llm/ollama/tokens.py +0 -0
  33. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/local_llm/prompts.py +0 -0
  34. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/local_llm/tokens.py +0 -0
  35. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/py.typed +0 -0
  36. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/statistics/__init__.py +0 -0
  37. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/statistics/dependence.py +0 -0
  38. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/viz/__init__.py +0 -0
  39. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/viz/barplots.py +0 -0
  40. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/viz/distributions.py +0 -0
  41. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/viz/scatterplot.py +0 -0
  42. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/viz/time_series.py +0 -0
  43. {not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/viz/utils.py +0 -0
{not_again_ai-0.13.0 → not_again_ai-0.14.0}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: not-again-ai
- Version: 0.13.0
+ Version: 0.14.0
  Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
  Home-page: https://github.com/DaveCoDev/not-again-ai
  License: MIT
@@ -17,24 +17,26 @@ Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Classifier: Typing :: Typed
+ Provides-Extra: data
  Provides-Extra: llm
  Provides-Extra: local-llm
  Provides-Extra: statistics
  Provides-Extra: viz
- Requires-Dist: azure-ai-inference (==1.0.0b4) ; extra == "llm"
- Requires-Dist: azure-identity (>=1.18,<2.0) ; extra == "llm"
+ Requires-Dist: azure-ai-inference (==1.0.0b5) ; extra == "llm"
+ Requires-Dist: azure-identity (>=1.19,<2.0) ; extra == "llm"
  Requires-Dist: jinja2 (>=3.1,<4.0) ; extra == "local-llm"
- Requires-Dist: loguru (==0.7.2)
+ Requires-Dist: loguru (>=0.7,<0.8)
  Requires-Dist: numpy (>=2.1,<3.0) ; extra == "statistics" or extra == "viz"
  Requires-Dist: ollama (>=0.3,<0.4) ; extra == "local-llm"
- Requires-Dist: openai (>=1.51,<2.0) ; extra == "llm"
+ Requires-Dist: openai (>=1.52,<2.0) ; extra == "llm"
  Requires-Dist: pandas (>=2.2,<3.0) ; extra == "viz"
- Requires-Dist: pydantic (>=2.9,<3.0) ; extra == "llm"
+ Requires-Dist: pydantic (>=2.9,<3.0)
+ Requires-Dist: pytest-playwright (>=0.5,<0.6) ; extra == "data"
  Requires-Dist: python-liquid (>=1.12,<2.0) ; extra == "llm"
  Requires-Dist: scikit-learn (>=1.5,<2.0) ; extra == "statistics"
  Requires-Dist: scipy (>=1.14,<2.0) ; extra == "statistics"
  Requires-Dist: seaborn (>=0.13,<0.14) ; extra == "viz"
- Requires-Dist: tiktoken (>=0.7,<0.8) ; extra == "llm"
+ Requires-Dist: tiktoken (>=0.8,<0.9) ; extra == "llm"
  Requires-Dist: transformers (>=4.45,<5.0) ; extra == "local-llm"
  Project-URL: Documentation, https://github.com/DaveCoDev/not-again-ai
  Project-URL: Repository, https://github.com/DaveCoDev/not-again-ai
@@ -72,34 +74,53 @@ $ pip install not_again_ai[llm,local_llm,statistics,viz]
  Note that local LLM requires separate installations and will not work out of the box due to how hardware dependent it is. Be sure to check the [notebooks](notebooks/local_llm/) for more details.

  The package is split into subpackages, so you can install only the parts you need.
- * **Base only**: `pip install not_again_ai`
- * **LLM**: `pip install not_again_ai[llm]`
-   1. OpenAI API
-      1. Go to https://platform.openai.com/settings/profile?tab=api-keys to get your API key.
-      1. (Optional) Set the `OPENAI_API_KEY` and the `OPENAI_ORG_ID` environment variables.
-   1. Azure OpenAI (AOAI)
-      1. Using AOAI requires using Entra ID authentication. See https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity for how to set this up for your AOAI deployment.
-         1. Requires the correct role assigned to your user account and being signed into the Azure CLI.
-      1. (Optional) Set the `AZURE_OPENAI_ENDPOINT` environment variable.
-   1. GitHub Models
-      1. Get a Personal Access Token from https://github.com/settings/tokens and set the `GITHUB_TOKEN` environment variable. The token does not need any permissions.
-      1. Check the [Github Marketplace](https://github.com/marketplace/models) to see which models are available.
- * **Local LLM**: `pip install not_again_ai[llm,local_llm]`
-   1. Some HuggingFace transformers tokenizers are gated behind access requests. If you wish to use these, you will need to request access from HuggingFace on the model card.
-      1. Then set the `HF_TOKEN` environment variable to your HuggingFace API token which can be found here: https://huggingface.co/settings/tokens
-   2. If you wish to use Ollama:
-      1. Follow the instructions at https://github.com/ollama/ollama to install Ollama for your system.
-      2. (Optional) [Add Ollama as a startup service (recommended)](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended)
-      3. (Optional) To make the Ollama service accessible on your local network from a Linux server, add the following to the `/etc/systemd/system/ollama.service` file which will make Ollama available at `http://<local_address>:11434`:
-         ```bash
-         [Service]
-         ...
-         Environment="OLLAMA_HOST=0.0.0.0"
-         ```
-      4. It is recommended to always have the latest version of Ollama. To update Ollama check the [docs](https://github.com/ollama/ollama/blob/main/docs/). The command for Linux is: `curl -fsSL https://ollama.com/install.sh | sh`
-   3. HuggingFace transformers and other requirements are hardware dependent so for providers other than Ollama, this only installs some generic dependencies. Check the [notebooks](notebooks/local_llm/) for more details on what is available and how to install it.
- * **Statistics**: `pip install not_again_ai[statistics]`
- * **Visualization**: `pip install not_again_ai[viz]`
+
+ ### Base
+ 1. `pip install not_again_ai`
+
+
+ ### Data
+ 1. `pip install not_again_ai[data]`
+ 1. `playwright install` to download the browser binaries.
+
+
+ ### LLM
+ 1. `pip install not_again_ai[llm]`
+ 1. Setup OpenAI API
+    1. Go to https://platform.openai.com/settings/profile?tab=api-keys to get your API key.
+    1. (Optional) Set the `OPENAI_API_KEY` and the `OPENAI_ORG_ID` environment variables.
+ 1. Setup Azure OpenAI (AOAI)
+    1. Using AOAI requires using Entra ID authentication. See https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity for how to set this up for your AOAI deployment.
+       * Requires the correct role assigned to your user account and being signed into the Azure CLI.
+    1. (Optional) Set the `AZURE_OPENAI_ENDPOINT` environment variable.
+ 1. Setup GitHub Models
+    1. Get a Personal Access Token from https://github.com/settings/tokens and set the `GITHUB_TOKEN` environment variable. The token does not need any permissions.
+    1. Check the [Github Marketplace](https://github.com/marketplace/models) to see which models are available.
+
+
+ ### Local LLM
+ 1. `pip install not_again_ai[llm,local_llm]`
+ 1. Some HuggingFace transformers tokenizers are gated behind access requests. If you wish to use these, you will need to request access from HuggingFace on the model card.
+    * Then set the `HF_TOKEN` environment variable to your HuggingFace API token which can be found here: https://huggingface.co/settings/tokens
+ 1. If you wish to use Ollama:
+    1. Follow the instructions at https://github.com/ollama/ollama to install Ollama for your system.
+    1. (Optional) [Add Ollama as a startup service (recommended)](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended)
+    1. (Optional) To make the Ollama service accessible on your local network from a Linux server, add the following to the `/etc/systemd/system/ollama.service` file which will make Ollama available at `http://<local_address>:11434`:
+       ```bash
+       [Service]
+       ...
+       Environment="OLLAMA_HOST=0.0.0.0"
+       ```
+    1. It is recommended to always have the latest version of Ollama. To update Ollama check the [docs](https://github.com/ollama/ollama/blob/main/docs/). The command for Linux is: `curl -fsSL https://ollama.com/install.sh | sh`
+ 1. HuggingFace transformers and other requirements are hardware dependent so for providers other than Ollama, this only installs some generic dependencies. Check the [notebooks](notebooks/local_llm/) for more details on what is available and how to install it.
+
+
+ ### Statistics
+ 1. `pip install not_again_ai[statistics]`
+
+
+ ### Visualization
+ 1. `pip install not_again_ai[viz]`


  # Development Information
@@ -229,10 +250,10 @@ areas of the project that are currently not tested.

  pytest and code coverage are configured in [`pyproject.toml`](./pyproject.toml).

- To pass arguments to `pytest` through `nox`:
+ To run selected tests:

  ```bash
- (.venv) $ nox -s test -- -k invalid_factorial
+ (.venv) $ nox -s test -- -k "test_web"
  ```

  ## Code Style Checking
{not_again_ai-0.13.0 → not_again_ai-0.14.0}/README.md

@@ -30,34 +30,53 @@ $ pip install not_again_ai[llm,local_llm,statistics,viz]
  Note that local LLM requires separate installations and will not work out of the box due to how hardware dependent it is. Be sure to check the [notebooks](notebooks/local_llm/) for more details.

  The package is split into subpackages, so you can install only the parts you need.
- * **Base only**: `pip install not_again_ai`
- * **LLM**: `pip install not_again_ai[llm]`
-   1. OpenAI API
-      1. Go to https://platform.openai.com/settings/profile?tab=api-keys to get your API key.
-      1. (Optional) Set the `OPENAI_API_KEY` and the `OPENAI_ORG_ID` environment variables.
-   1. Azure OpenAI (AOAI)
-      1. Using AOAI requires using Entra ID authentication. See https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity for how to set this up for your AOAI deployment.
-         1. Requires the correct role assigned to your user account and being signed into the Azure CLI.
-      1. (Optional) Set the `AZURE_OPENAI_ENDPOINT` environment variable.
-   1. GitHub Models
-      1. Get a Personal Access Token from https://github.com/settings/tokens and set the `GITHUB_TOKEN` environment variable. The token does not need any permissions.
-      1. Check the [Github Marketplace](https://github.com/marketplace/models) to see which models are available.
- * **Local LLM**: `pip install not_again_ai[llm,local_llm]`
-   1. Some HuggingFace transformers tokenizers are gated behind access requests. If you wish to use these, you will need to request access from HuggingFace on the model card.
-      1. Then set the `HF_TOKEN` environment variable to your HuggingFace API token which can be found here: https://huggingface.co/settings/tokens
-   2. If you wish to use Ollama:
-      1. Follow the instructions at https://github.com/ollama/ollama to install Ollama for your system.
-      2. (Optional) [Add Ollama as a startup service (recommended)](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended)
-      3. (Optional) To make the Ollama service accessible on your local network from a Linux server, add the following to the `/etc/systemd/system/ollama.service` file which will make Ollama available at `http://<local_address>:11434`:
-         ```bash
-         [Service]
-         ...
-         Environment="OLLAMA_HOST=0.0.0.0"
-         ```
-      4. It is recommended to always have the latest version of Ollama. To update Ollama check the [docs](https://github.com/ollama/ollama/blob/main/docs/). The command for Linux is: `curl -fsSL https://ollama.com/install.sh | sh`
-   3. HuggingFace transformers and other requirements are hardware dependent so for providers other than Ollama, this only installs some generic dependencies. Check the [notebooks](notebooks/local_llm/) for more details on what is available and how to install it.
- * **Statistics**: `pip install not_again_ai[statistics]`
- * **Visualization**: `pip install not_again_ai[viz]`
+
+ ### Base
+ 1. `pip install not_again_ai`
+
+
+ ### Data
+ 1. `pip install not_again_ai[data]`
+ 1. `playwright install` to download the browser binaries.
+
+
+ ### LLM
+ 1. `pip install not_again_ai[llm]`
+ 1. Setup OpenAI API
+    1. Go to https://platform.openai.com/settings/profile?tab=api-keys to get your API key.
+    1. (Optional) Set the `OPENAI_API_KEY` and the `OPENAI_ORG_ID` environment variables.
+ 1. Setup Azure OpenAI (AOAI)
+    1. Using AOAI requires using Entra ID authentication. See https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity for how to set this up for your AOAI deployment.
+       * Requires the correct role assigned to your user account and being signed into the Azure CLI.
+    1. (Optional) Set the `AZURE_OPENAI_ENDPOINT` environment variable.
+ 1. Setup GitHub Models
+    1. Get a Personal Access Token from https://github.com/settings/tokens and set the `GITHUB_TOKEN` environment variable. The token does not need any permissions.
+    1. Check the [Github Marketplace](https://github.com/marketplace/models) to see which models are available.
+
+
+ ### Local LLM
+ 1. `pip install not_again_ai[llm,local_llm]`
+ 1. Some HuggingFace transformers tokenizers are gated behind access requests. If you wish to use these, you will need to request access from HuggingFace on the model card.
+    * Then set the `HF_TOKEN` environment variable to your HuggingFace API token which can be found here: https://huggingface.co/settings/tokens
+ 1. If you wish to use Ollama:
+    1. Follow the instructions at https://github.com/ollama/ollama to install Ollama for your system.
+    1. (Optional) [Add Ollama as a startup service (recommended)](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended)
+    1. (Optional) To make the Ollama service accessible on your local network from a Linux server, add the following to the `/etc/systemd/system/ollama.service` file which will make Ollama available at `http://<local_address>:11434`:
+       ```bash
+       [Service]
+       ...
+       Environment="OLLAMA_HOST=0.0.0.0"
+       ```
+    1. It is recommended to always have the latest version of Ollama. To update Ollama check the [docs](https://github.com/ollama/ollama/blob/main/docs/). The command for Linux is: `curl -fsSL https://ollama.com/install.sh | sh`
+ 1. HuggingFace transformers and other requirements are hardware dependent so for providers other than Ollama, this only installs some generic dependencies. Check the [notebooks](notebooks/local_llm/) for more details on what is available and how to install it.
+
+
+ ### Statistics
+ 1. `pip install not_again_ai[statistics]`
+
+
+ ### Visualization
+ 1. `pip install not_again_ai[viz]`


  # Development Information
@@ -187,10 +206,10 @@ areas of the project that are currently not tested.

  pytest and code coverage are configured in [`pyproject.toml`](./pyproject.toml).

- To pass arguments to `pytest` through `nox`:
+ To run selected tests:

  ```bash
- (.venv) $ nox -s test -- -k invalid_factorial
+ (.venv) $ nox -s test -- -k "test_web"
  ```

  ## Code Style Checking
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "not-again-ai"
3
- version = "0.13.0"
3
+ version = "0.14.0"
4
4
  description = "Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place."
5
5
  authors = ["DaveCoDev <dave.co.dev@gmail.com>"]
6
6
  license = "MIT"
@@ -26,26 +26,29 @@ classifiers = [
  # result in an old version being resolved/locked.
  python = "^3.11 || ^3.12"

- loguru = { version = "==0.7.2" }
+ loguru = { version = "^0.7" }
+ pydantic = { version = "^2.9"}
+

  # Optional dependencies are defined here, and groupings are defined below.
- azure-ai-inference = { version = "==1.0.0b4", optional = true }
- azure-identity = { version = "^1.18", optional = true }
+ azure-ai-inference = { version = "==1.0.0b5", optional = true }
+ azure-identity = { version = "^1.19", optional = true }
  jinja2 = { version = "^3.1", optional = true }
  numpy = { version = "^2.1", optional = true }
  ollama = { version = "^0.3", optional = true }
- openai = { version = "^1.51", optional = true }
+ openai = { version = "^1.52", optional = true }
  pandas = { version = "^2.2", optional = true }
- pydantic = { version = "^2.9", optional = true }
+ pytest-playwright = { version = "^0.5", optional = true }
  python-liquid = { version = "^1.12", optional = true }
  scipy = { version = "^1.14", optional = true }
  scikit-learn = { version = "^1.5", optional = true }
  seaborn = { version = "^0.13", optional = true }
- tiktoken = { version = "^0.7", optional = true }
+ tiktoken = { version = "^0.8", optional = true }
  transformers = { version = "^4.45", optional = true }

  [tool.poetry.extras]
- llm = ["azure-ai-inference", "azure-identity", "openai", "pydantic", "python-liquid", "tiktoken"]
+ data = ["pytest-playwright"]
+ llm = ["azure-ai-inference", "azure-identity", "openai", "python-liquid", "tiktoken"]
  local_llm = ["jinja2", "ollama", "transformers"]
  statistics = ["numpy", "scikit-learn", "scipy"]
  viz = ["numpy", "pandas", "seaborn"]
not_again_ai-0.14.0/src/not_again_ai/data/__init__.py

@@ -0,0 +1,7 @@
+ import importlib.util
+
+ if importlib.util.find_spec("playwright") is None:
+     raise ImportError(
+         "not_again_ai.data requires the 'data' extra to be installed. "
+         "You can install it using 'pip install not_again_ai[data]'."
+     )
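
The new `data` subpackage fails fast at import time if the optional Playwright dependency is missing. A minimal sketch of what a caller sees without the `data` extra installed (the `try`/`except` here is illustrative, not part of the package):

```python
# Illustrative only: importing the subpackage without playwright present
# triggers the ImportError raised in not_again_ai/data/__init__.py above.
try:
    import not_again_ai.data  # noqa: F401
except ImportError as err:
    print(err)  # points the user at: pip install not_again_ai[data]
```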
not_again_ai-0.14.0/src/not_again_ai/data/web.py

@@ -0,0 +1,56 @@
+ from loguru import logger
+ from playwright.sync_api import Browser, Playwright, sync_playwright
+
+
+ def create_browser(headless: bool = True) -> tuple[Playwright, Browser]:
+     """Creates and returns a new Playwright instance and browser.
+
+     Args:
+         headless (bool, optional): Whether to run the browser in headless mode. Defaults to True.
+
+     Returns:
+         tuple[Playwright, Browser]: A tuple containing the Playwright instance and browser.
+     """
+     pwright = sync_playwright().start()
+     browser = pwright.chromium.launch(
+         headless=headless,
+         chromium_sandbox=False,
+         timeout=15000,
+     )
+     return pwright, browser
+
+
+ def get_raw_web_content(url: str, browser: Browser | None = None, headless: bool = True) -> str:
+     """Fetches raw web content from a given URL using Playwright.
+
+     Args:
+         url (str): The URL to fetch content from.
+         browser (Browser | None, optional): An existing browser instance to use. Defaults to None.
+         headless (bool, optional): Whether to run the browser in headless mode. Defaults to True.
+
+     Returns:
+         str: The raw web content.
+     """
+     p = None
+     try:
+         if browser is None:
+             p, browser = create_browser(headless)
+
+         page = browser.new_page(
+             accept_downloads=False,
+             java_script_enabled=True,
+             viewport={"width": 1366, "height": 768},
+             user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.3",
+         )
+         page.goto(url)
+         content = page.content()
+         page.close()
+         return content
+     except Exception as e:
+         logger.error(f"Failed to get web content: {e}")
+         return ""
+     finally:
+         if browser:
+             browser.close()
+         if p:
+             p.stop()
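
A minimal usage sketch for the new `get_raw_web_content` helper, assuming the `data` extra is installed and `playwright install` has been run; the URL is a placeholder:

```python
from not_again_ai.data.web import get_raw_web_content

# One-shot fetch: with no browser argument, the function starts its own
# Playwright instance and tears it down in its finally block.
html = get_raw_web_content("https://example.com")
print(html[:200] if html else "fetch failed; the function returns '' on error")
```

Note that the `finally` block closes whatever browser is in scope, so as written even a caller-supplied `Browser` is closed after a single call.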
{not_again_ai-0.13.0 → not_again_ai-0.14.0}/src/not_again_ai/llm/openai_api/chat_completion.py

@@ -1,3 +1,4 @@
+ from collections.abc import Generator
  import contextlib
  import json
  import time
@@ -32,7 +33,7 @@ def chat_completion(
          model (str): ID of the model to use. See the model endpoint compatibility table:
              https://platform.openai.com/docs/models/model-endpoint-compatibility
              for details on which models work with the Chat API.
-         client (OpenAI): An instance of the OpenAI or AzureOpenAI client.
+         client (OpenAI | AzureOpenAI | Any): An instance of the OpenAI or AzureOpenAI client.
              If anything else is provided, we assume that it follows the OpenAI spec and call it by passing kwargs directly.
              For example you can provide something like:
              ```
@@ -198,3 +199,141 @@
          del response_data["choices"]

      return response_data
+
+
+ def chat_completion_stream(
+     messages: list[dict[str, Any]],
+     model: str,
+     client: OpenAI | AzureOpenAI | Any,
+     tools: list[dict[str, Any]] | None = None,
+     tool_choice: str = "auto",
+     max_tokens: int | None = None,
+     temperature: float = 0.7,
+     seed: int | None = None,
+     **kwargs: Any,
+ ) -> Generator[dict[str, Any], None, None]:
+     """Stream a chat completion from the OpenAI API.
+
+     Args:
+         messages (list[dict[str, Any]]): The messages to send to the model.
+         model (str): The model to use for the chat completion.
+         client (OpenAI | AzureOpenAI | Any): The client to use to send the request.
+             If anything else is provided, we assume that it follows the OpenAI spec and call it by passing kwargs directly.
+             For example you can provide something like:
+             ```
+             def custom_client(**kwargs) -> Generator[dict[str, Any], None, None]:  # type: ignore
+                 client = openai_client()
+                 completion = client.chat.completions.create(**kwargs)
+                 for chunk in completion:
+                     yield chunk.to_dict()
+             ```
+         tools (list[dict[str, Any]], optional): A list of tools the model may call.
+             Use this to provide a list of functions the model may generate JSON inputs for. Defaults to None.
+         tool_choice (str, optional): The tool choice to use. Can be "auto", "required", "none", or a specific function name.
+             Note the function name cannot be any of "auto", "required", or "none". Defaults to "auto".
+         max_tokens (int | None): The maximum number of tokens to generate.
+         temperature (float): The temperature to use for the chat completion.
+         seed (int, optional): If specified, OpenAI will make a best effort to sample deterministically,
+             such that repeated requests with the same `seed` and parameters should return the same result.
+             Does not currently return `system_fingerprint`.
+
+     Returns:
+         Generator[dict[str, Any], None, None]: A generator of chunks of the chat completion.
+             Each chunk is a dictionary with the following keys:
+                 role (str): The role of the chunk. Can be "assistant", "tool", or "usage".
+                 content (str): The content of the chunk.
+                 tool_name (str | None): The name of the tool called by the model.
+                 tool_call_id (str | None): The ID of the tool call.
+                 completion_tokens (int | None): The number of tokens used by the model to generate the completion.
+                 prompt_tokens (int | None): The number of tokens in the messages sent to the model.
+     """
+
+     class ChatCompletionStreamParser:
+         def __init__(self) -> None:
+             # Remembers if we are currently streaming an assistant message or tool call
+             self.last_type: str = ""
+             self.last_tool_name: str | None = None
+             self.last_tool_call_id: str | None = None
+
+         def process_chunk(self, chunk: dict[str, Any]) -> dict[str, Any] | None:
+             """Convert the current chunk into a more digestible format
+             {
+                 "role": Literal["assistant", "tool", "usage"],
+                 "content": str,
+                 "tool_name": str | None,
+                 "tool_call_id": str | None,
+                 "completion_tokens": int | None,
+                 "prompt_tokens": int | None,
+             }
+             """
+             processed_chunk: dict[str, Any] = {}
+             if chunk["choices"]:
+                 choice = chunk["choices"][0]
+                 # This checks if it's just a regular message currently being streamed
+                 if choice["delta"].get("role", "") and choice["delta"].get("tool_calls", None) is None:
+                     if choice["delta"]["role"] != self.last_type:
+                         self.last_type = choice["delta"]["role"]
+                         processed_chunk["role"] = self.last_type
+                         if not choice["delta"]["content"]:
+                             processed_chunk["content"] = ""
+                         else:
+                             processed_chunk["content"] = choice["delta"]["content"]
+                     else:
+                         processed_chunk["role"] = self.last_type
+                 elif choice["delta"].get("tool_calls", None):
+                     # tool_calls will always be present if the model is calling a tool
+                     tool_call = choice["delta"]["tool_calls"][0]
+                     if tool_call["function"].get("name"):
+                         self.last_type = "tool"
+                         self.last_tool_name = tool_call["function"]["name"]
+                         self.last_tool_call_id = tool_call["id"]
+                     processed_chunk["role"] = "tool"
+                     processed_chunk["content"] = tool_call["function"]["arguments"]
+                     processed_chunk["tool_name"] = self.last_tool_name
+                     processed_chunk["tool_call_id"] = self.last_tool_call_id
+                 elif choice["delta"].get("content", ""):
+                     # This is the case after the first regular assistant message
+                     processed_chunk["role"] = self.last_type
+                     processed_chunk["content"] = choice["delta"]["content"]
+             else:
+                 if chunk.get("usage"):
+                     processed_chunk["role"] = "usage"
+                     processed_chunk["completion_tokens"] = chunk["usage"]["completion_tokens"]
+                     processed_chunk["prompt_tokens"] = chunk["usage"]["prompt_tokens"]
+                 else:
+                     return None
+             return processed_chunk
+
+     kwargs.update(
+         {
+             "messages": messages,
+             "model": model,
+             "max_tokens": max_tokens,
+             "temperature": temperature,
+             "stream": True,
+             "stream_options": {"include_usage": True},
+         }
+     )
+
+     if tools is not None:
+         kwargs["tools"] = tools
+         if tool_choice not in ["none", "auto", "required"]:
+             kwargs["tool_choice"] = {"type": "function", "function": {"name": tool_choice}}
+         else:
+             kwargs["tool_choice"] = tool_choice
+
+     if seed is not None:
+         kwargs["seed"] = seed
+
+     if isinstance(client, OpenAI | AzureOpenAI):
+         response = client.chat.completions.create(**kwargs)
+     else:
+         response = client(**kwargs)
+
+     parser = ChatCompletionStreamParser()
+     for chunk in response:
+         if isinstance(client, OpenAI | AzureOpenAI):
+             chunk = chunk.to_dict()
+         processed_chunk = parser.process_chunk(chunk)
+         if processed_chunk:
+             yield processed_chunk
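
A minimal consumption sketch for the new `chat_completion_stream`, assuming the package's `openai_client()` helper and a configured `OPENAI_API_KEY`; the model name is a placeholder:

```python
from not_again_ai.llm.openai_api.chat_completion import chat_completion_stream
from not_again_ai.llm.openai_api.openai_client import openai_client

client = openai_client()
messages = [{"role": "user", "content": "Name three planets."}]

for chunk in chat_completion_stream(messages, model="gpt-4o-mini", client=client):
    if chunk["role"] == "assistant":
        # Assistant text arrives as incremental deltas.
        print(chunk.get("content", ""), end="", flush=True)
    elif chunk["role"] == "tool":
        # Tool-call argument fragments, tagged with tool_name and tool_call_id.
        print(chunk["content"], end="", flush=True)
    elif chunk["role"] == "usage":
        # The final chunk carries token counts (enabled via stream_options include_usage).
        print(f"\n[{chunk['prompt_tokens']} prompt / {chunk['completion_tokens']} completion tokens]")
```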