not-again-ai 0.8.2.tar.gz → 0.9.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/PKG-INFO +9 -3
  2. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/README.md +6 -2
  3. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/pyproject.toml +4 -1
  4. not_again_ai-0.9.0/src/not_again_ai/local_llm/__init__.py +0 -0
  5. not_again_ai-0.9.0/src/not_again_ai/local_llm/huggingface/__init__.py +0 -0
  6. not_again_ai-0.9.0/src/not_again_ai/local_llm/huggingface/chat_completion.py +59 -0
  7. not_again_ai-0.9.0/src/not_again_ai/local_llm/huggingface/helpers.py +23 -0
  8. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/LICENSE +0 -0
  9. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/__init__.py +0 -0
  10. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/base/__init__.py +0 -0
  11. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/base/file_system.py +0 -0
  12. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/base/parallel.py +0 -0
  13. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/llm/__init__.py +0 -0
  14. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/llm/chat_completion.py +0 -0
  15. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/llm/ollama/__init__.py +0 -0
  16. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/llm/ollama/chat_completion.py +0 -0
  17. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/llm/ollama/ollama_client.py +0 -0
  18. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/llm/ollama/service.py +0 -0
  19. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/llm/openai_api/__init__.py +0 -0
  20. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/llm/openai_api/chat_completion.py +0 -0
  21. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/llm/openai_api/context_management.py +0 -0
  22. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/llm/openai_api/embeddings.py +0 -0
  23. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/llm/openai_api/openai_client.py +0 -0
  24. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/llm/openai_api/prompts.py +0 -0
  25. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/llm/openai_api/tokens.py +0 -0
  26. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/py.typed +0 -0
  27. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/statistics/__init__.py +0 -0
  28. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/statistics/dependence.py +0 -0
  29. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/viz/__init__.py +0 -0
  30. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/viz/barplots.py +0 -0
  31. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/viz/distributions.py +0 -0
  32. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/viz/scatterplot.py +0 -0
  33. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/viz/time_series.py +0 -0
  34. {not_again_ai-0.8.2 → not_again_ai-0.9.0}/src/not_again_ai/viz/utils.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: not-again-ai
- Version: 0.8.2
+ Version: 0.9.0
  Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
  Home-page: https://github.com/DaveCoDev/not-again-ai
  License: MIT
@@ -18,6 +18,7 @@ Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Classifier: Typing :: Typed
  Provides-Extra: llm
+ Provides-Extra: local-llm
  Provides-Extra: statistics
  Provides-Extra: viz
  Requires-Dist: numpy (==1.26.4) ; extra == "statistics" or extra == "viz"
@@ -29,6 +30,7 @@ Requires-Dist: scikit-learn (==1.4.2) ; extra == "statistics"
  Requires-Dist: scipy (==1.13.0) ; extra == "statistics"
  Requires-Dist: seaborn (==0.13.2) ; extra == "viz"
  Requires-Dist: tiktoken (==0.7.0) ; extra == "llm"
+ Requires-Dist: transformers (==4.41.1) ; extra == "local-llm"
  Project-URL: Documentation, https://github.com/DaveCoDev/not-again-ai
  Project-URL: Repository, https://github.com/DaveCoDev/not-again-ai
  Description-Content-Type: text/markdown
@@ -59,9 +61,11 @@ Requires: Python 3.11, or 3.12
  Install the entire package from [PyPI](https://pypi.org/project/not-again-ai/) with:

  ```bash
- $ pip install not_again_ai[llm,statistics,viz]
+ $ pip install not_again_ai[llm,local_llm,statistics,viz]
  ```

+ Note that the local LLM subpackage requires separate installations and will not work out of the box because it is highly hardware-dependent. Be sure to check the [notebooks](notebooks/local_llm/) for more details.
+
  The package is split into subpackages, so you can install only the parts you need. See the **[notebooks](notebooks)** for examples.
  * **Base only**: `pip install not_again_ai`
  * **LLM**: `pip install not_again_ai[llm]`
@@ -77,7 +81,9 @@ The package is split into subpackages, so you can install only the parts you need.
  ...
  Environment="OLLAMA_HOST=0.0.0.0"
  ```
- Now ollama will be available at `http://<local_address>:11434`
+ 1. Ollama will be available at `http://<local_address>:11434`
+ * **Local LLM**: `pip install not_again_ai[local_llm]`
+   - Most of this subpackage is hardware-dependent, so the extra only installs some generic dependencies. Be sure to check the [notebooks](notebooks/local_llm/) for details on what is available and how to install it.
  * **Statistics**: `pip install not_again_ai[statistics]`
  * **Visualization**: `pip install not_again_ai[viz]`
 
@@ -24,9 +24,11 @@ Requires: Python 3.11, or 3.12
  Install the entire package from [PyPI](https://pypi.org/project/not-again-ai/) with:

  ```bash
- $ pip install not_again_ai[llm,statistics,viz]
+ $ pip install not_again_ai[llm,local_llm,statistics,viz]
  ```

+ Note that the local LLM subpackage requires separate installations and will not work out of the box because it is highly hardware-dependent. Be sure to check the [notebooks](notebooks/local_llm/) for more details.
+
  The package is split into subpackages, so you can install only the parts you need. See the **[notebooks](notebooks)** for examples.
  * **Base only**: `pip install not_again_ai`
  * **LLM**: `pip install not_again_ai[llm]`
@@ -42,7 +44,9 @@ The package is split into subpackages, so you can install only the parts you need.
  ...
  Environment="OLLAMA_HOST=0.0.0.0"
  ```
- Now ollama will be available at `http://<local_address>:11434`
+ 1. Ollama will be available at `http://<local_address>:11434`
+ * **Local LLM**: `pip install not_again_ai[local_llm]`
+   - Most of this subpackage is hardware-dependent, so the extra only installs some generic dependencies. Be sure to check the [notebooks](notebooks/local_llm/) for details on what is available and how to install it.
  * **Statistics**: `pip install not_again_ai[statistics]`
  * **Visualization**: `pip install not_again_ai[viz]`
 
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "not-again-ai"
- version = "0.8.2"
+ version = "0.9.0"
  description = "Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place."
  authors = ["DaveCoDev <dave.co.dev@gmail.com>"]
  license = "MIT"
@@ -36,14 +36,17 @@ scipy = { version = "==1.13.0", optional = true }
  scikit-learn = { version = "==1.4.2", optional = true }
  seaborn = { version = "==0.13.2", optional = true }
  tiktoken = { version = "==0.7.0", optional = true }
+ transformers = { version = "==4.41.1", optional = true }

  [tool.poetry.extras]
  llm = ["ollama", "openai", "python-liquid", "tiktoken"]
+ local_llm = ["transformers"]
  statistics = ["numpy", "scikit-learn", "scipy"]
  viz = ["numpy", "pandas", "seaborn"]

  [tool.poetry.dev-dependencies]
  ipykernel = "*"
+ ipywidgets = "*"

  [tool.poetry.group.nox.dependencies]
  nox-poetry = "*"
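
One detail worth noting about the new extra: poetry publishes `local_llm` under its normalized name `local-llm` (as the PKG-INFO hunk above shows), and pip applies the same normalization to whatever spelling the user types, so `pip install "not_again_ai[local_llm]"` resolves to the same extra. A minimal sketch for confirming which extras an installed distribution advertises, assuming 0.9.0 is installed:

```python
from importlib.metadata import metadata

# Read the installed distribution's core metadata (the same fields as PKG-INFO).
md = metadata("not-again-ai")
print(md.get_all("Provides-Extra"))
# Expected for 0.9.0: ['llm', 'local-llm', 'statistics', 'viz']
```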
@@ -0,0 +1,59 @@
+ from pathlib import Path
+ from typing import Any
+
+ from PIL import Image
+
+
+ def chat_completion_image(
+     messages: list[dict[str, str]],
+     images: list[Path] | None,
+     model_processor: tuple[Any, Any],
+     max_tokens: int | None = None,
+     temperature: float = 0.7,
+ ) -> dict[str, Any]:
+     """A wrapper around vision language model inference for multimodal language models from Hugging Face.
+
+     Args:
+         messages (list[dict[str, str]]): A list of messages to send to the model.
+         images (list[Path] | None): A list of image paths to send to the model.
+         model_processor (tuple[Any, Any]): A tuple containing the model and processor objects.
+         max_tokens (int, optional): The maximum number of tokens to generate. Defaults to None.
+         temperature (float, optional): The temperature of the model. Increasing the temperature will make the model answer more creatively. Defaults to 0.7.
+
+     Returns:
+         dict[str, Any]: A dictionary with the following keys:
+             message (str): The content of the generated assistant message.
+             completion_tokens (int): The number of tokens used by the model to generate the completion.
+     """
+
+     model, processor = model_processor
+
+     prompt = processor.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+
+     if images:
+         image_objects = [Image.open(image) for image in images]
+         inputs = processor(prompt, image_objects, return_tensors="pt").to("cuda:0")
+     else:
+         inputs = processor(prompt, return_tensors="pt").to("cuda:0")
+
+     generation_args = {
+         "max_new_tokens": max_tokens,
+         "temperature": temperature,
+         "num_beams": 1,
+         "do_sample": True,
+     }
+
+     generate_ids = model.generate(**inputs, eos_token_id=processor.tokenizer.eos_token_id, **generation_args)
+
+     # Remove the prompt tokens so only newly generated tokens remain
+     generate_ids = generate_ids[:, inputs["input_ids"].shape[1] :]
+
+     # Count the generated tokens
+     completion_tokens = generate_ids.shape[1]
+
+     response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
+
+     response_data: dict[str, Any] = {}
+     response_data["message"] = response[0]
+     response_data["completion_tokens"] = completion_tokens
+     return response_data
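
For orientation, here is a minimal usage sketch of the new wrapper, using the loader helpers added in the next file. The model id and the `<|image_1|>` placeholder follow Phi-3-vision conventions and are illustrative assumptions only, not something this release pins down; any multimodal checkpoint that `AutoModelForCausalLM`/`AutoProcessor` can load should slot in the same way. Note that the wrapper moves inputs to `cuda:0`, so a CUDA device is required, and setting `max_tokens` explicitly is advisable since it is passed straight through to `max_new_tokens`.

```python
from pathlib import Path

from not_again_ai.local_llm.huggingface.chat_completion import chat_completion_image
from not_again_ai.local_llm.huggingface.helpers import load_model, load_processor

# Assumption: an illustrative multimodal model id, not pinned by this release.
model_id = "microsoft/Phi-3-vision-128k-instruct"
model = load_model(model_id, device_map="cuda")
processor = load_processor(model_id)

# The image placeholder token is model-specific; "<|image_1|>" follows
# Phi-3-vision's chat template.
messages = [{"role": "user", "content": "<|image_1|>\nWhat is shown in this image?"}]
response = chat_completion_image(
    messages,
    images=[Path("example.png")],  # hypothetical local image path
    model_processor=(model, processor),
    max_tokens=128,
)
print(response["message"])
print(response["completion_tokens"])
```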
@@ -0,0 +1,23 @@
+ from typing import Any
+
+ from transformers import AutoModelForCausalLM, AutoProcessor
+
+
+ def load_model(model_id: str, device_map: str = "cuda", trust_remote_code: bool = True) -> Any:
+     """Load a model from Hugging Face."""
+     model = AutoModelForCausalLM.from_pretrained(
+         model_id,
+         device_map=device_map,
+         trust_remote_code=trust_remote_code,
+         torch_dtype="auto",
+     )
+     return model
+
+
+ def load_processor(model_id: str, trust_remote_code: bool = True) -> Any:
+     """Load a processor from Hugging Face. This is typically used for multimodal language models."""
+     processor = AutoProcessor.from_pretrained(
+         model_id,
+         trust_remote_code=trust_remote_code,
+     )
+     return processor
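
`load_model` forwards `device_map` to `from_pretrained`, so the usual accelerate placements apply, and `torch_dtype="auto"` picks up the checkpoint's native dtype. A small sketch of the loader knobs, with the same illustrative model id as above:

```python
from not_again_ai.local_llm.huggingface.helpers import load_model, load_processor

model_id = "microsoft/Phi-3-vision-128k-instruct"  # illustrative, not pinned by this release

# "cuda" places the whole model on the first GPU; "auto" lets accelerate shard it
# across available devices (and offload to CPU if needed).
model = load_model(model_id, device_map="auto")

# trust_remote_code defaults to True in these helpers; pass False for
# architectures transformers supports natively if executing repository
# code is a concern.
processor = load_processor(model_id, trust_remote_code=True)

model_processor = (model, processor)  # the tuple chat_completion_image expects
```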