not_again_ai-0.8.1-py3-none-any.whl → not_again_ai-0.9.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- /dev/null
+++ not_again_ai/local_llm/huggingface/chat_completion.py
@@ -0,0 +1,59 @@
+ from pathlib import Path
+ from typing import Any
+
+ from PIL import Image
+
+
+ def chat_completion_image(
+     messages: list[dict[str, str]],
+     images: list[Path] | None,
+     model_processor: tuple[Any, Any],
+     max_tokens: int | None = None,
+     temperature: float = 0.7,
+ ) -> dict[str, Any]:
+     """A wrapper around vision language model inference for multimodal language models from Hugging Face.
+
+     Args:
+         messages (list[dict[str, str]]): A list of messages to send to the model.
+         images (list[Path] | None): A list of image paths to send to the model.
+         model_processor (tuple[Any, Any]): A tuple containing the model and processor objects.
+         max_tokens (int, optional): The maximum number of tokens to generate. Defaults to None.
+         temperature (float, optional): The temperature of the model. Increasing the temperature will make the model answer more creatively. Defaults to 0.7.
+
+     Returns:
+         dict[str, Any]: A dictionary with the following keys:
+             message (str): The content of the generated assistant message.
+             completion_tokens (int): The number of tokens used by the model to generate the completion.
+     """
+
+     model, processor = model_processor
+
+     prompt = processor.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+
+     if images:
+         image_objects = [Image.open(image) for image in images]
+         inputs = processor(prompt, image_objects, return_tensors="pt").to("cuda:0")
+     else:
+         inputs = processor(prompt, return_tensors="pt").to("cuda:0")
+
+     generation_args = {
+         "max_new_tokens": max_tokens,
+         "temperature": temperature,
+         "num_beams": 1,
+         "do_sample": True,
+     }
+
+     generate_ids = model.generate(**inputs, eos_token_id=processor.tokenizer.eos_token_id, **generation_args)
+
+     # Remove the input tokens so only the newly generated tokens remain
+     generate_ids = generate_ids[:, inputs["input_ids"].shape[1] :]
+
+     # Get the number of generated tokens
+     completion_tokens = generate_ids.shape[1]
+
+     response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
+
+     response_data: dict[str, Any] = {}
+     response_data["message"] = response[0]
+     response_data["completion_tokens"] = completion_tokens
+     return response_data
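For orientation, here is a minimal usage sketch of the new `chat_completion_image` function, paired with the `load_model`/`load_processor` helpers added in the next hunk. The model id, image path, and image-placeholder syntax are illustrative assumptions, not part of this diff; note the function requires a CUDA device, since it moves inputs to `cuda:0` unconditionally.

```python
# Minimal sketch, assuming a CUDA-capable GPU and a multimodal model whose
# processor accepts (prompt, images). The model id and file path are placeholders.
from pathlib import Path

from not_again_ai.local_llm.huggingface.chat_completion import chat_completion_image
from not_again_ai.local_llm.huggingface.helpers import load_model, load_processor

model_id = "microsoft/Phi-3-vision-128k-instruct"  # hypothetical model choice
model_processor = (load_model(model_id), load_processor(model_id))

# Messages follow the usual Hugging Face chat-template shape; the "<|image_1|>"
# placeholder syntax is model-specific and only an example here.
messages = [{"role": "user", "content": "<|image_1|>\nDescribe this image."}]

response = chat_completion_image(
    messages,
    images=[Path("photo.jpg")],  # placeholder path
    model_processor=model_processor,
    max_tokens=200,
)
print(response["message"])
print(response["completion_tokens"])
```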
--- /dev/null
+++ not_again_ai/local_llm/huggingface/helpers.py
@@ -0,0 +1,23 @@
+ from typing import Any
+
+ from transformers import AutoModelForCausalLM, AutoProcessor
+
+
+ def load_model(model_id: str, device_map: str = "cuda", trust_remote_code: bool = True) -> Any:
+     """Load a model from Hugging Face."""
+     model = AutoModelForCausalLM.from_pretrained(
+         model_id,
+         device_map=device_map,
+         trust_remote_code=trust_remote_code,
+         torch_dtype="auto",
+     )
+     return model
+
+
+ def load_processor(model_id: str, trust_remote_code: bool = True) -> Any:
+     """Load a processor from Hugging Face. This is typically used for multimodal language models."""
+     processor = AutoProcessor.from_pretrained(
+         model_id,
+         trust_remote_code=trust_remote_code,
+     )
+     return processor
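These helpers are thin wrappers over `AutoModelForCausalLM.from_pretrained` and `AutoProcessor.from_pretrained`. A small sketch of how they pair up; the model id is an assumption. One caveat visible in the diff: because `chat_completion_image` moves inputs to `cuda:0` unconditionally, the default `device_map="cuda"` is effectively the only placement that works with it as written.

```python
from not_again_ai.local_llm.huggingface.helpers import load_model, load_processor

model_id = "microsoft/Phi-3-vision-128k-instruct"  # hypothetical example id

# device_map="cuda" places the weights on the GPU, matching the hard-coded
# .to("cuda:0") in chat_completion_image; trust_remote_code=True is needed
# for models that ship custom modeling code on the Hub.
model = load_model(model_id, device_map="cuda", trust_remote_code=True)
processor = load_processor(model_id, trust_remote_code=True)

# The (model, processor) tuple is what chat_completion_image expects.
model_processor = (model, processor)
```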
--- not_again_ai-0.8.1.dist-info/METADATA
+++ not_again_ai-0.9.0.dist-info/METADATA
@@ -1,12 +1,12 @@
  Metadata-Version: 2.1
  Name: not-again-ai
- Version: 0.8.1
+ Version: 0.9.0
  Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
  Home-page: https://github.com/DaveCoDev/not-again-ai
  License: MIT
  Author: DaveCoDev
  Author-email: dave.co.dev@gmail.com
- Requires-Python: >=3.11,<3.13
+ Requires-Python: >=3.11,<4.0
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: Intended Audience :: Science/Research
@@ -18,17 +18,19 @@ Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Classifier: Typing :: Typed
  Provides-Extra: llm
+ Provides-Extra: local-llm
  Provides-Extra: statistics
  Provides-Extra: viz
- Requires-Dist: numpy (>=1.26.4,<2.0.0) ; extra == "statistics" or extra == "viz"
- Requires-Dist: ollama (>=0.2.0,<0.3.0) ; extra == "llm"
- Requires-Dist: openai (>=1.29.0,<2.0.0) ; extra == "llm"
- Requires-Dist: pandas (>=2.2.2,<3.0.0) ; extra == "viz"
- Requires-Dist: python-liquid (>=1.12.1,<2.0.0) ; extra == "llm"
- Requires-Dist: scikit-learn (>=1.4.2,<2.0.0) ; extra == "statistics"
- Requires-Dist: scipy (>=1.13.0,<2.0.0) ; extra == "statistics"
- Requires-Dist: seaborn (>=0.13.2,<0.14.0) ; extra == "viz"
- Requires-Dist: tiktoken (>=0.7.0,<0.8.0) ; extra == "llm"
+ Requires-Dist: numpy (==1.26.4) ; extra == "statistics" or extra == "viz"
+ Requires-Dist: ollama (==0.2.0) ; extra == "llm"
+ Requires-Dist: openai (==1.30.1) ; extra == "llm"
+ Requires-Dist: pandas (==2.2.2) ; extra == "viz"
+ Requires-Dist: python-liquid (==1.12.1) ; extra == "llm"
+ Requires-Dist: scikit-learn (==1.4.2) ; extra == "statistics"
+ Requires-Dist: scipy (==1.13.0) ; extra == "statistics"
+ Requires-Dist: seaborn (==0.13.2) ; extra == "viz"
+ Requires-Dist: tiktoken (==0.7.0) ; extra == "llm"
+ Requires-Dist: transformers (==4.41.1) ; extra == "local-llm"
  Project-URL: Documentation, https://github.com/DaveCoDev/not-again-ai
  Project-URL: Repository, https://github.com/DaveCoDev/not-again-ai
  Description-Content-Type: text/markdown
@@ -59,9 +61,11 @@ Requires: Python 3.11, or 3.12
  Install the entire package from [PyPI](https://pypi.org/project/not-again-ai/) with:
 
  ```bash
- $ pip install not_again_ai[llm,statistics,viz]
+ $ pip install not_again_ai[llm,local_llm,statistics,viz]
  ```
 
+ Note that the local LLM subpackage requires separate installations and will not work out of the box because it is heavily hardware dependent. Be sure to check the [notebooks](notebooks/local_llm/) for more details.
+
  The package is split into subpackages, so you can install only the parts you need. See the **[notebooks](notebooks)** for examples.
  * **Base only**: `pip install not_again_ai`
  * **LLM**: `pip install not_again_ai[llm]`
@@ -77,7 +81,9 @@ The package is split into subpackages, so you can install only the parts you nee
  ...
  Environment="OLLAMA_HOST=0.0.0.0"
  ```
- Now ollama will be available at `http://<local_address>:11434`
+ 1. Ollama will be available at `http://<local_address>:11434`
+ * **Local LLM**: `pip install not_again_ai[local_llm]`
+     - Most of this subpackage is hardware dependent, so the extra only installs some generic dependencies. Be sure to check the [notebooks](notebooks/local_llm/) for more details on what is available and how to install it.
  * **Statistics**: `pip install not_again_ai[statistics]`
  * **Visualization**: `pip install not_again_ai[viz]`
 
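Since the README leaves the hardware-specific packages for the local LLM extra to the user, an install sequence might look like the following sketch. The CUDA index URL is an assumption about the target machine, and Pillow is included because `chat_completion.py` imports `PIL` while the `local-llm` extra only pins `transformers`.

```bash
# A sketch, assuming an NVIDIA GPU with CUDA 12.1; drop the index URL for a
# CPU-only build, or adjust it to match your CUDA version.
$ pip install not_again_ai[local_llm]
$ pip install pillow
$ pip install torch --index-url https://download.pytorch.org/whl/cu121
```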
--- not_again_ai-0.8.1.dist-info/RECORD
+++ not_again_ai-0.9.0.dist-info/RECORD
@@ -15,6 +15,10 @@ not_again_ai/llm/openai_api/embeddings.py,sha256=4OBnxZicrY6q4dQhuPqMdAnifyjwrsK
  not_again_ai/llm/openai_api/openai_client.py,sha256=6pZw2xw9X-ceV22rhApwFJ2tAKCxi-SxkjxBsTBZ2Nw,2470
  not_again_ai/llm/openai_api/prompts.py,sha256=7cDfvIKCTYM0t5lK34FLLqYf-SR_cynDXIXw3zWDizA,7094
  not_again_ai/llm/openai_api/tokens.py,sha256=pshGOSYAKvDAe2vnkib_vwENT5on8xQznC8ErLvciK4,4453
+ not_again_ai/local_llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ not_again_ai/local_llm/huggingface/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ not_again_ai/local_llm/huggingface/chat_completion.py,sha256=Y6uMbxLG8TaMVi3hJGrMl_G9Y1N_0dld5Kv1iqYnoao,2300
+ not_again_ai/local_llm/huggingface/helpers.py,sha256=YPr8KbQ8Ac_Mn_nBcrFuL3bCl-IuDCdaRvYVCocy8Gk,734
  not_again_ai/py.typed,sha256=UaCuPFa3H8UAakbt-5G8SPacldTOGvJv18pPjUJ5gDY,93
  not_again_ai/statistics/__init__.py,sha256=gA8r9JQFbFSN0ykrHy4G1IQgcky4f2eM5Oo24oVI5Ik,466
  not_again_ai/statistics/dependence.py,sha256=yZDk_e3ng96mp4hu8dDtQ0-uIn6KdSuGRS9uyM0O3x0,4429
@@ -24,8 +28,8 @@ not_again_ai/viz/distributions.py,sha256=OyWwJaNI6lMRm_iSrhq-CORLNvXfeuLSgDtVo3u
  not_again_ai/viz/scatterplot.py,sha256=eBtIf0Tf_1EcN-akRNJgvwLU0zpRx1zOl0VF9QTnbZA,2290
  not_again_ai/viz/time_series.py,sha256=pOGZqXp_2nd6nKo-PUQNCtmMh__69jxQ6bQibTGLwZA,5212
  not_again_ai/viz/utils.py,sha256=hN7gwxtBt3U6jQni2K8j5m5pCXpaJDoNzGhBBikEU28,238
- not_again_ai-0.8.1.dist-info/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
- not_again_ai-0.8.1.dist-info/METADATA,sha256=XjBBZSVDW8tdwDy8hJY6R-xXo9Mec2i6N-5YFs0eRW8,14203
- not_again_ai-0.8.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- not_again_ai-0.8.1.dist-info/entry_points.txt,sha256=EMJegugnmJUd-jMUA_qIRMIPAasbei8gP6O4-ER0BxQ,61
- not_again_ai-0.8.1.dist-info/RECORD,,
+ not_again_ai-0.9.0.dist-info/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
+ not_again_ai-0.9.0.dist-info/METADATA,sha256=21ize_dY91Bt2ZQEIqLBYHP2WVTaQ_aUiXWgAiJLWOw,14700
+ not_again_ai-0.9.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ not_again_ai-0.9.0.dist-info/entry_points.txt,sha256=EMJegugnmJUd-jMUA_qIRMIPAasbei8gP6O4-ER0BxQ,61
+ not_again_ai-0.9.0.dist-info/RECORD,,