not-again-ai 0.10.0__tar.gz → 0.10.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/PKG-INFO +10 -2
  2. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/README.md +9 -1
  3. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/pyproject.toml +4 -1
  4. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/llm/openai_api/chat_completion.py +1 -1
  5. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/llm/openai_api/context_management.py +1 -1
  6. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/local_llm/__init__.py +4 -0
  7. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/local_llm/ollama/chat_completion.py +1 -1
  8. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/local_llm/ollama/model_mapping.py +1 -0
  9. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/local_llm/ollama/tokens.py +1 -7
  10. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/LICENSE +0 -0
  11. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/__init__.py +0 -0
  12. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/base/__init__.py +0 -0
  13. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/base/file_system.py +0 -0
  14. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/base/parallel.py +0 -0
  15. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/llm/__init__.py +0 -0
  16. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/llm/openai_api/__init__.py +0 -0
  17. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/llm/openai_api/embeddings.py +0 -0
  18. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/llm/openai_api/openai_client.py +0 -0
  19. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/llm/openai_api/prompts.py +0 -0
  20. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/llm/openai_api/tokens.py +0 -0
  21. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/local_llm/chat_completion.py +0 -0
  22. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/local_llm/huggingface/__init__.py +0 -0
  23. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/local_llm/huggingface/chat_completion.py +0 -0
  24. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/local_llm/huggingface/helpers.py +0 -0
  25. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/local_llm/ollama/__init__.py +0 -0
  26. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/local_llm/ollama/ollama_client.py +0 -0
  27. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/local_llm/ollama/service.py +0 -0
  28. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/local_llm/prompts.py +0 -0
  29. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/local_llm/tokens.py +0 -0
  30. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/py.typed +0 -0
  31. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/statistics/__init__.py +0 -0
  32. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/statistics/dependence.py +0 -0
  33. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/viz/__init__.py +0 -0
  34. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/viz/barplots.py +0 -0
  35. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/viz/distributions.py +0 -0
  36. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/viz/scatterplot.py +0 -0
  37. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/viz/time_series.py +0 -0
  38. {not_again_ai-0.10.0 → not_again_ai-0.10.1}/src/not_again_ai/viz/utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: not-again-ai
3
- Version: 0.10.0
3
+ Version: 0.10.1
4
4
  Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
5
5
  Home-page: https://github.com/DaveCoDev/not-again-ai
6
6
  License: MIT
@@ -74,7 +74,7 @@ The package is split into subpackages, so you can install only the parts you nee
74
74
  1. If you wish to use OpenAI
75
75
  1. Go to https://platform.openai.com/settings/profile?tab=api-keys to get your API key.
76
76
  1. (Optional) Set the `OPENAI_API_KEY` and the `OPENAI_ORG_ID` environment variables.
77
- * **Local LLM**: `pip install not_again_ai[llm,llm_local]`
77
+ * **Local LLM**: `pip install not_again_ai[llm,local_llm]`
78
78
  1. Some HuggingFace transformers tokenizers are gated behind access requests. If you wish to use these, you will need to request access from HuggingFace on the model card.
79
79
  1. Then set the `HF_TOKEN` environment variable to your HuggingFace API token which can be found here: https://huggingface.co/settings/tokens
80
80
  1. If you wish to use Ollama:
@@ -292,6 +292,14 @@ provides further examples on how to do this.
292
292
  Mypy looks for the existence of a file named [`py.typed`](./src/not-again-ai/py.typed) in the root of the
293
293
  installed package to indicate that inline type annotations should be checked.
294
294
 
295
+ ## Typos
296
+
297
+ Checks for typos using [typos](https://github.com/crate-ci/typos)
298
+
299
+ ```bash
300
+ (.venv) $ nox -s typos
301
+ ```
302
+
295
303
  ## Continuous Integration
296
304
 
297
305
  Continuous integration is provided by [GitHub Actions](https://github.com/features/actions). This
@@ -35,7 +35,7 @@ The package is split into subpackages, so you can install only the parts you nee
35
35
  1. If you wish to use OpenAI
36
36
  1. Go to https://platform.openai.com/settings/profile?tab=api-keys to get your API key.
37
37
  1. (Optional) Set the `OPENAI_API_KEY` and the `OPENAI_ORG_ID` environment variables.
38
- * **Local LLM**: `pip install not_again_ai[llm,llm_local]`
38
+ * **Local LLM**: `pip install not_again_ai[llm,local_llm]`
39
39
  1. Some HuggingFace transformers tokenizers are gated behind access requests. If you wish to use these, you will need to request access from HuggingFace on the model card.
40
40
  1. Then set the `HF_TOKEN` environment variable to your HuggingFace API token which can be found here: https://huggingface.co/settings/tokens
41
41
  1. If you wish to use Ollama:
@@ -253,6 +253,14 @@ provides further examples on how to do this.
253
253
  Mypy looks for the existence of a file named [`py.typed`](./src/not-again-ai/py.typed) in the root of the
254
254
  installed package to indicate that inline type annotations should be checked.
255
255
 
256
+ ## Typos
257
+
258
+ Checks for typos using [typos](https://github.com/crate-ci/typos)
259
+
260
+ ```bash
261
+ (.venv) $ nox -s typos
262
+ ```
263
+
256
264
  ## Continuous Integration
257
265
 
258
266
  Continuous integration is provided by [GitHub Actions](https://github.com/features/actions). This
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "not-again-ai"
3
- version = "0.10.0"
3
+ version = "0.10.1"
4
4
  description = "Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place."
5
5
  authors = ["DaveCoDev <dave.co.dev@gmail.com>"]
6
6
  license = "MIT"
@@ -78,6 +78,9 @@ mkdocstrings = { version = "*", extras = ["python"] }
78
78
  mkdocs-gen-files = "*"
79
79
  mkdocs-literate-nav = "*"
80
80
 
81
+ [tool.poetry.group.typos.dependencies]
82
+ typos = "*"
83
+
81
84
  [tool.poetry.scripts]
82
85
  not-again-ai = "not_again_ai.cli:entry_point"
83
86
 
@@ -165,7 +165,7 @@ def chat_completion(
165
165
  if seed is not None and response.system_fingerprint is not None:
166
166
  response_data["system_fingerprint"] = response.system_fingerprint
167
167
 
168
- response_data["response_duration"] = response_duration
168
+ response_data["response_duration"] = round(response_duration, 4)
169
169
 
170
170
  if len(response_data["choices"]) == 1:
171
171
  response_data.update(response_data["choices"][0])
@@ -33,7 +33,7 @@ def priority_truncation(
33
33
  Args:
34
34
  messages_unformatted: A list of dictionaries where each dictionary
35
35
  represents a message. Each message must have 'role' and 'content'
36
- keys with string values, where content is a string with any number of occurances of {{variable_name}}.
36
+ keys with string values, where content is a string with any number of occurrences of {{variable_name}}.
37
37
  variables: A dictionary where each key-value pair represents a variable name and its value to inject.
38
38
  priority: A list of variable names in their order of priority.
39
39
  token_limit: The maximum number of tokens allowed in the messages.
@@ -21,3 +21,7 @@ else:
21
21
  import openai # noqa: F401
22
22
  import tiktoken # noqa: F401
23
23
  import transformers # noqa: F401
24
+ from transformers.utils import logging
25
+
26
+ logging.disable_progress_bar()
27
+ logging.set_verbosity_error()
@@ -95,6 +95,6 @@ def chat_completion(
95
95
  response_data["completion_tokens"] = num_tokens_in_string(str(response_data["message"]), tokenizer)
96
96
 
97
97
  # Get the latency of the response
98
- response_data["response_duration"] = response_duration
98
+ response_data["response_duration"] = round(response_duration, 4)
99
99
 
100
100
  return response_data
@@ -12,4 +12,5 @@ OLLAMA_MODEL_MAPPING = {
12
12
  "granite-code": "ibm-granite/granite-34b-code-instruct",
13
13
  "llama3-gradient": "nvidia/Llama3-ChatQA-1.5-8B",
14
14
  "command-r": "CohereForAI/c4ai-command-r-v01",
15
+ "deepseek-coder-v2": "deepseek-ai/DeepSeek-Coder-V2-Lite-Base",
15
16
  }
@@ -1,19 +1,13 @@
1
1
  """By default use the associated huggingface transformer tokenizer.
2
2
  If it does not exist in the mapping, default to tiktoken with some buffer (const + percentage)"""
3
3
 
4
- import os
5
-
6
4
  from loguru import logger
7
5
  import tiktoken
6
+ from transformers import AutoTokenizer
8
7
 
9
8
  from not_again_ai.llm.openai_api.tokens import num_tokens_from_messages as openai_num_tokens_from_messages
10
9
  from not_again_ai.local_llm.ollama.model_mapping import OLLAMA_MODEL_MAPPING
11
10
 
12
- # Prevents the transformers library from printing advisories that are not relevant to this code like not having torch installed.
13
- os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "1"
14
-
15
- from transformers import AutoTokenizer # noqa: E402
16
-
17
11
  TIKTOKEN_NUM_TOKENS_BUFFER = 10
18
12
  TIKTOKEN_PERCENT_TOKENS_BUFFER = 1.1
19
13
 
File without changes