not-again-ai 0.16.0__tar.gz → 0.16.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/PKG-INFO +2 -2
  2. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/pyproject.toml +2 -2
  3. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/chat_completion/providers/ollama_api.py +5 -0
  4. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/chat_completion/providers/openai_api.py +4 -0
  5. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/chat_completion/types.py +5 -0
  6. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/LICENSE +0 -0
  7. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/README.md +0 -0
  8. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/__init__.py +0 -0
  9. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/base/__init__.py +0 -0
  10. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/base/file_system.py +0 -0
  11. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/base/parallel.py +0 -0
  12. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/data/__init__.py +0 -0
  13. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/data/web.py +0 -0
  14. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/__init__.py +0 -0
  15. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/chat_completion/__init__.py +0 -0
  16. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/chat_completion/interface.py +0 -0
  17. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/chat_completion/providers/__init__.py +0 -0
  18. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/embedding/__init__.py +0 -0
  19. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/embedding/interface.py +0 -0
  20. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/embedding/providers/__init__.py +0 -0
  21. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/embedding/providers/ollama_api.py +0 -0
  22. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/embedding/providers/openai_api.py +0 -0
  23. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/embedding/types.py +0 -0
  24. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/prompting/__init__.py +0 -0
  25. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/prompting/compile_prompt.py +0 -0
  26. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/prompting/interface.py +0 -0
  27. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/prompting/providers/__init__.py +0 -0
  28. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/prompting/providers/openai_tiktoken.py +0 -0
  29. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/llm/prompting/types.py +0 -0
  30. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/py.typed +0 -0
  31. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/statistics/__init__.py +0 -0
  32. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/statistics/dependence.py +0 -0
  33. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/viz/__init__.py +0 -0
  34. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/viz/barplots.py +0 -0
  35. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/viz/distributions.py +0 -0
  36. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/viz/scatterplot.py +0 -0
  37. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/viz/time_series.py +0 -0
  38. {not_again_ai-0.16.0 → not_again_ai-0.16.1}/src/not_again_ai/viz/utils.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: not-again-ai
-Version: 0.16.0
+Version: 0.16.1
 Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
 License: MIT
 Author: DaveCoDev
@@ -27,7 +27,7 @@ Requires-Dist: numpy (>=2.2) ; extra == "viz"
 Requires-Dist: ollama (>=0.4) ; extra == "llm"
 Requires-Dist: openai (>=1) ; extra == "llm"
 Requires-Dist: pandas (>=2.2) ; extra == "viz"
-Requires-Dist: playwright (>=1.49) ; extra == "data"
+Requires-Dist: playwright (>=1.50) ; extra == "data"
 Requires-Dist: pydantic (>=2.10)
 Requires-Dist: pytest-playwright (>=0.7) ; extra == "data"
 Requires-Dist: python-liquid (>=1.12) ; extra == "llm"
pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "not-again-ai"
-version = "0.16.0"
+version = "0.16.1"
 description = "Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place."
 authors = [
     { name = "DaveCoDev", email = "dave.co.dev@gmail.com" }
@@ -40,7 +40,7 @@ poetry-plugin-export = ">=1.8"
 
 [project.optional-dependencies]
 data = [
-    "playwright>=1.49",
+    "playwright>=1.50",
     "pytest-playwright>=0.7"
 ]
 llm = [
src/not_again_ai/llm/chat_completion/providers/ollama_api.py
@@ -28,6 +28,7 @@ OLLAMA_PARAMETER_MAP = {
     "logit_bias": None,
     "top_logprobs": None,
     "presence_penalty": None,
+    "max_tokens": "num_predict",
 }
 
 
@@ -45,6 +46,10 @@ def validate(request: ChatCompletionRequest) -> None:
         logger.warning("Parameter 'stop' needs to be a string and not a list. It will be ignored.")
         request.stop = None
 
+    # Raise an error if both "max_tokens" and "max_completion_tokens" are provided
+    if request.max_tokens is not None and request.max_completion_tokens is not None:
+        raise ValueError("`max_tokens` and `max_completion_tokens` cannot both be provided.")
+
 
 def ollama_chat_completion(
     request: ChatCompletionRequest,
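The new "max_tokens": "num_predict" entry maps the OpenAI-style max_tokens request parameter onto Ollama's num_predict option, while entries mapped to None mark parameters that are unsupported and dropped. The helper that applies this map is not part of the diff; the following is a minimal sketch, under the assumption of a hypothetical translate_params function, of how such a map is typically consumed.

# Minimal sketch of how a parameter map like OLLAMA_PARAMETER_MAP is typically
# applied. `translate_params` is a hypothetical name, not the package's API.
PARAMETER_MAP = {
    "presence_penalty": None,      # None: unsupported by Ollama, silently dropped
    "max_tokens": "num_predict",   # new in 0.16.1: renamed to Ollama's option
}

def translate_params(params: dict) -> dict:
    translated = {}
    for key, value in params.items():
        if key in PARAMETER_MAP:
            target = PARAMETER_MAP[key]
            if target is None:           # unsupported parameter: drop it
                continue
            translated[target] = value   # rename to the provider-specific key
        else:
            translated[key] = value      # unknown keys pass through unchanged
    return translated

assert translate_params({"max_tokens": 256, "presence_penalty": 0.5, "temperature": 0.2}) == {
    "num_predict": 256,
    "temperature": 0.2,
}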
src/not_again_ai/llm/chat_completion/providers/openai_api.py
@@ -31,6 +31,10 @@ def validate(request: ChatCompletionRequest) -> None:
     if request.json_mode and request.structured_outputs is not None:
         raise ValueError("json_schema and json_mode cannot be used together.")
 
+    # Raise an error if both "max_tokens" and "max_completion_tokens" are provided
+    if request.max_tokens is not None and request.max_completion_tokens is not None:
+        raise ValueError("`max_tokens` and `max_completion_tokens` cannot both be provided.")
+
 
 def openai_chat_completion(
     request: ChatCompletionRequest,
src/not_again_ai/llm/chat_completion/types.py
@@ -118,6 +118,11 @@ class ChatCompletionRequest(BaseModel):
     top_k: int | None = Field(default=None)
     min_p: float | None = Field(default=None)
 
+    max_tokens: int | None = Field(
+        default=None,
+        description="Sometimes `max_completion_tokens` is not correctly supported so we provide this as a fallback.",
+    )
+
 
 class ChatCompletionChoice(BaseModel):
     message: AssistantMessage
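With the new field in place, callers can set max_tokens on a ChatCompletionRequest when a provider or model does not honor max_completion_tokens, and the provider validate() helpers now reject requests that set both. A rough usage sketch follows; apart from the two token fields and the error message, the other arguments (model, messages, UserMessage) are assumptions about the existing API, not taken from this diff.

# Rough usage sketch (0.16.1). Only max_tokens, max_completion_tokens, and the
# ValueError text come from this diff; model/messages/UserMessage are assumed.
from not_again_ai.llm.chat_completion.types import ChatCompletionRequest, UserMessage

# Fallback path: some providers/models ignore max_completion_tokens,
# so the request can carry max_tokens instead.
request = ChatCompletionRequest(
    model="llama3.1",
    messages=[UserMessage(content="Hello!")],
    max_tokens=128,
)

# Setting both limits is now rejected by the provider validate() helpers:
# ValueError: `max_tokens` and `max_completion_tokens` cannot both be provided.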