not-again-ai 0.7.0.tar.gz → 0.8.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/PKG-INFO +1 -1
  2. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/pyproject.toml +1 -1
  3. not_again_ai-0.8.0/src/not_again_ai/llm/chat_completion.py +76 -0
  4. {not_again_ai-0.7.0/src/not_again_ai/llm/openai → not_again_ai-0.8.0/src/not_again_ai/llm/openai_api}/context_management.py +1 -1
  5. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/LICENSE +0 -0
  6. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/README.md +0 -0
  7. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/__init__.py +0 -0
  8. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/base/__init__.py +0 -0
  9. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/base/file_system.py +0 -0
  10. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/base/parallel.py +0 -0
  11. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/llm/__init__.py +0 -0
  12. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/llm/ollama/__init__.py +0 -0
  13. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/llm/ollama/chat_completion.py +0 -0
  14. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/llm/ollama/ollama_client.py +0 -0
  15. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/llm/ollama/service.py +0 -0
  16. {not_again_ai-0.7.0/src/not_again_ai/llm/openai → not_again_ai-0.8.0/src/not_again_ai/llm/openai_api}/__init__.py +0 -0
  17. {not_again_ai-0.7.0/src/not_again_ai/llm/openai → not_again_ai-0.8.0/src/not_again_ai/llm/openai_api}/chat_completion.py +0 -0
  18. {not_again_ai-0.7.0/src/not_again_ai/llm/openai → not_again_ai-0.8.0/src/not_again_ai/llm/openai_api}/embeddings.py +0 -0
  19. {not_again_ai-0.7.0/src/not_again_ai/llm/openai → not_again_ai-0.8.0/src/not_again_ai/llm/openai_api}/openai_client.py +0 -0
  20. {not_again_ai-0.7.0/src/not_again_ai/llm/openai → not_again_ai-0.8.0/src/not_again_ai/llm/openai_api}/prompts.py +0 -0
  21. {not_again_ai-0.7.0/src/not_again_ai/llm/openai → not_again_ai-0.8.0/src/not_again_ai/llm/openai_api}/tokens.py +0 -0
  22. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/py.typed +0 -0
  23. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/statistics/__init__.py +0 -0
  24. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/statistics/dependence.py +0 -0
  25. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/viz/__init__.py +0 -0
  26. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/viz/barplots.py +0 -0
  27. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/viz/distributions.py +0 -0
  28. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/viz/scatterplot.py +0 -0
  29. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/viz/time_series.py +0 -0
  30. {not_again_ai-0.7.0 → not_again_ai-0.8.0}/src/not_again_ai/viz/utils.py +0 -0
{not_again_ai-0.7.0 → not_again_ai-0.8.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: not-again-ai
-Version: 0.7.0
+Version: 0.8.0
 Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
 Home-page: https://github.com/DaveCoDev/not-again-ai
 License: MIT
{not_again_ai-0.7.0 → not_again_ai-0.8.0}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "not-again-ai"
-version = "0.7.0"
+version = "0.8.0"
 description = "Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place."
 authors = ["DaveCoDev <dave.co.dev@gmail.com>"]
 license = "MIT"
not_again_ai-0.8.0/src/not_again_ai/llm/chat_completion.py
@@ -0,0 +1,76 @@
+from typing import Any
+
+from ollama import Client
+from openai import OpenAI
+
+from not_again_ai.llm.ollama import chat_completion as chat_completion_ollama
+from not_again_ai.llm.openai_api import chat_completion as chat_completion_openai
+
+
+def chat_completion(
+    messages: list[dict[str, Any]],
+    model: str,
+    client: OpenAI | Client,
+    max_tokens: int | None = None,
+    temperature: float = 0.7,
+    json_mode: bool = False,
+    seed: int | None = None,
+    **kwargs: Any,
+) -> dict[str, Any]:
+    """Creates a common wrapper around chat completion models from different providers.
+    Currently supports the OpenAI API and Ollama local models.
+    All input parameters are supported by all providers in similar ways and the output is standardized.
+
+    Args:
+        messages (list[dict[str, Any]]): A list of messages to send to the model.
+        model (str): The model name to use.
+        client (OpenAI | Client): The client object to use for chat completion.
+        max_tokens (int, optional): The maximum number of tokens to generate.
+        temperature (float, optional): The temperature of the model. Increasing the temperature will make the model answer more creatively.
+        json_mode (bool, optional): This will structure the response as a valid JSON object.
+        seed (int, optional): The seed to use for the model for reproducible outputs.
+
+    Returns:
+        dict[str, Any]: A dictionary with the following keys
+            message (str | dict): The content of the generated assistant message.
+                If json_mode is True, this will be a dictionary.
+            completion_tokens (int): The number of tokens used by the model to generate the completion.
+            extras (dict): This will contain any additional fields returned by corresponding provider.
+    """
+    # Determine which chat_completion function to call based on the client type
+    if isinstance(client, OpenAI):
+        response = chat_completion_openai.chat_completion(
+            messages=messages,
+            model=model,
+            client=client,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            json_mode=json_mode,
+            seed=seed,
+            **kwargs,
+        )
+    elif isinstance(client, Client):
+        response = chat_completion_ollama.chat_completion(
+            messages=messages,
+            model=model,
+            client=client,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            json_mode=json_mode,
+            seed=seed,
+            **kwargs,
+        )
+    else:
+        raise ValueError("Invalid client type")
+
+    # Parse the responses to be consistent
+    response_data = {}
+    response_data["message"] = response.get("message", None)
+    response_data["completion_tokens"] = response.get("completion_tokens", None)
+
+    # Return any additional fields from the response in an "extras" dictionary
+    extras = {k: v for k, v in response.items() if k not in response_data}
+    if extras:
+        response_data["extras"] = extras
+
+    return response_data
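This new src/not_again_ai/llm/chat_completion.py is the notable addition in 0.8.0: a single chat_completion() entry point that dispatches on the client type, calling the openai_api implementation for an OpenAI client and the ollama implementation for an ollama Client, then normalizing the response shape. A minimal usage sketch against this API (the model names and prompt are illustrative, and it assumes OPENAI_API_KEY is set and a local Ollama server is running):

```python
from ollama import Client
from openai import OpenAI

from not_again_ai.llm.chat_completion import chat_completion

messages = [{"role": "user", "content": "Reply with one word."}]

# An OpenAI client routes to not_again_ai.llm.openai_api.chat_completion.
openai_result = chat_completion(
    messages=messages,
    model="gpt-3.5-turbo",  # illustrative model name
    client=OpenAI(),
    max_tokens=10,
)

# An ollama Client routes to not_again_ai.llm.ollama.chat_completion.
ollama_result = chat_completion(
    messages=messages,
    model="llama2",  # illustrative model name
    client=Client(),
    max_tokens=10,
)

# Both results share the same keys: "message", "completion_tokens",
# plus an optional "extras" dict of provider-specific fields.
print(openai_result["message"], ollama_result["message"])
```

Dispatching on isinstance keeps the call signature identical across providers, and provider-specific response fields survive in "extras" rather than being dropped.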
{not_again_ai-0.7.0/src/not_again_ai/llm/openai → not_again_ai-0.8.0/src/not_again_ai/llm/openai_api}/context_management.py
@@ -1,6 +1,6 @@
 import copy
 
-from not_again_ai.llm.openai.tokens import num_tokens_from_messages, truncate_str
+from not_again_ai.llm.openai_api.tokens import num_tokens_from_messages, truncate_str
 
 
 def _inject_variable(
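The remaining change is mechanical: the openai subpackage is renamed to openai_api, and context_management.py only updates its import accordingly. Code written against 0.7.0 would be updated along these lines (a sketch showing two of the renamed modules; the same substitution applies to embeddings, prompts, openai_client, and the rest of the subpackage):

```python
# not-again-ai 0.7.0
# from not_again_ai.llm.openai.tokens import num_tokens_from_messages, truncate_str
# from not_again_ai.llm.openai.chat_completion import chat_completion

# not-again-ai 0.8.0: "openai" becomes "openai_api"
from not_again_ai.llm.openai_api.tokens import num_tokens_from_messages, truncate_str
from not_again_ai.llm.openai_api.chat_completion import chat_completion
```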