not-again-ai 0.7.0__py3-none-any.whl → 0.8.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,76 @@
1
+ from typing import Any
2
+
3
+ from ollama import Client
4
+ from openai import OpenAI
5
+
6
+ from not_again_ai.llm.ollama import chat_completion as chat_completion_ollama
7
+ from not_again_ai.llm.openai_api import chat_completion as chat_completion_openai
8
+
9
+
10
def chat_completion(
    messages: list[dict[str, Any]],
    model: str,
    client: OpenAI | Client,
    max_tokens: int | None = None,
    temperature: float = 0.7,
    json_mode: bool = False,
    seed: int | None = None,
    **kwargs: Any,
) -> dict[str, Any]:
    """Creates a common wrapper around chat completion models from different providers.
    Currently supports the OpenAI API and Ollama local models.
    All input parameters are supported by all providers in similar ways and the output is standardized.

    Args:
        messages (list[dict[str, Any]]): A list of messages to send to the model.
        model (str): The model name to use.
        client (OpenAI | Client): The client object to use for chat completion.
        max_tokens (int, optional): The maximum number of tokens to generate.
        temperature (float, optional): The temperature of the model. Increasing the temperature will make the model answer more creatively.
        json_mode (bool, optional): This will structure the response as a valid JSON object.
        seed (int, optional): The seed to use for the model for reproducible outputs.
        **kwargs (Any): Additional provider-specific arguments passed through unchanged.

    Returns:
        dict[str, Any]: A dictionary with the following keys
            message (str | dict): The content of the generated assistant message.
                If json_mode is True, this will be a dictionary.
            completion_tokens (int): The number of tokens used by the model to generate the completion.
            extras (dict): This will contain any additional fields returned by corresponding provider.

    Raises:
        ValueError: If the client is neither an OpenAI client nor an Ollama Client.
    """
    # Select the provider implementation based on the client type. Both provider
    # functions share the same keyword interface, so the call itself is made once
    # below instead of duplicating the full argument list per branch.
    if isinstance(client, OpenAI):
        provider_chat_completion = chat_completion_openai.chat_completion
    elif isinstance(client, Client):
        provider_chat_completion = chat_completion_ollama.chat_completion
    else:
        raise ValueError("Invalid client type")

    response = provider_chat_completion(
        messages=messages,
        model=model,
        client=client,
        max_tokens=max_tokens,
        temperature=temperature,
        json_mode=json_mode,
        seed=seed,
        **kwargs,
    )

    # Normalize the provider response into a consistent shape. Missing fields
    # default to None rather than raising.
    response_data: dict[str, Any] = {
        "message": response.get("message", None),
        "completion_tokens": response.get("completion_tokens", None),
    }

    # Return any additional fields from the response in an "extras" dictionary
    extras = {k: v for k, v in response.items() if k not in response_data}
    if extras:
        response_data["extras"] = extras

    return response_data
@@ -1,6 +1,6 @@
1
1
  import copy
2
2
 
3
- from not_again_ai.llm.openai.tokens import num_tokens_from_messages, truncate_str
3
+ from not_again_ai.llm.openai_api.tokens import num_tokens_from_messages, truncate_str
4
4
 
5
5
 
6
6
  def _inject_variable(
@@ -82,6 +82,8 @@ def num_tokens_from_messages(messages: list[dict[str, str]], model: str = "gpt-3
82
82
  "gpt-4-0125-preview",
83
83
  "gpt-4-turbo",
84
84
  "gpt-4-turbo-2024-04-09",
85
+ "gpt-4o",
86
+ "gpt-4o-2024-05-13",
85
87
  }:
86
88
  tokens_per_message = 3 # every message follows <|start|>{role/name}\n{content}<|end|>\n
87
89
  tokens_per_name = 1 # if there's a name, the role is omitted
@@ -91,6 +93,8 @@ def num_tokens_from_messages(messages: list[dict[str, str]], model: str = "gpt-3
91
93
  # Approximate catch-all. Assumes future versions of 3.5 and 4 will have the same token counts as the 0613 versions.
92
94
  elif "gpt-3.5-turbo" in model:
93
95
  return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
96
+ elif "gpt-4o" in model:
97
+ return num_tokens_from_messages(messages, model="gpt-4o-2024-05-13")
94
98
  elif "gpt-4" in model:
95
99
  return num_tokens_from_messages(messages, model="gpt-4-0613")
96
100
  else:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: not-again-ai
3
- Version: 0.7.0
3
+ Version: 0.8.1
4
4
  Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
5
5
  Home-page: https://github.com/DaveCoDev/not-again-ai
6
6
  License: MIT
@@ -21,14 +21,14 @@ Provides-Extra: llm
21
21
  Provides-Extra: statistics
22
22
  Provides-Extra: viz
23
23
  Requires-Dist: numpy (>=1.26.4,<2.0.0) ; extra == "statistics" or extra == "viz"
24
- Requires-Dist: ollama (>=0.1.9,<0.2.0) ; extra == "llm"
25
- Requires-Dist: openai (>=1.25.1,<2.0.0) ; extra == "llm"
24
+ Requires-Dist: ollama (>=0.2.0,<0.3.0) ; extra == "llm"
25
+ Requires-Dist: openai (>=1.29.0,<2.0.0) ; extra == "llm"
26
26
  Requires-Dist: pandas (>=2.2.2,<3.0.0) ; extra == "viz"
27
27
  Requires-Dist: python-liquid (>=1.12.1,<2.0.0) ; extra == "llm"
28
28
  Requires-Dist: scikit-learn (>=1.4.2,<2.0.0) ; extra == "statistics"
29
29
  Requires-Dist: scipy (>=1.13.0,<2.0.0) ; extra == "statistics"
30
30
  Requires-Dist: seaborn (>=0.13.2,<0.14.0) ; extra == "viz"
31
- Requires-Dist: tiktoken (>=0.6.0,<0.7.0) ; extra == "llm"
31
+ Requires-Dist: tiktoken (>=0.7.0,<0.8.0) ; extra == "llm"
32
32
  Project-URL: Documentation, https://github.com/DaveCoDev/not-again-ai
33
33
  Project-URL: Repository, https://github.com/DaveCoDev/not-again-ai
34
34
  Description-Content-Type: text/markdown
@@ -3,17 +3,18 @@ not_again_ai/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU
3
3
  not_again_ai/base/file_system.py,sha256=KNQmacO4Q__CQuq2oPzWrg3rQO48n3evglc9bNiP7KM,949
4
4
  not_again_ai/base/parallel.py,sha256=fcYhKBYBWvob84iKp3O93wvFFdXeidljZsShgBLTNGA,3448
5
5
  not_again_ai/llm/__init__.py,sha256=_wNUL6FDaT369Z8W48FsaC_NkcOZ-ib2MMUvnaLOS-0,451
6
+ not_again_ai/llm/chat_completion.py,sha256=a5jqce5qQzwdDFK8W7XRqGjTdFHAqKcWDmrTDtKls9U,3070
6
7
  not_again_ai/llm/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
8
  not_again_ai/llm/ollama/chat_completion.py,sha256=EKvqpHztsEISH9skYaLgKABEeoDhUlUyrakz_v6zvTw,3682
8
9
  not_again_ai/llm/ollama/ollama_client.py,sha256=dktyw7aKFq4EA3dU7Le5UpfsSq3Oh_POmYSrAI4qLi8,765
9
10
  not_again_ai/llm/ollama/service.py,sha256=XczbxISTAp4KHnIkqRZaMsfBohH-TAHrjZ8T9x3cRAY,2900
10
- not_again_ai/llm/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
11
- not_again_ai/llm/openai/chat_completion.py,sha256=UJljrAV1lS2UvA5Sjt6N7S-9DaAgKRBpswLNDDP9RBI,8623
12
- not_again_ai/llm/openai/context_management.py,sha256=Aj3vIsKgTlfja101ybYg3Ps24LF2BoJ23KjeqkyEEU0,3431
13
- not_again_ai/llm/openai/embeddings.py,sha256=4OBnxZicrY6q4dQhuPqMdAnifyjwrsKMTDj-kVre0yc,2500
14
- not_again_ai/llm/openai/openai_client.py,sha256=6pZw2xw9X-ceV22rhApwFJ2tAKCxi-SxkjxBsTBZ2Nw,2470
15
- not_again_ai/llm/openai/prompts.py,sha256=7cDfvIKCTYM0t5lK34FLLqYf-SR_cynDXIXw3zWDizA,7094
16
- not_again_ai/llm/openai/tokens.py,sha256=2WGHzZJ0mIFAGpkuS_otjFzwhqjaSMgLoP2FVMnJTiE,4301
11
+ not_again_ai/llm/openai_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
12
+ not_again_ai/llm/openai_api/chat_completion.py,sha256=UJljrAV1lS2UvA5Sjt6N7S-9DaAgKRBpswLNDDP9RBI,8623
13
+ not_again_ai/llm/openai_api/context_management.py,sha256=SKksCHs-C-UXtSksvXSvWWR4eeUyuyVc6XIhGgqJUF4,3435
14
+ not_again_ai/llm/openai_api/embeddings.py,sha256=4OBnxZicrY6q4dQhuPqMdAnifyjwrsKMTDj-kVre0yc,2500
15
+ not_again_ai/llm/openai_api/openai_client.py,sha256=6pZw2xw9X-ceV22rhApwFJ2tAKCxi-SxkjxBsTBZ2Nw,2470
16
+ not_again_ai/llm/openai_api/prompts.py,sha256=7cDfvIKCTYM0t5lK34FLLqYf-SR_cynDXIXw3zWDizA,7094
17
+ not_again_ai/llm/openai_api/tokens.py,sha256=pshGOSYAKvDAe2vnkib_vwENT5on8xQznC8ErLvciK4,4453
17
18
  not_again_ai/py.typed,sha256=UaCuPFa3H8UAakbt-5G8SPacldTOGvJv18pPjUJ5gDY,93
18
19
  not_again_ai/statistics/__init__.py,sha256=gA8r9JQFbFSN0ykrHy4G1IQgcky4f2eM5Oo24oVI5Ik,466
19
20
  not_again_ai/statistics/dependence.py,sha256=yZDk_e3ng96mp4hu8dDtQ0-uIn6KdSuGRS9uyM0O3x0,4429
@@ -23,8 +24,8 @@ not_again_ai/viz/distributions.py,sha256=OyWwJaNI6lMRm_iSrhq-CORLNvXfeuLSgDtVo3u
23
24
  not_again_ai/viz/scatterplot.py,sha256=eBtIf0Tf_1EcN-akRNJgvwLU0zpRx1zOl0VF9QTnbZA,2290
24
25
  not_again_ai/viz/time_series.py,sha256=pOGZqXp_2nd6nKo-PUQNCtmMh__69jxQ6bQibTGLwZA,5212
25
26
  not_again_ai/viz/utils.py,sha256=hN7gwxtBt3U6jQni2K8j5m5pCXpaJDoNzGhBBikEU28,238
26
- not_again_ai-0.7.0.dist-info/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
27
- not_again_ai-0.7.0.dist-info/METADATA,sha256=58-RwfLn8dNRqMTK2Qp8gcPSAYAes3OFVBFbSLobvpo,14203
28
- not_again_ai-0.7.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
29
- not_again_ai-0.7.0.dist-info/entry_points.txt,sha256=EMJegugnmJUd-jMUA_qIRMIPAasbei8gP6O4-ER0BxQ,61
30
- not_again_ai-0.7.0.dist-info/RECORD,,
27
+ not_again_ai-0.8.1.dist-info/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
28
+ not_again_ai-0.8.1.dist-info/METADATA,sha256=XjBBZSVDW8tdwDy8hJY6R-xXo9Mec2i6N-5YFs0eRW8,14203
29
+ not_again_ai-0.8.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
30
+ not_again_ai-0.8.1.dist-info/entry_points.txt,sha256=EMJegugnmJUd-jMUA_qIRMIPAasbei8gP6O4-ER0BxQ,61
31
+ not_again_ai-0.8.1.dist-info/RECORD,,
File without changes
File without changes
File without changes