not-again-ai 0.11.0__py3-none-any.whl → 0.12.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
not_again_ai/llm/gh_models/azure_ai_client.py (new file)
@@ -0,0 +1,20 @@
+ import os
+
+ from azure.ai.inference import ChatCompletionsClient
+ from azure.core.credentials import AzureKeyCredential
+
+
+ def azure_ai_client(
+     token: str | None = None,
+     endpoint: str = "https://models.inference.ai.azure.com",
+ ) -> ChatCompletionsClient:
+     if not token:
+         token = os.getenv("GITHUB_TOKEN")
+     if not token:
+         raise ValueError("Token must be provided or GITHUB_TOKEN environment variable must be set")
+
+     client = ChatCompletionsClient(
+         endpoint=endpoint,
+         credential=AzureKeyCredential(token),
+     )
+     return client
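Usage sketch (not part of the diff): a minimal call to the new factory, assuming the package is installed with the `[llm]` extra and `GITHUB_TOKEN` is set in the environment.

```python
from not_again_ai.llm.gh_models.azure_ai_client import azure_ai_client

# No token argument, so the factory falls back to the GITHUB_TOKEN
# environment variable and the default GitHub Models endpoint.
client = azure_ai_client()
```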
not_again_ai/llm/gh_models/chat_completion.py (new file)
@@ -0,0 +1,81 @@
+ import contextlib
+ import json
+ import time
+ from typing import Any
+
+ from azure.ai.inference import ChatCompletionsClient
+ from azure.ai.inference.models import ChatCompletionsToolDefinition, ChatRequestMessage
+
+
+ def chat_completion(
+     messages: list[ChatRequestMessage],
+     model: str,
+     client: ChatCompletionsClient,
+     tools: list[ChatCompletionsToolDefinition] | None = None,
+     max_tokens: int | None = None,
+     temperature: float | None = None,
+     json_mode: bool = False,
+     seed: int | None = None,
+ ) -> dict[str, Any]:
+     """Gets a response from GitHub Models using the Azure AI Inference SDK.
+     See the available models at https://github.com/marketplace/models
+     Full documentation of the SDK is at: https://learn.microsoft.com/en-us/azure/ai-studio/reference/reference-model-inference-chat-completions
+     And samples at: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples
+
+     Returns:
+         dict[str, Any]: A dictionary with the following keys
+             message (str | dict): The content of the generated assistant message.
+                 If json_mode is True, this will be a dictionary.
+             tool_names (list[str], optional): The names of the tools called by the model.
+                 If the model does not support tools, a ResponseError is raised.
+             tool_args_list (list[dict], optional): The arguments of the tools called by the model.
+             prompt_tokens (int): The number of tokens in the messages sent to the model.
+             completion_tokens (int): The number of tokens used by the model to generate the completion.
+             response_duration (float): The time, in seconds, taken to generate the response by using the model.
+             system_fingerprint (str, optional): If seed is set, a unique identifier for the model used to generate the response.
+     """
+     response_format = {"type": "json_object"} if json_mode else None
+     start_time = time.time()
+     response = client.complete(  # type: ignore
+         messages=messages,
+         model=model,
+         response_format=response_format,  # type: ignore
+         max_tokens=max_tokens,
+         temperature=temperature,
+         tools=tools,
+         seed=seed,
+     )
+     end_time = time.time()
+     response_duration = end_time - start_time
+
+     response_data = {}
+     finish_reason = response.choices[0].finish_reason
+     response_data["finish_reason"] = finish_reason.value  # type: ignore
+
+     message = response.choices[0].message.content
+     if message and json_mode:
+         with contextlib.suppress(json.JSONDecodeError):
+             message = json.loads(message)
+     response_data["message"] = message
+
+     # Check for tool calls because even if the finish_reason is stop, the model may have called a tool
+     tool_calls = response.choices[0].message.tool_calls
+     if tool_calls:
+         tool_names = []
+         tool_args_list = []
+         for tool_call in tool_calls:
+             tool_names.append(tool_call.function.name)  # type: ignore
+             tool_args_list.append(json.loads(tool_call.function.arguments))  # type: ignore
+         response_data["tool_names"] = tool_names
+         response_data["tool_args_list"] = tool_args_list
+
+     if seed is not None and hasattr(response, "system_fingerprint"):
+         response_data["system_fingerprint"] = response.system_fingerprint
+
+     usage = response.usage
+     if usage is not None:
+         response_data["completion_tokens"] = usage.completion_tokens
+         response_data["prompt_tokens"] = usage.prompt_tokens
+     response_data["response_duration"] = round(response_duration, 4)
+
+     return response_data
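Usage sketch (not part of the diff): an end-to-end call against GitHub Models using the two new modules, assuming `GITHUB_TOKEN` is set; the model name is a placeholder from the marketplace list.

```python
from azure.ai.inference.models import SystemMessage, UserMessage

from not_again_ai.llm.gh_models.azure_ai_client import azure_ai_client
from not_again_ai.llm.gh_models.chat_completion import chat_completion

client = azure_ai_client()
response = chat_completion(
    messages=[
        SystemMessage(content="You are a helpful assistant."),
        UserMessage(content="Reply with one word."),
    ],
    model="gpt-4o-mini",  # placeholder; see https://github.com/marketplace/models
    client=client,
    max_tokens=16,
)
# Keys per the docstring above: message, finish_reason, token counts, duration.
print(response["message"], response["response_duration"])
```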

not_again_ai/llm/openai_api/chat_completion.py
@@ -4,6 +4,7 @@ import time
  from typing import Any
 
  from openai import OpenAI
+ from pydantic import BaseModel
 
 
  def chat_completion(
@@ -15,6 +16,7 @@ def chat_completion(
      max_tokens: int | None = None,
      temperature: float = 0.7,
      json_mode: bool = False,
+     json_schema: dict[str, Any] | None = None,
      seed: int | None = None,
      logprobs: tuple[bool, int | None] | None = None,
      n: int = 1,
@@ -44,6 +46,9 @@ def chat_completion(
          json_mode (bool, optional): When JSON mode is enabled, the model is constrained to only
              generate strings that parse into a valid JSON object and will return a dictionary.
              See https://platform.openai.com/docs/guides/text-generation/json-mode
+         json_schema (dict, optional): Enables Structured Outputs which ensures the model will
+             always generate responses that adhere to your supplied JSON Schema.
+             See https://platform.openai.com/docs/guides/structured-outputs/structured-outputs
          seed (int, optional): If specified, OpenAI will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return the same result.
              Determinism is not guaranteed, and you should refer to the `system_fingerprint` response
@@ -74,7 +79,19 @@ def chat_completion(
          system_fingerprint (str, optional): If seed is set, a unique identifier for the model used to generate the response.
          response_duration (float): The time, in seconds, taken to generate the response from the API.
      """
-     response_format = {"type": "json_object"} if json_mode else None
+
+     if json_mode and json_schema is not None:
+         raise ValueError("json_schema and json_mode cannot be used together.")
+
+     if json_mode:
+         response_format: dict[str, Any] = {"type": "json_object"}
+     elif json_schema is not None:
+         if isinstance(json_schema, dict):
+             response_format = {"type": "json_schema", "json_schema": json_schema}
+         elif issubclass(json_schema, BaseModel):
+             response_format = json_schema
+     else:
+         response_format = {"type": "text"}
 
      kwargs.update(
          {
@@ -126,7 +143,7 @@ def chat_completion(
              response_data_curr["tool_args_list"] = tool_args_list
          elif finish_reason == "stop" or finish_reason == "length":
              message = response_choice.message.content
-             if json_mode:
+             if json_mode or json_schema is not None:
                  with contextlib.suppress(json.JSONDecodeError):
                      message = json.loads(message)
              response_data_curr["message"] = message
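Usage sketch (not part of the diff): passing the new `json_schema` argument, assuming a client from `openai_client()`. The dict shape follows OpenAI's Structured Outputs format (`name`/`strict`/`schema`); the schema name and fields here are illustrative, not defined by not-again-ai.

```python
from not_again_ai.llm.openai_api.chat_completion import chat_completion
from not_again_ai.llm.openai_api.openai_client import openai_client

client = openai_client()

# Illustrative schema in OpenAI's Structured Outputs format; "sentiment"
# and its fields are hypothetical.
json_schema = {
    "name": "sentiment",
    "strict": True,
    "schema": {
        "type": "object",
        "properties": {
            "label": {"type": "string", "enum": ["positive", "negative"]},
            "confidence": {"type": "number"},
        },
        "required": ["label", "confidence"],
        "additionalProperties": False,
    },
}

response = chat_completion(
    messages=[{"role": "user", "content": "I loved this movie!"}],
    model="gpt-4o-2024-08-06",  # a model that supports Structured Outputs
    client=client,
    json_schema=json_schema,
)
# Because json_schema was supplied, response["message"] is parsed into a dict.
```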

not_again_ai/llm/openai_api/openai_client.py
@@ -1,4 +1,7 @@
- from openai import OpenAI
+ from typing import Literal
+
+ from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+ from openai import AzureOpenAI, OpenAI
 
 
  class InvalidOAIAPITypeError(Exception):
@@ -8,13 +11,18 @@ class InvalidOAIAPITypeError(Exception):
 
 
  def openai_client(
-     api_type: str = "openai",
+     api_type: Literal["openai", "azure_openai"] = "openai",
      api_key: str | None = None,
      organization: str | None = None,
+     aoai_api_version: str = "2024-06-01",
+     azure_endpoint: str | None = None,
      timeout: float | None = None,
      max_retries: int | None = None,
- ) -> OpenAI:
-     """Create an OpenAI client instance based on the specified API type and other provided parameters.
+ ) -> OpenAI | AzureOpenAI:
+     """Create an OpenAI or Azure OpenAI client instance based on the specified API type and other provided parameters.
+
+     Azure OpenAI requires RBAC authentication. You must be signed in with the Azure CLI and have the correct role assigned.
+     See https://techcommunity.microsoft.com/t5/microsoft-developer-community/using-keyless-authentication-with-azure-openai/ba-p/4111521
 
      Args:
          api_type (str, optional): Type of the API to be used. Accepted values are 'openai' or 'azure_openai'.
@@ -23,6 +31,9 @@ def openai_client(
              OpenAI automatically uses `OPENAI_API_KEY` from the environment.
          organization (str, optional): The ID of the organization. If not provided,
              OpenAI automatically uses `OPENAI_ORG_ID` from the environment.
+         aoai_api_version (str, optional): Only applicable if using Azure OpenAI https://learn.microsoft.com/azure/ai-services/openai/reference#rest-api-versioning
+         azure_endpoint (str, optional): The endpoint to use for Azure OpenAI.
+             If not provided, will be read from the `AZURE_OPENAI_ENDPOINT` environment variable.
          timeout (float, optional): By default requests time out after 10 minutes.
          max_retries (int, optional): Certain errors are automatically retried 2 times by default,
              with a short exponential backoff. Connection errors (for example, due to a network connectivity problem),
@@ -52,6 +63,16 @@ def openai_client(
          filtered_args = {k: v for k, v in args.items() if v is not None}
          return OpenAI(**filtered_args)  # type: ignore
      elif api_type == "azure_openai":
-         raise NotImplementedError("AzureOpenAI is not yet supported by not-again-ai.")
+         azure_credential = DefaultAzureCredential()
+         ad_token_provider = get_bearer_token_provider(azure_credential, "https://cognitiveservices.azure.com/.default")
+         args = {
+             "api_version": aoai_api_version,
+             "azure_endpoint": azure_endpoint,
+             "azure_ad_token_provider": ad_token_provider,  # type: ignore
+             "timeout": timeout,
+             "max_retries": max_retries,
+         }
+         filtered_args = {k: v for k, v in args.items() if v is not None}
+         return AzureOpenAI(**filtered_args)  # type: ignore
      else:
-         raise NotImplementedError("This should never happen.")
+         raise NotImplementedError(f"API type '{api_type}' is invalid.")
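Usage sketch (not part of the diff): the new Azure path, assuming `az login` has been run and the signed-in identity holds an appropriate Cognitive Services role; the endpoint URL is a placeholder for your own AOAI resource.

```python
from not_again_ai.llm.openai_api.openai_client import openai_client

aoai_client = openai_client(
    api_type="azure_openai",
    azure_endpoint="https://my-resource.openai.azure.com",  # placeholder endpoint
)
# Per the docstring, azure_endpoint can be omitted to fall back to the
# AZURE_OPENAI_ENDPOINT environment variable.
```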

not_again_ai/llm/openai_api/prompts.py
@@ -87,7 +87,7 @@ def chat_prompt(messages_unformatted: list[dict[str, Any]], variables: dict[str,
          A list which represents messages in the format that OpenAI expects for its chat completions API.
          See here for details: https://platform.openai.com/docs/api-reference/chat/create
 
-     Examples:
+     Example:
          >>> # Assume cat_image and dog_image are Path objects to image files
          >>> messages = [
          ...     {"role": "system", "content": "You are a helpful assistant."},

not_again_ai/llm/openai_api/tokens.py
@@ -86,6 +86,7 @@ def num_tokens_from_messages(
          "gpt-4-turbo-2024-04-09",
          "gpt-4o",
          "gpt-4o-2024-05-13",
+         "gpt-4o-2024-08-06",
          "gpt-4o-mini",
          "gpt-4o-mini-2024-07-18",
      }:
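Usage sketch (not part of the diff): counting tokens for the newly listed model string, assuming the cookbook-style signature `num_tokens_from_messages(messages, model=...)` that the hunk context suggests.

```python
from not_again_ai.llm.openai_api.tokens import num_tokens_from_messages

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
# With the new entry, this model string maps to the same per-message token
# accounting as the other gpt-4o variants (signature assumed, see note above).
n = num_tokens_from_messages(messages, model="gpt-4o-2024-08-06")
```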

not_again_ai/local_llm/chat_completion.py
@@ -1,8 +1,10 @@
  from typing import Any
 
+ from azure.ai.inference import ChatCompletionsClient
  from ollama import Client
- from openai import OpenAI
+ from openai import AzureOpenAI, OpenAI
 
+ from not_again_ai.llm.gh_models import chat_completion as chat_completion_gh_models
  from not_again_ai.llm.openai_api import chat_completion as chat_completion_openai
  from not_again_ai.local_llm.ollama import chat_completion as chat_completion_ollama
 
@@ -10,7 +12,7 @@ from not_again_ai.local_llm.ollama import chat_completion as chat_completion_ollama
  def chat_completion(
      messages: list[dict[str, Any]],
      model: str,
-     client: OpenAI | Client,
+     client: OpenAI | AzureOpenAI | Client | ChatCompletionsClient,
      tools: list[dict[str, Any]] | None = None,
      max_tokens: int | None = None,
      temperature: float = 0.7,
@@ -25,7 +27,7 @@ def chat_completion(
      Args:
          messages (list[dict[str, Any]]): A list of messages to send to the model.
          model (str): The model name to use.
-         client (OpenAI | Client): The client object to use for chat completion.
+         client (OpenAI | AzureOpenAI | Client | ChatCompletionsClient): The client object to use for chat completion.
          tools (list[dict[str, Any]], optional): A list of tools the model may call.
              Use this to provide a list of functions the model may generate JSON inputs for. Defaults to None.
          max_tokens (int, optional): The maximum number of tokens to generate.
@@ -46,7 +48,7 @@ def chat_completion(
          extras (dict): This will contain any additional fields returned by the corresponding provider.
      """
      # Determine which chat_completion function to call based on the client type
-     if isinstance(client, OpenAI):
+     if isinstance(client, OpenAI | AzureOpenAI):
          response = chat_completion_openai.chat_completion(
              messages=messages,
              model=model,
@@ -70,6 +72,18 @@ def chat_completion(
              seed=seed,
              **kwargs,
          )
+     elif isinstance(client, ChatCompletionsClient):
+         response = chat_completion_gh_models.chat_completion(
+             messages=messages,  # type: ignore
+             model=model,
+             client=client,
+             tools=tools,  # type: ignore
+             max_tokens=max_tokens,
+             temperature=temperature,
+             json_mode=json_mode,
+             seed=seed,
+             **kwargs,
+         )
      else:
          raise ValueError("Invalid client type")
 
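Usage sketch (not part of the diff): with this change, the provider-agnostic `chat_completion` also accepts a GitHub Models client; the model name below is a placeholder.

```python
from not_again_ai.llm.gh_models.azure_ai_client import azure_ai_client
from not_again_ai.local_llm.chat_completion import chat_completion

client = azure_ai_client()
# The isinstance dispatch above routes this call to chat_completion_gh_models.
response = chat_completion(
    messages=[{"role": "user", "content": "Hello!"}],
    model="gpt-4o-mini",  # placeholder
    client=client,
)
```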

not_again_ai/local_llm/ollama/chat_completion.py
@@ -85,14 +85,12 @@ def chat_completion(
 
      response_data: dict[str, Any] = {}
 
-     # Handle getting the message returned by the model
      message = response["message"].get("content", "")
      if message and json_mode:
          with contextlib.suppress(json.JSONDecodeError):
              message = json.loads(message)
      response_data["message"] = message
 
-     # Try getting tool calls
      if response["message"].get("tool_calls"):
          tool_calls = response["message"]["tool_calls"]
          tool_names = [tool_call["function"]["name"] for tool_call in tool_calls]
@@ -104,12 +102,10 @@ def chat_completion(
      prompt_tokens = num_tokens_from_messages(messages, tokenizer)
      response_data["prompt_tokens"] = prompt_tokens
 
-     # Get the number of tokens generated
      response_data["completion_tokens"] = response.get("eval_count", None)
      if response_data["completion_tokens"] is None:
          response_data["completion_tokens"] = num_tokens_in_string(str(response_data["message"]), tokenizer)
 
-     # Get the latency of the response
      response_data["response_duration"] = round(response_duration, 4)
 
      return response_data

not_again_ai-0.11.0.dist-info/METADATA → not_again_ai-0.12.1.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: not-again-ai
- Version: 0.11.0
+ Version: 0.12.1
  Summary: Designed to once and for all collect all the little things that come up over and over again in AI projects and put them in one place.
  Home-page: https://github.com/DaveCoDev/not-again-ai
  License: MIT
@@ -21,18 +21,21 @@ Provides-Extra: llm
  Provides-Extra: local-llm
  Provides-Extra: statistics
  Provides-Extra: viz
+ Requires-Dist: azure-ai-inference (==1.0.0b3) ; extra == "llm"
+ Requires-Dist: azure-identity (>=1.17,<2.0) ; extra == "llm"
  Requires-Dist: jinja2 (>=3.1,<4.0) ; extra == "local-llm"
  Requires-Dist: loguru (==0.7.2)
  Requires-Dist: numpy (>=1.26,<2.0) ; extra == "statistics" or extra == "viz"
  Requires-Dist: ollama (>=0.3,<0.4) ; extra == "local-llm"
- Requires-Dist: openai (>=1.37,<2.0) ; extra == "llm"
+ Requires-Dist: openai (>=1.41,<2.0) ; extra == "llm"
  Requires-Dist: pandas (>=2.2,<3.0) ; extra == "viz"
+ Requires-Dist: pydantic (>=2.8,<3.0) ; extra == "llm"
  Requires-Dist: python-liquid (>=1.12,<2.0) ; extra == "llm"
  Requires-Dist: scikit-learn (>=1.5,<2.0) ; extra == "statistics"
  Requires-Dist: scipy (>=1.14,<2.0) ; extra == "statistics"
  Requires-Dist: seaborn (>=0.13,<0.14) ; extra == "viz"
  Requires-Dist: tiktoken (>=0.7,<0.8) ; extra == "llm"
- Requires-Dist: transformers (>=4.43,<5.0) ; extra == "local-llm"
+ Requires-Dist: transformers (>=4.44,<5.0) ; extra == "local-llm"
  Project-URL: Documentation, https://github.com/DaveCoDev/not-again-ai
  Project-URL: Repository, https://github.com/DaveCoDev/not-again-ai
  Description-Content-Type: text/markdown
@@ -71,23 +74,30 @@ Note that local LLM requires separate installations and will not work out of the
  The package is split into subpackages, so you can install only the parts you need.
  * **Base only**: `pip install not_again_ai`
  * **LLM**: `pip install not_again_ai[llm]`
-     1. If you wish to use OpenAI
+     1. OpenAI API
          1. Go to https://platform.openai.com/settings/profile?tab=api-keys to get your API key.
          1. (Optional) Set the `OPENAI_API_KEY` and the `OPENAI_ORG_ID` environment variables.
+     1. Azure OpenAI (AOAI)
+         1. Using AOAI requires using Entra ID authentication. See https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity for how to set this up for your AOAI deployment.
+         1. Requires the correct role assigned to your user account and being signed into the Azure CLI.
+         1. (Optional) Set the `AZURE_OPENAI_ENDPOINT` environment variable.
+     1. GitHub Models
+         1. Get a Personal Access Token from https://github.com/settings/tokens and set the `GITHUB_TOKEN` environment variable. The token does not need any permissions.
+         1. Check the [GitHub Marketplace](https://github.com/marketplace/models) to see which models are available.
  * **Local LLM**: `pip install not_again_ai[llm,local_llm]`
      1. Some HuggingFace transformers tokenizers are gated behind access requests. If you wish to use these, you will need to request access from HuggingFace on the model card.
          1. Then set the `HF_TOKEN` environment variable to your HuggingFace API token which can be found here: https://huggingface.co/settings/tokens
-     1. If you wish to use Ollama:
+     2. If you wish to use Ollama:
          1. Follow the instructions at https://github.com/ollama/ollama to install Ollama for your system.
-         1. (Optional) [Add Ollama as a startup service (recommended)](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended)
-         1. (Optional) To make the Ollama service accessible on your local network from a Linux server, add the following to the `/etc/systemd/system/ollama.service` file which will make Ollama available at `http://<local_address>:11434`:
+         2. (Optional) [Add Ollama as a startup service (recommended)](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended)
+         3. (Optional) To make the Ollama service accessible on your local network from a Linux server, add the following to the `/etc/systemd/system/ollama.service` file which will make Ollama available at `http://<local_address>:11434`:
          ```bash
          [Service]
          ...
          Environment="OLLAMA_HOST=0.0.0.0"
          ```
-         1. It is recommended to always have the latest version of Ollama. To update Ollama check the [docs](https://github.com/ollama/ollama/blob/main/docs/). The command for Linux is: `curl -fsSL https://ollama.com/install.sh | sh`
-     2. HuggingFace transformers and other requirements are hardware dependent so for providers other than Ollama, this only installs some generic dependencies. Check the [notebooks](notebooks/local_llm/) for more details on what is available and how to install it.
+         4. It is recommended to always have the latest version of Ollama. To update Ollama check the [docs](https://github.com/ollama/ollama/blob/main/docs/). The command for Linux is: `curl -fsSL https://ollama.com/install.sh | sh`
+         3. HuggingFace transformers and other requirements are hardware dependent so for providers other than Ollama, this only installs some generic dependencies. Check the [notebooks](notebooks/local_llm/) for more details on what is available and how to install it.
  * **Statistics**: `pip install not_again_ai[statistics]`
  * **Visualization**: `pip install not_again_ai[viz]`
 

not_again_ai-0.11.0.dist-info/RECORD → not_again_ai-0.12.1.dist-info/RECORD
@@ -3,20 +3,23 @@ not_again_ai/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU
  not_again_ai/base/file_system.py,sha256=KNQmacO4Q__CQuq2oPzWrg3rQO48n3evglc9bNiP7KM,949
  not_again_ai/base/parallel.py,sha256=fcYhKBYBWvob84iKp3O93wvFFdXeidljZsShgBLTNGA,3448
  not_again_ai/llm/__init__.py,sha256=_wNUL6FDaT369Z8W48FsaC_NkcOZ-ib2MMUvnaLOS-0,451
+ not_again_ai/llm/gh_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ not_again_ai/llm/gh_models/azure_ai_client.py,sha256=GkVn9ZwYbsLm3X0A3pGKKHuoqrxc-BZnZ4n9ExelRUQ,580
+ not_again_ai/llm/gh_models/chat_completion.py,sha256=t6HfwOh8UKtE7OqJsCaFOjE2CqpnJV3gQPNXZvoSyYo,3631
  not_again_ai/llm/openai_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- not_again_ai/llm/openai_api/chat_completion.py,sha256=PRFi5Sl1K5GOgfWDYygHlmS-Ks1ZE6ETBzinZsz5GCc,8954
+ not_again_ai/llm/openai_api/chat_completion.py,sha256=xlqVAp2YaCXqw5zU_vAPCW7SaJfUe1vdeUqKSjoqDtE,9771
  not_again_ai/llm/openai_api/context_management.py,sha256=BJSG100_qw9MeTCZGztDV5CBXjVOxU4x7gyoRlLxWnI,3561
  not_again_ai/llm/openai_api/embeddings.py,sha256=4OBnxZicrY6q4dQhuPqMdAnifyjwrsKMTDj-kVre0yc,2500
- not_again_ai/llm/openai_api/openai_client.py,sha256=6pZw2xw9X-ceV22rhApwFJ2tAKCxi-SxkjxBsTBZ2Nw,2470
- not_again_ai/llm/openai_api/prompts.py,sha256=7cDfvIKCTYM0t5lK34FLLqYf-SR_cynDXIXw3zWDizA,7094
- not_again_ai/llm/openai_api/tokens.py,sha256=31neIrY66ejJQ10VB3EWnkN00wuw9vMpCS8tsw2WtFg,4392
+ not_again_ai/llm/openai_api/openai_client.py,sha256=AK9SDBkpP94u5Q73-Q5i5HRPQh_D8cF8Dfl0IgPsJDQ,3816
+ not_again_ai/llm/openai_api/prompts.py,sha256=B62xs3WKaTv7SfT_TVC-PqO9oeWWpO0xS4_oxW9MYMQ,7093
+ not_again_ai/llm/openai_api/tokens.py,sha256=RYBzl5vqE_MzWM60QbWC_6X9YOQoOgBOeR-68rM34II,4421
  not_again_ai/local_llm/__init__.py,sha256=BsUn39U3QQaw6yomQHfp_HIPHRIBoMAgjcP3CDADx04,882
- not_again_ai/local_llm/chat_completion.py,sha256=buZQGV2sChaSi5cgiAcOd9gi4lAEzFIGGIqV-1qazAc,4174
+ not_again_ai/local_llm/chat_completion.py,sha256=PmICXrGZJXIuqY00ULBGi2bKnPG8ticqTXZHSTzZK9o,4828
  not_again_ai/local_llm/huggingface/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  not_again_ai/local_llm/huggingface/chat_completion.py,sha256=Y6uMbxLG8TaMVi3hJGrMl_G9Y1N_0dld5Kv1iqYnoao,2300
  not_again_ai/local_llm/huggingface/helpers.py,sha256=YPr8KbQ8Ac_Mn_nBcrFuL3bCl-IuDCdaRvYVCocy8Gk,734
  not_again_ai/local_llm/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- not_again_ai/local_llm/ollama/chat_completion.py,sha256=C8uU-yq7FL9OLdflZVjbNkEOofmD2A3Hcsd8k-59iS4,5053
+ not_again_ai/local_llm/ollama/chat_completion.py,sha256=WNnR-fe50wSDsoAdSZSKyjoqLCCPb00jIWSIFWo_Bbg,4890
  not_again_ai/local_llm/ollama/model_mapping.py,sha256=sJqPg97OO68O0k5MFkTjRLIn9gB7gCRAcyUEjxMfizo,891
  not_again_ai/local_llm/ollama/ollama_client.py,sha256=dktyw7aKFq4EA3dU7Le5UpfsSq3Oh_POmYSrAI4qLi8,765
  not_again_ai/local_llm/ollama/service.py,sha256=XczbxISTAp4KHnIkqRZaMsfBohH-TAHrjZ8T9x3cRAY,2900
@@ -32,8 +35,8 @@ not_again_ai/viz/distributions.py,sha256=OyWwJaNI6lMRm_iSrhq-CORLNvXfeuLSgDtVo3u
  not_again_ai/viz/scatterplot.py,sha256=5CUOWeknbBOaZPeX9oPin5sBkRKEwk8qeFH45R-9LlY,2292
  not_again_ai/viz/time_series.py,sha256=pOGZqXp_2nd6nKo-PUQNCtmMh__69jxQ6bQibTGLwZA,5212
  not_again_ai/viz/utils.py,sha256=hN7gwxtBt3U6jQni2K8j5m5pCXpaJDoNzGhBBikEU28,238
- not_again_ai-0.11.0.dist-info/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
- not_again_ai-0.11.0.dist-info/METADATA,sha256=S7IbgUSScGSxAeKWmyxk4hpy4qS-eLqpmfDfFEZq4Ok,15517
- not_again_ai-0.11.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- not_again_ai-0.11.0.dist-info/entry_points.txt,sha256=EMJegugnmJUd-jMUA_qIRMIPAasbei8gP6O4-ER0BxQ,61
- not_again_ai-0.11.0.dist-info/RECORD,,
+ not_again_ai-0.12.1.dist-info/LICENSE,sha256=btjOgNGpp-ux5xOo1Gx1MddxeWtT9sof3s3Nui29QfA,1071
+ not_again_ai-0.12.1.dist-info/METADATA,sha256=VydzFufICQyP6paN15KJTudJi6rSpwWn5H_W1v46p6Y,16389
+ not_again_ai-0.12.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ not_again_ai-0.12.1.dist-info/entry_points.txt,sha256=EMJegugnmJUd-jMUA_qIRMIPAasbei8gP6O4-ER0BxQ,61
+ not_again_ai-0.12.1.dist-info/RECORD,,