chatlas-0.2.0-py3-none-any.whl


chatlas/_groq.py ADDED
@@ -0,0 +1,143 @@
+ from __future__ import annotations
+
+ import os
+ from typing import TYPE_CHECKING, Optional
+
+ from ._chat import Chat
+ from ._logging import log_model_default
+ from ._openai import ChatOpenAI
+ from ._turn import Turn
+ from ._utils import MISSING, MISSING_TYPE
+
+ if TYPE_CHECKING:
+     from ._openai import ChatCompletion
+     from .types.openai import ChatClientArgs, SubmitInputArgs
+
+
+ def ChatGroq(
+     *,
+     system_prompt: Optional[str] = None,
+     turns: Optional[list[Turn]] = None,
+     model: Optional[str] = None,
+     api_key: Optional[str] = None,
+     base_url: str = "https://api.groq.com/openai/v1",
+     seed: Optional[int] | MISSING_TYPE = MISSING,
+     kwargs: Optional["ChatClientArgs"] = None,
+ ) -> Chat["SubmitInputArgs", ChatCompletion]:
+     """
+     Chat with a model hosted on Groq.
+
+     Groq provides a platform for highly efficient AI inference.
+
+     Prerequisites
+     -------------
+
+     ::: {.callout-note}
+     ## API key
+
+     Sign up at <https://groq.com> to get an API key.
+     :::
+
+     ::: {.callout-note}
+     ## Python requirements
+
+     `ChatGroq` requires the `openai` package (e.g., `pip install openai`).
+     :::
+
+     Examples
+     --------
+
+     ```python
+     import os
+     from chatlas import ChatGroq
+
+     chat = ChatGroq(api_key=os.getenv("GROQ_API_KEY"))
+     chat.chat("What is the capital of France?")
+     ```
+
+     Parameters
+     ----------
+     system_prompt
+         A system prompt to set the behavior of the assistant.
+     turns
+         A list of turns to start the chat with (i.e., continuing a previous
+         conversation). If not provided, the conversation begins from scratch.
+         Do not provide non-`None` values for both `turns` and `system_prompt`.
+         Each turn is a `Turn` object with at least a `role` (usually
+         `system`, `user`, or `assistant`, but `tool` is also possible)
+         and the turn's contents (normally a string).
+     model
+         The model to use for the chat. The default, `None`, picks a reasonable
+         default and warns you about it. We strongly recommend explicitly
+         choosing a model for all but the most casual use.
+     api_key
+         The API key to use for authentication. You generally should not supply
+         this directly, but instead set the `GROQ_API_KEY` environment variable.
+     base_url
+         The base URL to the endpoint; the default uses Groq's API.
+     seed
+         Optional integer seed that the model uses to try to make output more
+         reproducible.
+     kwargs
+         Additional arguments to pass to the `openai.OpenAI()` client constructor.
+
+     Returns
+     -------
+     Chat
+         A chat object that retains the state of the conversation.
+
+     Note
+     ----
+     This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`) with
+     the defaults tweaked for Groq.
+
+     Note
+     ----
+     Pasting an API key into a chat constructor (e.g., `ChatGroq(api_key="...")`)
+     is the simplest way to get started, and is fine for interactive use, but is
+     problematic for code that may be shared with others.
+
+     Instead, consider using environment variables or a configuration file to
+     manage your credentials. One popular approach is to store them in a `.env`
+     file and then use the `python-dotenv` package to load them into your
+     environment.
+
+     ```shell
+     pip install python-dotenv
+     ```
+
+     ```shell
+     # .env
+     GROQ_API_KEY=...
+     ```
+
+     ```python
+     from chatlas import ChatGroq
+     from dotenv import load_dotenv
+
+     load_dotenv()
+     chat = ChatGroq()
+     chat.console()
+     ```
+
+     Another, more general, solution is to load your environment variables into
+     the shell before starting Python (maybe in a `.bashrc`, `.zshrc`, etc. file):
+
+     ```shell
+     export GROQ_API_KEY=...
+     ```
+     """
+     if model is None:
+         model = log_model_default("llama3-8b-8192")
+     if api_key is None:
+         api_key = os.getenv("GROQ_API_KEY")
+
+     return ChatOpenAI(
+         system_prompt=system_prompt,
+         turns=turns,
+         model=model,
+         api_key=api_key,
+         base_url=base_url,
+         seed=seed,
+         kwargs=kwargs,
+     )
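
The function above only fills in a default model and reads `GROQ_API_KEY` before delegating to `ChatOpenAI`. A minimal usage sketch, assuming that key is set in your environment and that `llama3-8b-8192` (the fallback default above) is still offered by Groq; the prompt and seed are illustrative:

```python
from chatlas import ChatGroq

# Passing an explicit model avoids the "Defaulting to `model = ...`" log
# message that the wrapper emits otherwise.
chat = ChatGroq(model="llama3-8b-8192", seed=42)
chat.chat("What is the capital of France?")
```
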
chatlas/_interpolate.py ADDED
@@ -0,0 +1,133 @@
+ import inspect
+ from pathlib import Path
+ from typing import Any, Optional, Union
+
+ from jinja2 import Environment
+
+ __all__ = (
+     "interpolate",
+     "interpolate_file",
+ )
+
+
+ def interpolate(
+     prompt: str,
+     *,
+     variables: Optional[dict[str, Any]] = None,
+     variable_start: str = "{{",
+     variable_end: str = "}}",
+ ) -> str:
+     """
+     Interpolate variables into a prompt
+
+     This is a lightweight wrapper around the Jinja2 templating engine, making
+     it easier to interpolate dynamic data into a prompt template. Compared to
+     f-strings, which expect you to wrap dynamic values in `{ }`, this function
+     expects `{{ }}` instead, making it easier to include Python code and JSON in
+     your prompt.
+
+     Parameters
+     ----------
+     prompt
+         The prompt to interpolate (as a string).
+     variables
+         A dictionary of variables to interpolate into the prompt. If not
+         provided, the caller's global and local variables are used.
+     variable_start
+         The string that marks the beginning of a variable.
+     variable_end
+         The string that marks the end of a variable.
+
+     Returns
+     -------
+     str
+         The prompt with variables interpolated.
+
+     Examples
+     --------
+
+     ```python
+     from chatlas import interpolate
+
+     x = 1
+     interpolate("The value of `x` is: {{ x }}")
+     ```
+     """
+     if variables is None:
+         frame = inspect.currentframe()
+         variables = _infer_variables(frame)
+         del frame
+
+     env = Environment(
+         variable_start_string=variable_start,
+         variable_end_string=variable_end,
+     )
+
+     template = env.from_string(prompt)
+     return template.render(variables)
+
+
+ def interpolate_file(
+     path: Union[str, Path],
+     *,
+     variables: Optional[dict[str, Any]] = None,
+     variable_start: str = "{{",
+     variable_end: str = "}}",
+ ) -> str:
+     """
+     Interpolate variables into a prompt from a file
+
+     This is a lightweight wrapper around the Jinja2 templating engine, making
+     it easier to interpolate dynamic data into a static prompt. Compared to
+     f-strings, which expect you to wrap dynamic values in `{ }`, this function
+     expects `{{ }}` instead, making it easier to include Python code and JSON in
+     your prompt.
+
+     Parameters
+     ----------
+     path
+         The path to the file containing the prompt to interpolate.
+     variables
+         A dictionary of variables to interpolate into the prompt. If not
+         provided, the caller's global and local variables are used.
+     variable_start
+         The string that marks the beginning of a variable.
+     variable_end
+         The string that marks the end of a variable.
+
+     Returns
+     -------
+     str
+         The prompt with variables interpolated.
+
+     See Also
+     --------
+     interpolate
+         Interpolating data into a system prompt
+     """
+     if variables is None:
+         frame = inspect.currentframe()
+         variables = _infer_variables(frame)
+         del frame
+
+     with open(path, "r") as file:
+         return interpolate(
+             file.read(),
+             variables=variables,
+             variable_start=variable_start,
+             variable_end=variable_end,
+         )
+
+
+ def _infer_variables(frame) -> dict[str, Any]:
+     if not inspect.isframe(frame) or frame.f_back is None:
+         raise RuntimeError(
+             "`interpolate()` was unable to infer the caller's global and local "
+             "variables (because the caller's frame is not available). Consider "
+             "passing `variables` explicitly to `interpolate()`."
+         )
+
+     return {
+         **frame.f_back.f_globals,
+         **frame.f_back.f_locals,
+     }
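
The `variable_start`/`variable_end` parameters are easy to miss: they let you pick delimiters that won't collide with literal braces in the prompt itself. A small sketch (the `<<`/`>>` delimiters are arbitrary choices, not a library convention):

```python
from chatlas import interpolate

user = "Alice"

# Default {{ }} delimiters; `variables` is inferred from the caller's scope.
print(interpolate("Hello, {{ user }}!"))  # Hello, Alice!

# A prompt containing literal JSON braces would fight with {{ }}, so swap
# in custom delimiters instead.
prompt = 'Respond with JSON like {"greeting": "..."} addressed to << user >>.'
print(interpolate(prompt, variable_start="<<", variable_end=">>"))
```
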
chatlas/_logging.py ADDED
@@ -0,0 +1,61 @@
+ import logging
+ import os
+ import warnings
+
+ from rich.logging import RichHandler
+
+
+ def _rich_handler() -> RichHandler:
+     formatter = logging.Formatter("%(name)s - %(message)s")
+     handler = RichHandler()
+     handler.setFormatter(formatter)
+     return handler
+
+
+ logger = logging.getLogger("chatlas")
+
+ if os.environ.get("CHATLAS_LOG") == "info":
+     # By adding a RichHandler to chatlas' logger, we can guarantee that its
+     # logs never get dropped, even if the root logger's handlers are not
+     # RichHandlers.
+     logger.setLevel(logging.INFO)
+     logger.addHandler(_rich_handler())
+     logger.propagate = False
+
+     # Add a RichHandler to the root logger if there are no handlers. Note
+     # that if chatlas is imported before other libraries that set up logging
+     # (like openai, anthropic, or httpx), this ensures that logs from those
+     # libraries are also displayed in the rich console.
+     root = logging.getLogger()
+     if not root.handlers:
+         root.addHandler(_rich_handler())
+
+     # Warn if there are non-RichHandler handlers on the root logger.
+     # TODO: we could consider something a bit more aggressive here, like
+     # removing non-RichHandler handlers from the root logger, but that could
+     # be surprising to users.
+     bad_handlers = [
+         h.get_name() for h in root.handlers if not isinstance(h, RichHandler)
+     ]
+     if len(bad_handlers) > 0:
+         warnings.warn(
+             "When setting up logging handlers for CHATLAS_LOG, chatlas detected "
+             f"non-rich handler(s) on the root logger named {bad_handlers}. "
+             "As a result, logs handled by those handlers may be dropped when "
+             "the `echo` argument of `.chat()`, `.stream()`, etc., is something "
+             "other than 'none'. This problem can likely be fixed by importing "
+             "`chatlas` before other libraries that set up logging, or by adding "
+             "a RichHandler to the root logger before loading other libraries.",
+         )
+
+
+ def log_model_default(model: str) -> str:
+     logger.info(f"Defaulting to `model = '{model}'`.")
+     return model
+
+
+ def log_tool_error(name: str, arguments: str, e: Exception):
+     logger.info(
+         f"Error invoking tool function '{name}' with arguments: {arguments}. "
+         f"The error message is: '{e}'",
+     )
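
Because the `CHATLAS_LOG` check runs at module import time, the environment variable must be set before chatlas is first imported. A sketch of enabling it from Python rather than the shell:

```python
import os

# Must happen before the first `import chatlas`; alternatively run
# `CHATLAS_LOG=info python script.py` from the shell.
os.environ["CHATLAS_LOG"] = "info"

import chatlas  # noqa: E402 - deliberately imported after setting the env var

# chatlas (and, if the root logger had no handlers, libraries like openai
# or httpx) now emit INFO-level logs through a RichHandler.
```
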
chatlas/_merge.py ADDED
@@ -0,0 +1,103 @@
+ # Adapted from https://github.com/langchain-ai/langchain/blob/master/libs/core/langchain_core/utils/_merge.py
+ # Also tweaked to more closely match https://github.com/hadley/elmer/blob/main/R/utils-merge.R
+
+ from __future__ import annotations
+
+ from typing import Any, Optional
+
+
+ def merge_dicts(left: dict[str, Any], *others: dict[str, Any]) -> dict[str, Any]:
+     """Merge many dicts, handling specific scenarios where a key exists in both
+     dictionaries but has a value of None in 'left'. In such cases, the method uses the
+     value from 'right' for that key in the merged dictionary.
+
+     Args:
+         left: The first dictionary to merge.
+         others: The other dictionaries to merge.
+
+     Returns:
+         The merged dictionary.
+
+     Raises:
+         TypeError: If the key exists in both dictionaries but has a different type.
+         TypeError: If the value has an unsupported type.
+
+     Example:
+         If left = {"function_call": {"arguments": None}} and
+         right = {"function_call": {"arguments": "{\n"}}
+         then, after merging, for the key "function_call",
+         the value from 'right' is used,
+         resulting in merged = {"function_call": {"arguments": "{\n"}}.
+     """
+     merged = left.copy()
+     for right in others:
+         for right_k, right_v in right.items():
+             left_v = merged.get(right_k, None)
+
+             if right_v is None:
+                 if right_k not in merged:
+                     merged[right_k] = None
+             elif left_v is None:
+                 merged[right_k] = right_v
+             elif left_v == right_v:
+                 continue
+             elif isinstance(left_v, str):
+                 merged[right_k] += right_v
+             elif isinstance(left_v, (int, float)):
+                 merged[right_k] = right_v
+             elif isinstance(merged[right_k], dict):
+                 merged[right_k] = merge_dicts(merged[right_k], right_v)
+             elif isinstance(merged[right_k], list):
+                 merged[right_k] = merge_lists(merged[right_k], right_v)
+             elif type(merged[right_k]) is not type(right_v):
+                 raise TypeError(
+                     f'additional_kwargs["{right_k}"] already exists in this message,'
+                     " but with a different type."
+                 )
+             else:
+                 raise TypeError(
+                     f"Additional kwargs key {right_k} already exists in left dict and "
+                     f"value has unsupported type {type(merged[right_k])}."
+                 )
+     return merged
+
+
+ def merge_lists(
+     left: Optional[list[Any]], *others: Optional[list[Any]]
+ ) -> Optional[list[Any]]:
+     """Add many lists, handling None.
+
+     Args:
+         left: The first list to merge.
+         others: The other lists to merge.
+
+     Returns:
+         The merged list.
+     """
+     merged = left.copy() if left is not None else None
+     for other in others:
+         if other is None:
+             continue
+         elif merged is None:
+             merged = other.copy()
+         else:
+             for e in other:
+                 if isinstance(e, dict) and "index" in e and isinstance(e["index"], int):
+                     to_merge = [
+                         i
+                         for i, e_left in enumerate(merged)
+                         if e_left["index"] == e["index"]
+                     ]
+                     if to_merge:
+                         # TODO: Remove this once merge_dict is updated with special
+                         # handling for 'type'.
+                         if "type" in e:
+                             e: dict[str, Any] = {  # noqa: PLW2901
+                                 k: v for k, v in e.items() if k != "type"
+                             }
+                         merged[to_merge[0]] = merge_dicts(merged[to_merge[0]], e)
+                     else:
+                         merged.append(e)
+                 else:
+                     merged.append(e)
+     return merged
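
To make the merging rules concrete, here is a small illustration with made-up streaming-style deltas (`_merge` is a private module, imported here only for demonstration):

```python
from chatlas._merge import merge_dicts, merge_lists

# Strings concatenate, equal values are kept as-is, and nested structures
# merge recursively -- exactly what accumulating streamed deltas requires.
a = {"content": "The capital", "index": 0}
b = {"content": " is Paris.", "index": 0}
print(merge_dicts(a, b))
# {'content': 'The capital is Paris.', 'index': 0}

# merge_lists pairs elements by their "index" key, so fragments of the
# same tool call land on the same entry.
left = [{"index": 0, "arguments": '{"city": '}]
right = [{"index": 0, "arguments": '"Paris"}'}]
print(merge_lists(left, right))
# [{'index': 0, 'arguments': '{"city": "Paris"}'}]
```
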
chatlas/_ollama.py ADDED
@@ -0,0 +1,125 @@
+ from __future__ import annotations
+
+ import json
+ import re
+ import urllib.request
+ from typing import TYPE_CHECKING, Optional
+
+ from ._chat import Chat
+ from ._openai import ChatOpenAI
+ from ._turn import Turn
+
+ if TYPE_CHECKING:
+     from ._openai import ChatCompletion
+     from .types.openai import ChatClientArgs, SubmitInputArgs
+
+
+ def ChatOllama(
+     model: Optional[str] = None,
+     *,
+     system_prompt: Optional[str] = None,
+     turns: Optional[list[Turn]] = None,
+     base_url: str = "http://localhost:11434",
+     seed: Optional[int] = None,
+     kwargs: Optional["ChatClientArgs"] = None,
+ ) -> Chat["SubmitInputArgs", ChatCompletion]:
+     """
+     Chat with a local Ollama model.
+
+     [Ollama](https://ollama.com) makes it easy to run a wide variety of
+     open-source models locally, making it a great choice for privacy
+     and security.
+
+
+     Prerequisites
+     -------------
+
+     ::: {.callout-note}
+     ## Ollama runtime
+
+     `ChatOllama` requires the [ollama](https://ollama.com/download) executable
+     to be installed and running on your machine.
+     :::
+
+     ::: {.callout-note}
+     ## Pull model(s)
+
+     Once ollama is running locally, download a model from the command line
+     (e.g., `ollama pull llama3.2`).
+     :::
+
+     Examples
+     --------
+
+     ```python
+     from chatlas import ChatOllama
+
+     chat = ChatOllama(model="llama3.2")
+     chat.chat("What is the capital of France?")
+     ```
+
+     Parameters
+     ----------
+     model
+         The model to use for the chat. If `None`, an error listing the
+         locally installed models is raised.
+     system_prompt
+         A system prompt to set the behavior of the assistant.
+     turns
+         A list of turns to start the chat with (i.e., continuing a previous
+         conversation). If not provided, the conversation begins from scratch.
+         Do not provide non-`None` values for both `turns` and `system_prompt`.
+         Each turn is a `Turn` object with at least a `role` (usually
+         `system`, `user`, or `assistant`, but `tool` is also possible)
+         and the turn's contents (normally a string).
+     base_url
+         The base URL to the endpoint; the default uses ollama's API.
+     seed
+         Optional integer seed that helps to make output more reproducible.
+     kwargs
+         Additional arguments to pass to the `openai.OpenAI()` client constructor.
+
+     Note
+     ----
+     This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`) with
+     the defaults tweaked for Ollama.
+
+     Limitations
+     -----------
+     `ChatOllama` currently doesn't work with streaming tools, and tool calling
+     more generally doesn't seem to work very well with currently available models.
+     """
+
+     base_url = re.sub("/+$", "", base_url)
+
+     if not has_ollama(base_url):
+         raise RuntimeError("Can't find locally running ollama.")
+
+     if model is None:
+         models = ollama_models(base_url)
+         raise ValueError(
+             f"Must specify model. Locally installed models: {', '.join(models)}"
+         )
+
+     return ChatOpenAI(
+         system_prompt=system_prompt,
+         turns=turns,
+         base_url=f"{base_url}/v1",
+         model=model,
+         seed=seed,
+         kwargs=kwargs,
+     )
+
+
+ def ollama_models(base_url: str) -> list[str]:
+     res = urllib.request.urlopen(url=f"{base_url}/api/tags")
+     data = json.loads(res.read())
+     return [re.sub(":latest$", "", x["name"]) for x in data["models"]]
+
+
+ def has_ollama(base_url):
+     try:
+         urllib.request.urlopen(url=f"{base_url}/api/tags")
+         return True
+     except Exception:
+         return False
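
As the code above shows, calling `ChatOllama()` without a model fails fast, and the error message doubles as a way to discover what's installed. A sketch, assuming a local ollama server is running and `llama3.2` has been pulled:

```python
from chatlas import ChatOllama

# With no model specified, a ValueError lists the locally installed models
# (a RuntimeError is raised instead if ollama isn't running).
try:
    ChatOllama()
except ValueError as err:
    print(err)  # e.g. "Must specify model. Locally installed models: llama3.2"

chat = ChatOllama(model="llama3.2")
chat.chat("What is the capital of France?")
```
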