chatlas 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of chatlas might be problematic. Click here for more details.

@@ -0,0 +1,22 @@
1
+ # ---------------------------------------------------------
2
+ # Do not modify this file. It was generated by `scripts/generate_typed_dicts.py`.
3
+ # ---------------------------------------------------------
4
+
5
+
6
+ from typing import Mapping, Optional, TypedDict, Union
7
+
8
+ import httpx
9
+ import openai
10
+
11
+
12
class ChatClientArgs(TypedDict, total=False):
    """Keyword arguments forwarded to the underlying ``openai.AsyncOpenAI`` client.

    All keys are optional (``total=False``); omitted keys fall back to the
    client's own defaults.
    """

    # NOTE: PEP 604 unions (``X | Y``) are evaluated at class-creation time in
    # a TypedDict body (there is no ``from __future__ import annotations``
    # here), so they require Python >= 3.10 at runtime. The package metadata
    # declares ``Requires-Python: >=3.9``, so every union is spelled with
    # ``Union``/``Optional`` instead.
    api_key: Optional[str]
    organization: Optional[str]
    project: Optional[str]
    base_url: Union[str, httpx.URL, None]
    timeout: Union[float, openai.Timeout, None, openai.NotGiven]
    max_retries: int
    default_headers: Optional[Mapping[str, str]]
    default_query: Optional[Mapping[str, object]]
    http_client: httpx.AsyncClient
    # Leading underscore mirrors the openai client's private-ish toggle name.
    _strict_response_validation: bool
@@ -0,0 +1,25 @@
1
+ # ---------------------------------------------------------
2
+ # Do not modify this file. It was generated by `scripts/generate_typed_dicts.py`.
3
+ # ---------------------------------------------------------
4
+
5
+ from typing import Mapping, Optional, TypedDict, Union
6
+
7
+ import httpx
8
+ import openai
9
+
10
+
11
class ChatAzureClientArgs(TypedDict, total=False):
    """Keyword arguments forwarded to the underlying ``openai.AsyncAzureOpenAI`` client.

    All keys are optional (``total=False``); omitted keys fall back to the
    client's own defaults.
    """

    # NOTE: unions are spelled with ``Union``/``Optional`` rather than PEP 604
    # ``X | Y``: TypedDict annotations are evaluated eagerly (no
    # ``from __future__ import annotations``), and ``X | Y`` needs Python
    # >= 3.10 while the package declares support for >= 3.9.
    azure_endpoint: Optional[str]
    azure_deployment: Optional[str]
    api_version: Optional[str]
    api_key: Optional[str]
    azure_ad_token: Optional[str]
    organization: Optional[str]
    project: Optional[str]
    base_url: Optional[str]
    timeout: Union[float, openai.Timeout, None, openai.NotGiven]
    max_retries: int
    default_headers: Optional[Mapping[str, str]]
    default_query: Optional[Mapping[str, object]]
    http_client: httpx.AsyncClient
    # Leading underscore mirrors the openai client's private-ish toggle name.
    _strict_response_validation: bool
@@ -0,0 +1,135 @@
1
+ # ---------------------------------------------------------
2
+ # Do not modify this file. It was generated by `scripts/generate_typed_dicts.py`.
3
+ # ---------------------------------------------------------
4
+
5
+
6
+ from typing import Iterable, Literal, Mapping, Optional, TypedDict, Union
7
+
8
+ import openai
9
+ import openai.types.chat.chat_completion_assistant_message_param
10
+ import openai.types.chat.chat_completion_audio_param
11
+ import openai.types.chat.chat_completion_function_call_option_param
12
+ import openai.types.chat.chat_completion_function_message_param
13
+ import openai.types.chat.chat_completion_named_tool_choice_param
14
+ import openai.types.chat.chat_completion_prediction_content_param
15
+ import openai.types.chat.chat_completion_stream_options_param
16
+ import openai.types.chat.chat_completion_system_message_param
17
+ import openai.types.chat.chat_completion_tool_message_param
18
+ import openai.types.chat.chat_completion_tool_param
19
+ import openai.types.chat.chat_completion_user_message_param
20
+ import openai.types.chat.completion_create_params
21
+ import openai.types.shared_params.response_format_json_object
22
+ import openai.types.shared_params.response_format_json_schema
23
+ import openai.types.shared_params.response_format_text
24
+
25
+
26
class SubmitInputArgs(TypedDict, total=False):
    """Keyword arguments accepted by OpenAI's chat-completion create endpoint.

    All keys are optional (``total=False``); omitted keys fall back to the
    API defaults.
    """

    # NOTE: unions are spelled with ``Union``/``Optional`` rather than PEP 604
    # ``X | Y``: TypedDict annotations are evaluated eagerly, and ``X | Y``
    # needs Python >= 3.10 while the package declares support for >= 3.9.
    # (Builtin generics like ``dict[str, int]`` are fine: PEP 585 landed in 3.9.)
    messages: Iterable[
        Union[
            openai.types.chat.chat_completion_system_message_param.ChatCompletionSystemMessageParam,
            openai.types.chat.chat_completion_user_message_param.ChatCompletionUserMessageParam,
            openai.types.chat.chat_completion_assistant_message_param.ChatCompletionAssistantMessageParam,
            openai.types.chat.chat_completion_tool_message_param.ChatCompletionToolMessageParam,
            openai.types.chat.chat_completion_function_message_param.ChatCompletionFunctionMessageParam,
        ]
    ]
    # Plain ``str`` is accepted alongside the known model literals so newer
    # model names remain usable without regenerating this file.
    model: Union[
        str,
        Literal[
            "o1-preview",
            "o1-preview-2024-09-12",
            "o1-mini",
            "o1-mini-2024-09-12",
            "gpt-4o",
            "gpt-4o-2024-11-20",
            "gpt-4o-2024-08-06",
            "gpt-4o-2024-05-13",
            "gpt-4o-realtime-preview",
            "gpt-4o-realtime-preview-2024-10-01",
            "gpt-4o-audio-preview",
            "gpt-4o-audio-preview-2024-10-01",
            "chatgpt-4o-latest",
            "gpt-4o-mini",
            "gpt-4o-mini-2024-07-18",
            "gpt-4-turbo",
            "gpt-4-turbo-2024-04-09",
            "gpt-4-0125-preview",
            "gpt-4-turbo-preview",
            "gpt-4-1106-preview",
            "gpt-4-vision-preview",
            "gpt-4",
            "gpt-4-0314",
            "gpt-4-0613",
            "gpt-4-32k",
            "gpt-4-32k-0314",
            "gpt-4-32k-0613",
            "gpt-3.5-turbo",
            "gpt-3.5-turbo-16k",
            "gpt-3.5-turbo-0301",
            "gpt-3.5-turbo-0613",
            "gpt-3.5-turbo-1106",
            "gpt-3.5-turbo-0125",
            "gpt-3.5-turbo-16k-0613",
        ],
    ]
    audio: Union[
        openai.types.chat.chat_completion_audio_param.ChatCompletionAudioParam,
        None,
        openai.NotGiven,
    ]
    frequency_penalty: Union[float, None, openai.NotGiven]
    function_call: Union[
        Literal["none", "auto"],
        openai.types.chat.chat_completion_function_call_option_param.ChatCompletionFunctionCallOptionParam,
        openai.NotGiven,
    ]
    functions: Union[
        Iterable[openai.types.chat.completion_create_params.Function], openai.NotGiven
    ]
    logit_bias: Union[dict[str, int], None, openai.NotGiven]
    logprobs: Union[bool, None, openai.NotGiven]
    max_completion_tokens: Union[int, None, openai.NotGiven]
    max_tokens: Union[int, None, openai.NotGiven]
    metadata: Union[dict[str, str], None, openai.NotGiven]
    modalities: Union[list[Literal["text", "audio"]], None, openai.NotGiven]
    n: Union[int, None, openai.NotGiven]
    parallel_tool_calls: Union[bool, openai.NotGiven]
    prediction: Union[
        openai.types.chat.chat_completion_prediction_content_param.ChatCompletionPredictionContentParam,
        None,
        openai.NotGiven,
    ]
    presence_penalty: Union[float, None, openai.NotGiven]
    response_format: Union[
        openai.types.shared_params.response_format_text.ResponseFormatText,
        openai.types.shared_params.response_format_json_object.ResponseFormatJSONObject,
        openai.types.shared_params.response_format_json_schema.ResponseFormatJSONSchema,
        openai.NotGiven,
    ]
    seed: Union[int, None, openai.NotGiven]
    service_tier: Union[Literal["auto", "default"], None, openai.NotGiven]
    stop: Union[str, None, list[str], openai.NotGiven]
    store: Union[bool, None, openai.NotGiven]
    stream: Union[Literal[False], None, Literal[True], openai.NotGiven]
    stream_options: Union[
        openai.types.chat.chat_completion_stream_options_param.ChatCompletionStreamOptionsParam,
        None,
        openai.NotGiven,
    ]
    temperature: Union[float, None, openai.NotGiven]
    tool_choice: Union[
        Literal["none", "auto", "required"],
        openai.types.chat.chat_completion_named_tool_choice_param.ChatCompletionNamedToolChoiceParam,
        openai.NotGiven,
    ]
    tools: Union[
        Iterable[openai.types.chat.chat_completion_tool_param.ChatCompletionToolParam],
        openai.NotGiven,
    ]
    top_logprobs: Union[int, None, openai.NotGiven]
    top_p: Union[float, None, openai.NotGiven]
    user: Union[str, openai.NotGiven]
    # Per-request transport-level extras passed through to httpx.
    extra_headers: Optional[Mapping[str, Union[str, openai.Omit]]]
    extra_query: Optional[Mapping[str, object]]
    extra_body: Optional[object]
    timeout: Union[float, openai.Timeout, None, openai.NotGiven]
@@ -0,0 +1,319 @@
1
+ Metadata-Version: 2.3
2
+ Name: chatlas
3
+ Version: 0.2.0
4
+ Summary: A simple and consistent interface for chatting with LLMs
5
+ Project-URL: Homepage, https://posit-dev.github.io/chatlas
6
+ Project-URL: Documentation, https://posit-dev.github.io/chatlas
7
+ Project-URL: Repository, https://github.com/posit-dev/chatlas
8
+ Project-URL: Issues, https://github.com/posit-dev/chatlas/issues/
9
+ Project-URL: Changelog, https://github.com/posit-dev/chatlas/blob/main/CHANGELOG.md
10
+ Author-email: Carson Sievert <carson@posit.co>
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Programming Language :: Python :: 3.9
15
+ Classifier: Programming Language :: Python :: 3.10
16
+ Classifier: Programming Language :: Python :: 3.11
17
+ Classifier: Programming Language :: Python :: 3.12
18
+ Classifier: Programming Language :: Python :: 3.13
19
+ Requires-Python: >=3.9
20
+ Requires-Dist: jinja2
21
+ Requires-Dist: pydantic>=2.0
22
+ Requires-Dist: rich
23
+ Provides-Extra: dev
24
+ Requires-Dist: anthropic[bedrock]; extra == 'dev'
25
+ Requires-Dist: google-generativeai>=0.8.3; extra == 'dev'
26
+ Requires-Dist: matplotlib; extra == 'dev'
27
+ Requires-Dist: numpy>1.24.4; extra == 'dev'
28
+ Requires-Dist: openai; extra == 'dev'
29
+ Requires-Dist: pillow; extra == 'dev'
30
+ Requires-Dist: python-dotenv; extra == 'dev'
31
+ Requires-Dist: ruff>=0.6.5; extra == 'dev'
32
+ Requires-Dist: shiny; extra == 'dev'
33
+ Provides-Extra: docs
34
+ Requires-Dist: griffe>=1; extra == 'docs'
35
+ Requires-Dist: ipykernel; extra == 'docs'
36
+ Requires-Dist: ipywidgets; extra == 'docs'
37
+ Requires-Dist: nbclient; extra == 'docs'
38
+ Requires-Dist: nbformat; extra == 'docs'
39
+ Requires-Dist: pandas; extra == 'docs'
40
+ Requires-Dist: pyyaml; extra == 'docs'
41
+ Requires-Dist: quartodoc>=0.7; extra == 'docs'
42
+ Provides-Extra: test
43
+ Requires-Dist: pyright>=1.1.379; extra == 'test'
44
+ Requires-Dist: pytest-asyncio; extra == 'test'
45
+ Requires-Dist: pytest>=8.3.2; extra == 'test'
46
+ Requires-Dist: syrupy>=4; extra == 'test'
47
+ Description-Content-Type: text/markdown
48
+
49
+ # chatlas
50
+
51
+ chatlas provides a simple and unified interface across large language model (llm) providers in Python.
52
+ It abstracts away complexity from common tasks like streaming chat interfaces, tool calling, structured output, and much more.
53
+ chatlas helps you prototype faster without painting you into a corner; for example, switching providers is as easy as changing one line of code, but provider specific features are still accessible when needed.
54
+ Developer experience is also a key focus of chatlas: typing support, rich console output, and built-in tooling are all included.
55
+
56
+ (Looking for something similar to chatlas, but in R? Check out [elmer](https://elmer.tidyverse.org/)!)
57
+
58
+ ## Install
59
+
60
+ Install the latest stable release from PyPI:
61
+
62
+ ```bash
63
+ pip install -U chatlas
64
+ ```
65
+
66
+ Or, install the latest development version from GitHub:
67
+
68
+ ```bash
69
+ pip install -U git+https://github.com/posit-dev/chatlas
70
+ ```
71
+
72
+ ## Model providers
73
+
74
+ `chatlas` supports a variety of model providers. See the [API reference](https://posit-dev.github.io/chatlas/reference/index.html) for more details (like managing credentials) on each provider.
75
+
76
+ * Anthropic (Claude): [`ChatAnthropic()`](https://posit-dev.github.io/chatlas/reference/ChatAnthropic.html).
77
+ * GitHub model marketplace: [`ChatGithub()`](https://posit-dev.github.io/chatlas/reference/ChatGithub.html).
78
+ * Google (Gemini): [`ChatGoogle()`](https://posit-dev.github.io/chatlas/reference/ChatGoogle.html).
79
+ * Groq: [`ChatGroq()`](https://posit-dev.github.io/chatlas/reference/ChatGroq.html).
80
+ * Ollama local models: [`ChatOllama()`](https://posit-dev.github.io/chatlas/reference/ChatOllama.html).
81
+ * OpenAI: [`ChatOpenAI()`](https://posit-dev.github.io/chatlas/reference/ChatOpenAI.html).
82
+ * perplexity.ai: [`ChatPerplexity()`](https://posit-dev.github.io/chatlas/reference/ChatPerplexity.html).
83
+
84
+ It also supports the following enterprise cloud providers:
85
+
86
+ * AWS Bedrock: [`ChatBedrockAnthropic()`](https://posit-dev.github.io/chatlas/reference/ChatBedrockAnthropic.html).
87
+ * Azure OpenAI: [`ChatAzureOpenAI()`](https://posit-dev.github.io/chatlas/reference/ChatAzureOpenAI.html).
88
+
89
+ To use a model provider that isn't listed here, you have two options:
90
+
91
+ 1. If the model is OpenAI compatible, use `ChatOpenAI()` with the appropriate `base_url` and `api_key` (see [`ChatGithub`](https://github.com/posit-dev/chatlas/blob/main/chatlas/_github.py) for a reference).
92
+ 2. If you're motivated, implement a new provider by subclassing [`Provider`](https://github.com/posit-dev/chatlas/blob/main/chatlas/_provider.py) and implementing the required methods.
93
+
94
+
95
+ ## Model choice
96
+
97
+ If you're using chatlas inside your organisation, you'll be limited to what your org allows, which is likely to be one provided by a big cloud provider (e.g. `ChatAzureOpenAI()` and `ChatBedrockAnthropic()`). If you're using chatlas for your own personal exploration, you have a lot more freedom so we have a few recommendations to help you get started:
98
+
99
+ - `ChatOpenAI()` or `ChatAnthropic()` are both good places to start. `ChatOpenAI()` defaults to **GPT-4o**, but you can use `model = "gpt-4o-mini"` for a cheaper lower-quality model, or `model = "o1-mini"` for more complex reasoning. `ChatAnthropic()` is similarly good; it defaults to **Claude 3.5 Sonnet** which we have found to be particularly good at writing code.
100
+
101
+ - `ChatGoogle()` is great for large prompts, because it has a much larger context window than other models. It allows up to 1 million tokens, compared to Claude 3.5 Sonnet's 200k and GPT-4o's 128k.
102
+
103
+ - `ChatOllama()`, which uses [Ollama](https://ollama.com), allows you to run models on your own computer. The biggest models you can run locally aren't as good as the state of the art hosted models, but they also don't share your data and are effectively free.
104
+
105
+ ## Using chatlas
106
+
107
+ You can chat via `chatlas` in several different ways, depending on whether you are working interactively or programmatically. They all start with creating a new chat object:
108
+
109
+ ```python
110
+ from chatlas import ChatOpenAI
111
+
112
+ chat = ChatOpenAI(
113
+ model = "gpt-4o",
114
+ system_prompt = "You are a friendly but terse assistant.",
115
+ )
116
+ ```
117
+
118
+ ### Interactive console
119
+
120
+ From a `chat` instance, it's simple to start a web-based or terminal-based chat console, which is great for testing the capabilities of the model. In either case, responses stream in real-time, and context is preserved across turns.
121
+
122
+ ```python
123
+ chat.app()
124
+ ```
125
+
126
+ <div style="display:flex;justify-content:center;">
127
+ <img width="500" alt="A web app for chatting with an LLM via chatlas" src="https://github.com/user-attachments/assets/e43f60cb-3686-435a-bd11-8215cb024d2e" class="border rounded">
128
+ </div>
129
+
130
+
131
+ Or, if you prefer to work from the terminal:
132
+
133
+ ```python
134
+ chat.console()
135
+ ```
136
+
137
+ ```
138
+ Entering chat console. Press Ctrl+C to quit.
139
+
140
+ ?> Who created Python?
141
+
142
+ Python was created by Guido van Rossum. He began development in the late 1980s and released the first version in 1991.
143
+
144
+ ?> Where did he develop it?
145
+
146
+ Guido van Rossum developed Python while working at Centrum Wiskunde & Informatica (CWI) in the Netherlands.
147
+ ```
148
+
149
+
150
+ ### The `.chat()` method
151
+
152
+ For a more programmatic approach, you can use the `.chat()` method to ask a question and get a response. By default, the response prints to a [rich](https://github.com/Textualize/rich) console as it streams in:
153
+
154
+ ```python
155
+ chat.chat("What preceding languages most influenced Python?")
156
+ ```
157
+
158
+ ```
159
+ Python was primarily influenced by ABC, with additional inspiration from C,
160
+ Modula-3, and various other languages.
161
+ ```
162
+
163
+ To ask a question about an image, pass one or more additional input arguments using `content_image_file()` and/or `content_image_url()`:
164
+
165
+ ```python
166
+ from chatlas import content_image_url
167
+
168
+ chat.chat(
169
+ content_image_url("https://www.python.org/static/img/python-logo.png"),
170
+ "Can you explain this logo?"
171
+ )
172
+ ```
173
+
174
+ ```
175
+ The Python logo features two intertwined snakes in yellow and blue,
176
+ representing the Python programming language. The design symbolizes...
177
+ ```
178
+
179
+ To get the full response as a string, use the built-in `str()` function. Optionally, you can also suppress the rich console output by setting `echo="none"`:
180
+
181
+ ```python
182
+ response = chat.chat("Who is Posit?", echo="none")
183
+ print(str(response))
184
+ ```
185
+
186
+ As we'll see in later articles, `echo="all"` can also be useful for debugging, as it shows additional information, such as tool calls.
187
+
188
+ ### The `.stream()` method
189
+
190
+ If you want to do something with the response in real-time (i.e., as it arrives in chunks), use the `.stream()` method. This method returns an iterator that yields each chunk of the response as it arrives:
191
+
192
+ ```python
193
+ response = chat.stream("Who is Posit?")
194
+ for chunk in response:
195
+ print(chunk, end="")
196
+ ```
197
+
198
+ The `.stream()` method can also be useful if you're [building a chatbot](https://posit-dev.github.io/chatlas/web-apps.html) or other programs that need to display responses as they arrive.
199
+
200
+
201
+ ### Tool calling
202
+
203
+ Tool calling is as simple as passing a function with type hints and docstring to `.register_tool()`.
204
+
205
+ ```python
206
+ import sys
207
+
208
+ def get_current_python_version() -> str:
209
+ """Get the current version of Python."""
210
+ return sys.version
211
+
212
+ chat.register_tool(get_current_python_version)
213
+ chat.chat("What's the current version of Python?")
214
+ ```
215
+
216
+ ```
217
+ The current version of Python is 3.13.
218
+ ```
219
+
220
+ Learn more in the [tool calling article](https://posit-dev.github.io/chatlas/tool-calling.html)
221
+
222
+ ### Structured data
223
+
224
+ Structured data (i.e., structured output) is as simple as passing a [pydantic](https://docs.pydantic.dev/latest/) model to `.extract_data()`.
225
+
226
+ ```python
227
+ from pydantic import BaseModel
228
+
229
+ class Person(BaseModel):
230
+ name: str
231
+ age: int
232
+
233
+ chat.extract_data(
234
+ "My name is Susan and I'm 13 years old",
235
+ data_model=Person,
236
+ )
237
+ ```
238
+
239
+ ```
240
+ {'name': 'Susan', 'age': 13}
241
+ ```
242
+
243
+ Learn more in the [structured data article](https://posit-dev.github.io/chatlas/structured-data.html)
244
+
245
+ ### Export chat
246
+
247
+ Easily get a full markdown or HTML export of a conversation:
248
+
249
+ ```python
250
+ chat.export("index.html", title="Python Q&A")
251
+ ```
252
+
253
+ If the export doesn't have all the information you need, you can also access the full conversation history via the `.get_turns()` method:
254
+
255
+ ```python
256
+ chat.get_turns()
257
+ ```
258
+
259
+ And, if the conversation is too long, you can specify which turns to include:
260
+
261
+ ```python
262
+ chat.export("index.html", turns=chat.get_turns()[-5:])
263
+ ```
264
+
265
+ ### Async
266
+
267
+ `chat` methods tend to be synchronous by default, but you can use the async flavor by appending `_async` to the method name:
268
+
269
+ ```python
270
+ import asyncio
271
+
272
+ async def main():
273
+ await chat.chat_async("What is the capital of France?")
274
+
275
+ asyncio.run(main())
276
+ ```
277
+
278
+ ### Typing support
279
+
280
+ `chatlas` has full typing support, meaning that, among other things, autocompletion just works in your favorite editor:
281
+
282
+ <div style="display:flex;justify-content:center;">
283
+ <img width="500" alt="Autocompleting model options in ChatOpenAI" src="https://github.com/user-attachments/assets/163d6d8a-7d58-422d-b3af-cc9f2adee759" class="rounded">
284
+ </div>
285
+
286
+
287
+
288
+ ### Troubleshooting
289
+
290
+ Sometimes things like token limits, tool errors, or other issues can cause problems that are hard to diagnose.
291
+ In these cases, the `echo="all"` option is helpful for getting more information about what's going on under the hood.
292
+
293
+ ```python
294
+ chat.chat("What is the capital of France?", echo="all")
295
+ ```
296
+
297
+ This shows important information like tool call results, finish reasons, and more.
298
+
299
+ If the problem isn't self-evident, you can also reach into the `.get_last_turn()`, which contains the full response object, with full details about the completion.
300
+
301
+
302
+ <div style="display:flex;justify-content:center;">
303
+ <img width="500" alt="Turn completion details with typing support" src="https://github.com/user-attachments/assets/eaea338d-e44a-4e23-84a7-2e998d8af3ba" class="rounded">
304
+ </div>
305
+
306
+
307
+ For monitoring issues in a production (or otherwise non-interactive) environment, you may want to enable logging. Also, since `chatlas` builds on top of packages like `anthropic` and `openai`, you can also enable their debug logging to get lower-level information, like HTTP requests and response codes.
308
+
309
+ ```shell
310
+ $ export CHATLAS_LOG=info
311
+ $ export OPENAI_LOG=info
312
+ $ export ANTHROPIC_LOG=info
313
+ ```
314
+
315
+ ### Next steps
316
+
317
+ If you're new to the world of LLMs, you might want to read the [Get Started](https://posit-dev.github.io/chatlas/get-started.html) guide, which covers some basic concepts and terminology.
318
+
319
+ Once you're comfortable with the basics, you can explore more in-depth topics like [prompt design](https://posit-dev.github.io/chatlas/prompt-design.html) or the [API reference](https://posit-dev.github.io/chatlas/reference/index.html).
@@ -0,0 +1,37 @@
1
+ chatlas/__init__.py,sha256=OJbTO71ne1O9SDxkwIKOMpCMKbh0T8eDpYPFhrAb28A,974
2
+ chatlas/_anthropic.py,sha256=AUb1ZJfZo6AEVwfNrMl520-zGomkOfc-ewJoFXGGEEc,21180
3
+ chatlas/_chat.py,sha256=_WTqI3v84voJ9GJJlizIPjxomx3O--fToN5NQSGeFuM,38452
4
+ chatlas/_content.py,sha256=vpWF_WKS2tCDUtnL8l9lfW6b6g9e7LbDKP-_TegauVE,5883
5
+ chatlas/_content_image.py,sha256=4nk9wTvLtNmtcytdFp8p9otEV5-0_K6wzIxCyK0PIEI,8367
6
+ chatlas/_display.py,sha256=_IcQcvpyTNjGHOpY70_LOrDWwTjzdkziy6pTvxHEiWI,4053
7
+ chatlas/_github.py,sha256=D3L7Qu35K-M1qEW7-w-Oq-pF-9mVetia3MHYNNLEYtU,4373
8
+ chatlas/_google.py,sha256=Y3vFcUKyEiTAGUkh6Nhw8pbsDYSN3xAECXc9jpd7-6A,13953
9
+ chatlas/_groq.py,sha256=3VnYiKdxJTHPhEgUKnL2nY5uYL2L4PKBo7GZMwR0D8k,4158
10
+ chatlas/_interpolate.py,sha256=ykwLP3x-ya9Q33U4knSU75dtk6pzJAeythEEIW-43Pc,3631
11
+ chatlas/_logging.py,sha256=7a20sAl1PkW1qBNrfd_ieUbQXV8Gf4Vuf0Wn62LNBmk,2290
12
+ chatlas/_merge.py,sha256=Xt2uutLdEmYAGfGCa8GCEd8sdNadQM5o3l-zuIQFbWU,3923
13
+ chatlas/_ollama.py,sha256=G1rGasb6cq8WhuvSpo2oHMBkeeguZE_TrurIyZSIPJ8,3584
14
+ chatlas/_openai.py,sha256=mEeUTcwT7s33CWhAvavferQlc4ltk3Wm75Bqh1jQKw0,21855
15
+ chatlas/_perplexity.py,sha256=Bw_mlM8N8egGKIrbNerTn2pMlybugADOshjYOfN1ixM,4446
16
+ chatlas/_provider.py,sha256=nUfJEXcVs_Yxns2WLr3BevmAnU19fnIGEK_VAeSyt6E,3601
17
+ chatlas/_tokens.py,sha256=3W3EPUp9eWXUiwuzJwEPBv43AUznbK46pm59Htti7z4,2392
18
+ chatlas/_tokens_old.py,sha256=L9d9oafrXvEx2u4nIn_Jjn7adnQyLBnYBuPwJUE8Pl8,5005
19
+ chatlas/_tools.py,sha256=-qt4U1AFkebQoX9kpsBy5QXK8a2PpHX6Amgm44gcQ68,4113
20
+ chatlas/_turn.py,sha256=nKwk20FrOIrZX4xJxdGyUEpwUH2H-UYcoJLlO2ZD5iU,4836
21
+ chatlas/_typing_extensions.py,sha256=YdzmlyPSBpIEcsOkoz12e6jETT1XEMV2Q72haE4cfwY,1036
22
+ chatlas/_utils.py,sha256=qAiWuDx-uG8BGFZ_PWvum9wpN-WogdItO32X4pRhhLs,2762
23
+ chatlas/types/__init__.py,sha256=pgHl8pd2Ytskd6lkfNtm98Yj1ZP0b3R35RH4Uht2BAs,694
24
+ chatlas/types/anthropic/__init__.py,sha256=OwubA-DPHYpYo0XyRyAFwftOI0mOxtHzAyhUSLcDx54,417
25
+ chatlas/types/anthropic/_client.py,sha256=Iz7U7u7_Af4UzKuDDOaYe09T_WW1w9yXtqxrgn0ITSk,918
26
+ chatlas/types/anthropic/_client_bedrock.py,sha256=mNazQlu0pQt8JdzrYn3LKNgE4n732GjhQUJdQQK9QkY,785
27
+ chatlas/types/anthropic/_submit.py,sha256=MBsKtgPrRq4z_Ls5haT3oZKSTpM0_K9STxJZ_2ge-iA,2122
28
+ chatlas/types/google/__init__.py,sha256=ZJhi8Kwvio2zp8T1TQqmvdHqkS-Khb6BGESPjREADgo,337
29
+ chatlas/types/google/_client.py,sha256=YA5hsT-m-KcONKtwpCULYMnGwMPfkScpvhjx_qBLg5o,4421
30
+ chatlas/types/google/_submit.py,sha256=yp1wtp5eScLlHDNxeXl0qJOKv7SWLnRQ8oslupRFUBE,4839
31
+ chatlas/types/openai/__init__.py,sha256=Q2RAr1bSH1nHsxICK05nAmKmxdhKmhbBkWD_XHiVSrI,411
32
+ chatlas/types/openai/_client.py,sha256=4V9UcysJI6Iu8CsaFH68EJiGIs-H41veguldsVj4KMA,707
33
+ chatlas/types/openai/_client_azure.py,sha256=zrQv0JZS47UB-ViIOKcFSv8YVJIzaridP_e-FF1f-U4,811
34
+ chatlas/types/openai/_submit.py,sha256=Gl8YPARAY-kChPOdjoyLBEtVgarYrX69OOeFQH6b6cI,5679
35
+ chatlas-0.2.0.dist-info/METADATA,sha256=C1CrsrVrcKI36Wnp2u-samFDFoQ6krG9RnfawFIPzSs,12617
36
+ chatlas-0.2.0.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
37
+ chatlas-0.2.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.26.3
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any