chatlas 0.11.1__tar.gz → 0.13.0__tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- {chatlas-0.11.1 → chatlas-0.13.0}/.vscode/settings.json +9 -2
- {chatlas-0.11.1 → chatlas-0.13.0}/CHANGELOG.md +37 -6
- {chatlas-0.11.1 → chatlas-0.13.0}/PKG-INFO +2 -1
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/__init__.py +10 -0
- chatlas-0.13.0/chatlas/_auto.py +290 -0
- chatlas-0.13.0/chatlas/_batch_chat.py +211 -0
- chatlas-0.13.0/chatlas/_batch_job.py +234 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_chat.py +181 -43
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_content.py +13 -8
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider.py +88 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_anthropic.py +106 -2
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_openai.py +143 -12
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_tools.py +11 -3
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_version.py +2 -2
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/types/anthropic/_submit.py +2 -2
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/types/openai/_client.py +2 -2
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/types/openai/_client_azure.py +2 -2
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/types/openai/_submit.py +2 -2
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/_quarto.yml +12 -5
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/get-started/models.qmd +28 -7
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/get-started/structured-data.qmd +12 -22
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/structured-data/article-summary.qmd +3 -2
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/structured-data/classification.qmd +5 -3
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/structured-data/entity-recognition.qmd +3 -3
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/structured-data/multi-modal.qmd +4 -3
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/structured-data/sentiment-analysis.qmd +2 -2
- {chatlas-0.11.1 → chatlas-0.13.0}/pyproject.toml +1 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/scripts/_generate_openai_types.py +14 -3
- chatlas-0.13.0/tests/batch/country-capitals-structured.json +140 -0
- chatlas-0.13.0/tests/batch/country-capitals.json +140 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/conftest.py +13 -9
- chatlas-0.13.0/tests/test_auto.py +266 -0
- chatlas-0.13.0/tests/test_batch_chat.py +202 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_chat.py +8 -7
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_tools_enhanced.py +131 -0
- chatlas-0.13.0/tests/test_turns.py +350 -0
- chatlas-0.11.1/chatlas/_auto.py +0 -178
- chatlas-0.11.1/tests/test_auto.py +0 -109
- chatlas-0.11.1/tests/test_turns.py +0 -92
- {chatlas-0.11.1 → chatlas-0.13.0}/.github/workflows/check-update-types.yml +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/.github/workflows/docs-publish.yml +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/.github/workflows/release.yml +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/.github/workflows/test.yml +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/.github/workflows/update-pricing.yml +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/.gitignore +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/.vscode/extensions.json +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/CLAUDE.md +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/LICENSE +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/Makefile +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/README.md +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_callbacks.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_content_image.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_content_pdf.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_display.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_interpolate.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_live_render.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_logging.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_mcp_manager.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_merge.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_cloudflare.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_databricks.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_deepseek.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_github.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_google.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_groq.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_huggingface.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_mistral.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_ollama.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_openrouter.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_perplexity.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_portkey.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_provider_snowflake.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_tokens.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_tokens_old.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_turn.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_typing_extensions.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/_utils.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/data/prices.json +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/py.typed +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/types/__init__.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/types/anthropic/__init__.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/types/anthropic/_client.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/types/anthropic/_client_bedrock.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/types/google/__init__.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/types/google/_client.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/types/google/_submit.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/chatlas/types/openai/__init__.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/.gitignore +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/_extensions/machow/interlinks/.gitignore +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/_extensions/machow/interlinks/_extension.yml +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/_extensions/machow/interlinks/interlinks.lua +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/_sidebar.yml +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/congressional-assets.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/get-started/async.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/get-started/chat.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/get-started/chatbots.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/get-started/debug.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/get-started/monitor.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/get-started/parameters.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/get-started/stream.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/get-started/system-prompt.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/get-started/tools.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/chat-app.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/chat-console.mp4 +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/chat-console.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/chat-notebook.mp4 +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/chat-parameters.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/chatbot-gradio.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/chatbot-shiny.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/chatbot-streamlit.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/chatbot-textual.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/chatlas-hello.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/client-parameters.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/congressional-assets.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/hello-chat-console.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/model-parameters.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/model-type-hints.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/posit-logo.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/shiny-mcp-run-python.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/shiny-tool-call-display.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/shiny-tool-call-map.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/tool-calling-right.svg +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/images/tool-calling-wrong.svg +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/index.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/logos/hero/hero-old.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/logos/hero/hero.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/logos/hex/logo.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/logos/small/logo.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/misc/RAG.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/misc/examples.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/misc/mcp-tools.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/misc/vocabulary.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/styles.scss +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/tool-calling/approval.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/tool-calling/displays.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/tool-calling/how-it-works.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/docs/why-chatlas.qmd +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/pytest.ini +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/scripts/_generate_anthropic_types.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/scripts/_generate_google_types.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/scripts/_utils.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/scripts/main.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/__init__.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/__snapshots__/test_chat.ambr +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/apples.pdf +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/images/dice.png +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/mcp_servers/http_add.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/mcp_servers/http_current_date.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/mcp_servers/stdio_current_date.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/mcp_servers/stdio_subtract_multiply.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_callbacks.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_content.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_content_html.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_content_image.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_content_pdf.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_content_tools.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_interpolate.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_mcp_client.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_provider_anthropic.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_provider_azure.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_provider_bedrock.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_provider_cloudflare.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_provider_databricks.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_provider_deepseek.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_provider_github.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_provider_google.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_provider_huggingface.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_provider_mistral.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_provider_openai.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_provider_openrouter.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_provider_portkey.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_provider_snowflake.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_register_tool_models.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_set_model_params.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_tokens.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_tool_from_mcp.py +0 -0
- {chatlas-0.11.1 → chatlas-0.13.0}/tests/test_utils_merge.py +0 -0
{chatlas-0.11.1 → chatlas-0.13.0}/.vscode/settings.json

```diff
@@ -7,5 +7,12 @@
   },
   "editor.defaultFormatter": "charliermarsh.ruff",
   },
-  "flake8.args": [
-
+  "flake8.args": [
+    "--max-line-length=120"
+  ],
+  "python.testing.pytestArgs": [
+    "tests"
+  ],
+  "python.testing.unittestEnabled": false,
+  "python.testing.pytestEnabled": true
+}
```
{chatlas-0.11.1 → chatlas-0.13.0}/CHANGELOG.md

```diff
@@ -7,6 +7,38 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 -->
 
+## [0.13.0] - 2025-09-10
+
+### New features
+
+* Added support for submitting multiple chats in one batch. With batch submission, results can take up to 24 hours to complete, but in return you pay ~50% less than usual. For more, see the [reference](https://posit-dev.github.io/chatlas/reference/) for `batch_chat()`, `batch_chat_text()`, `batch_chat_structured()` and `batch_chat_completed()`. (#177)
+* The `Chat` class gains new `.chat_structured()` (and `.chat_structured_async()`) methods. These methods supersede the now deprecated `.extract_data()` (and `.extract_data_async()`). The only difference is that the new methods return a `BaseModel` instance (instead of a `dict()`), leading to a better type hinting/checking experience. (#175)
+* The `.get_turns()` method gains a `tool_result_role` parameter. Set `tool_result_role="assistant"` to collect tool result content (plus the surrounding assistant turn contents) into a single assistant turn. This is convenient for display purposes and more generally if you want the tool calling loop to be contained in a single turn. (#179)
+
+### Improvements
+
+* The `.app()` method now:
+  * Enables bookmarking by default (i.e., chat session survives page reload). (#179)
+  * Correctly renders pre-existing turns that contain tool calls. (#179)
+
+## [0.12.0] - 2025-09-08
+
+### Breaking changes
+
+* `ChatAuto()`'s first (optional) positional parameter has changed from `system_prompt` to `provider_model`, and `system_prompt` is now a keyword parameter. As a result, you may need to change `ChatAuto("[system prompt]")` -> `ChatAuto(system_prompt="[system prompt]")`. In addition, the `provider` and `model` keyword arguments are now deprecated, but continue to work with a warning, as are the previous `CHATLAS_CHAT_PROVIDER` and `CHATLAS_CHAT_MODEL` environment variables. (#159)
+
+### New features
+
+* `ChatAuto()`'s new `provider_model` takes both provider and model in a single string in the format `"{provider}/{model}"`, e.g. `"openai/gpt-5"`. If not provided, `ChatAuto()` looks for the `CHATLAS_CHAT_PROVIDER_MODEL` environment variable, defaulting to `"openai"` if neither are provided. Unlike previous versions of `ChatAuto()`, the environment variables are now used *only if function arguments are not provided*. In other words, if `provider_model` is given, the `CHATLAS_CHAT_PROVIDER_MODEL` environment variable is ignored. Similarly, `CHATLAS_CHAT_ARGS` are only used if no `kwargs` are provided. This improves interactive use cases, makes it easier to introduce application-specific environment variables, and puts more control in the hands of the developer. (#159)
+* The `.register_tool()` method now:
+  * Accepts a `Tool` instance as input. This is primarily useful for binding things like `annotations` to the `Tool` in one place, and registering it in another. (#172)
+  * Supports function parameter names that start with an underscore. (#174)
+* The `ToolAnnotations` type gains an `extra` key field -- providing a place for providing additional information that other consumers of tool annotations (e.g., [shinychat](https://posit-dev.github.io/shinychat/)) may make use of.
+
+### Bug fixes
+
+* `ChatAuto()` now supports recently added providers such as `ChatCloudflare()`, `ChatDeepseek()`, `ChatHuggingFace()`, etc. (#159)
+
 ## [0.11.1] - 2025-08-29
 
 ### New features
@@ -22,7 +54,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 * `.register_tool(annotations=annotations)` drops support for `mcp.types.ToolAnnotations()` and instead expects a dictionary of the same info. (#164)
 
-
 ## [0.11.0] - 2025-08-26
 
 ### New features
@@ -42,7 +73,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### New features
 
-* Added `ChatCloudflare()` for chatting via [Cloudflare AI](https://developers.cloudflare.com/workers-ai/get-started/rest-api/). (#150)
+* Added `ChatCloudflare()` for chatting via [Cloudflare AI](https://developers.cloudflare.com/workers-ai/get-started/rest-api/). (#150)
 * Added `ChatDeepSeek()` for chatting via [DeepSeek](https://www.deepseek.com/). (#147)
 * Added `ChatOpenRouter()` for chatting via [Open Router](https://openrouter.ai/). (#148)
 * Added `ChatHuggingFace()` for chatting via [Hugging Face](https://huggingface.co/). (#144)
@@ -78,7 +109,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### New features
 
-* `Chat` gains a handful of new methods:
+* `Chat` gains a handful of new methods:
   * `.register_mcp_tools_http_stream_async()` and `.register_mcp_tools_stdio_async()`: for registering tools from a [MCP server](https://modelcontextprotocol.io/). (#39)
   * `.get_tools()` and `.set_tools()`: for fine-grained control over registered tools. (#39)
   * `.set_model_params()`: for setting common LLM parameters in a model-agnostic fashion. (#127)
@@ -87,7 +118,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 * Tool functions passed to `.register_tool()` can now `yield` numerous results. (#39)
 * A `ContentToolResultImage` content class was added for returning images from tools. It is currently only works with `ChatAnthropic`. (#39)
 * A `Tool` can now be constructed from a pre-existing tool schema (via a new `__init__` method). (#39)
-* The `Chat.app()` method gains a `host` parameter. (#122)
+* The `Chat.app()` method gains a `host` parameter. (#122)
 * `ChatGithub()` now supports the more standard `GITHUB_TOKEN` environment variable for storing the API key. (#123)
 
 ### Changes
@@ -149,7 +180,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [0.7.1] - 2025-05-10
 
-* Added `openai` as a hard dependency, making installation easier for a wide range of use cases. (#91)
+* Added `openai` as a hard dependency, making installation easier for a wide range of use cases. (#91)
 
 ## [0.7.0] - 2025-04-22
 
@@ -159,7 +190,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 * `.stream()` and `.stream_async()` gain a `content` argument. Set this to `"all"` to include `ContentToolResult`/`ContentToolRequest` objects in the stream. (#75)
 * `ContentToolResult`/`ContentToolRequest` are now exported to `chatlas` namespace. (#75)
 * `ContentToolResult`/`ContentToolRequest` gain a `.tagify()` method so they render sensibly in a Shiny app. (#75)
-* A tool can now return a `ContentToolResult`. This is useful for:
+* A tool can now return a `ContentToolResult`. This is useful for:
   * Specifying the format used for sending the tool result to the chat model (`model_format`). (#87)
   * Custom rendering of the tool result (by overriding relevant methods in a subclass). (#75)
 * `Chat` gains a new `.current_display` property. When a `.chat()` or `.stream()` is currently active, this property returns an object with a `.echo()` method (to echo new content to the display). This is primarily useful for displaying custom content during a tool call. (#79)
```
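To make the 0.13.0 `chat_structured()` entry above concrete, here is a minimal sketch; it assumes the new method accepts the prompt plus a `data_model` argument, mirroring the deprecated `.extract_data()` it supersedes:

```python
from pydantic import BaseModel

from chatlas import ChatOpenAI


class Person(BaseModel):
    name: str
    age: int


chat = ChatOpenAI()

# Unlike the deprecated `.extract_data()`, this returns a `Person`
# instance rather than a dict, so type checkers see `.name`/`.age`.
person = chat.chat_structured(
    "My name is Susan and I'm 13 years old",
    data_model=Person,
)
print(person.name, person.age)
```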
{chatlas-0.11.1 → chatlas-0.13.0}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chatlas
-Version: 0.11.1
+Version: 0.13.0
 Summary: A simple and consistent interface for chatting with LLMs
 Project-URL: Homepage, https://posit-dev.github.io/chatlas
 Project-URL: Documentation, https://posit-dev.github.io/chatlas
@@ -44,6 +44,7 @@ Requires-Dist: pillow; extra == 'dev'
 Requires-Dist: python-dotenv; extra == 'dev'
 Requires-Dist: ruff>=0.6.5; extra == 'dev'
 Requires-Dist: shiny; extra == 'dev'
+Requires-Dist: shinychat; extra == 'dev'
 Requires-Dist: snowflake-ml-python>=1.8.4; extra == 'dev'
 Requires-Dist: tenacity; extra == 'dev'
 Requires-Dist: tiktoken; extra == 'dev'
```
{chatlas-0.11.1 → chatlas-0.13.0}/chatlas/__init__.py

```diff
@@ -1,5 +1,11 @@
 from . import types
 from ._auto import ChatAuto
+from ._batch_chat import (
+    batch_chat,
+    batch_chat_completed,
+    batch_chat_structured,
+    batch_chat_text,
+)
 from ._chat import Chat
 from ._content import (
     ContentToolRequest,
@@ -36,6 +42,10 @@ except ImportError:  # pragma: no cover
     __version__ = "0.0.0"  # stub value for docs
 
 __all__ = (
+    "batch_chat",
+    "batch_chat_completed",
+    "batch_chat_structured",
+    "batch_chat_text",
     "ChatAnthropic",
     "ChatAuto",
     "ChatBedrockAnthropic",
```
chatlas-0.13.0/chatlas/_auto.py (new file, +290 lines)

````python
from __future__ import annotations

import os
import warnings
from typing import Callable, Literal, Optional

import orjson

from ._chat import Chat
from ._provider_anthropic import ChatAnthropic, ChatBedrockAnthropic
from ._provider_cloudflare import ChatCloudflare
from ._provider_databricks import ChatDatabricks
from ._provider_deepseek import ChatDeepSeek
from ._provider_github import ChatGithub
from ._provider_google import ChatGoogle, ChatVertex
from ._provider_groq import ChatGroq
from ._provider_huggingface import ChatHuggingFace
from ._provider_mistral import ChatMistral
from ._provider_ollama import ChatOllama
from ._provider_openai import ChatAzureOpenAI, ChatOpenAI
from ._provider_openrouter import ChatOpenRouter
from ._provider_perplexity import ChatPerplexity
from ._provider_portkey import ChatPortkey
from ._provider_snowflake import ChatSnowflake
from ._utils import MISSING_TYPE as DEPRECATED_TYPE

AutoProviders = Literal[
    "anthropic",
    "bedrock-anthropic",
    "cloudflare",
    "databricks",
    "deep-seek",
    "github",
    "google",
    "groq",
    "hugging-face",
    "mistral",
    "ollama",
    "openai",
    "azure-openai",
    "open-router",
    "perplexity",
    "portkey",
    "snowflake",
    "vertex",
]

_provider_chat_model_map: dict[AutoProviders, Callable[..., Chat]] = {
    "anthropic": ChatAnthropic,
    "bedrock-anthropic": ChatBedrockAnthropic,
    "cloudflare": ChatCloudflare,
    "databricks": ChatDatabricks,
    "deep-seek": ChatDeepSeek,
    "github": ChatGithub,
    "google": ChatGoogle,
    "groq": ChatGroq,
    "hugging-face": ChatHuggingFace,
    "mistral": ChatMistral,
    "ollama": ChatOllama,
    "openai": ChatOpenAI,
    "azure-openai": ChatAzureOpenAI,
    "open-router": ChatOpenRouter,
    "perplexity": ChatPerplexity,
    "portkey": ChatPortkey,
    "snowflake": ChatSnowflake,
    "vertex": ChatVertex,
}

DEPRECATED = DEPRECATED_TYPE()


def ChatAuto(
    provider_model: Optional[str] = None,
    *,
    system_prompt: Optional[str] = None,
    provider: AutoProviders | DEPRECATED_TYPE = DEPRECATED,
    model: str | DEPRECATED_TYPE = DEPRECATED,
    **kwargs,
) -> Chat:
    """
    Chat with any provider.

    This is a generic interface to all the other `Chat*()` functions, allowing
    you to pick the provider (and model) with a simple string.

    Prerequisites
    -------------

    ::: {.callout-note}
    ## API key

    Follow the instructions for the specific provider to obtain an API key.
    :::

    ::: {.callout-note}
    ## Python requirements

    Follow the instructions for the specific provider to install the required
    Python packages.
    :::

    Examples
    --------

    `ChatAuto()` makes it easy to switch between different chat providers and models.

    ```python
    import pandas as pd
    from chatlas import ChatAuto

    # Default provider (OpenAI) & model
    chat = ChatAuto()
    print(chat.provider.name)
    print(chat.provider.model)

    # Different provider (Anthropic) & default model
    chat = ChatAuto("anthropic")

    # List models available through the provider
    models = chat.list_models()
    print(pd.DataFrame(models))

    # Choose specific provider/model (Claude Sonnet 4)
    chat = ChatAuto("anthropic/claude-sonnet-4-0")
    ```

    The default provider/model can also be controlled through an environment variable:

    ```bash
    export CHATLAS_CHAT_PROVIDER_MODEL="anthropic/claude-sonnet-4-0"
    ```

    ```python
    from chatlas import ChatAuto

    chat = ChatAuto()
    print(chat.provider.name)  # anthropic
    print(chat.provider.model)  # claude-sonnet-4-0
    ```

    For application-specific configurations, consider defining your own environment variables:

    ```bash
    export MYAPP_PROVIDER_MODEL="google/gemini-2.5-flash"
    ```

    And passing them to `ChatAuto()` as an alternative way to configure the provider/model:

    ```python
    import os
    from chatlas import ChatAuto

    chat = ChatAuto(os.getenv("MYAPP_PROVIDER_MODEL"))
    print(chat.provider.name)  # google
    print(chat.provider.model)  # gemini-2.5-flash
    ```

    Parameters
    ----------
    provider_model
        The name of the provider and model to use in the format
        `"{provider}/{model}"`. Providers are strings formatted in kebab-case,
        e.g. to use `ChatBedrockAnthropic` set `provider="bedrock-anthropic"`,
        and models are the provider-specific model names, e.g.
        `"claude-3-7-sonnet-20250219"`. The `/{model}` portion may also be
        omitted, in which case, the default model for that provider will be
        used.

        If no value is provided, the `CHATLAS_CHAT_PROVIDER_MODEL` environment
        variable will be consulted for a fallback value. If this variable is also
        not set, a default value of `"openai"` is used.
    system_prompt
        A system prompt to set the behavior of the assistant.
    provider
        Deprecated; use `provider_model` instead.
    model
        Deprecated; use `provider_model` instead.
    **kwargs
        Additional keyword arguments to pass to the `Chat` constructor. See the
        documentation for each provider for more details on the available
        options.

        These arguments can also be provided via the `CHATLAS_CHAT_ARGS`
        environment variable as a JSON string. When any additional arguments are
        provided to `ChatAuto()`, the env var is ignored.

        Note that `system_prompt` and `turns` can't be set via environment variables.
        They must be provided/set directly to/on `ChatAuto()`.

    Note
    ----
    If you want to work with a specific provider, but don't know what models are
    available (or the exact model name), use
    `ChatAuto('provider_name').list_models()` to list available models. Another
    option is to use the provider more directly (e.g., `ChatAnthropic()`). There,
    the `model` parameter may have type hints for available models.

    Returns
    -------
    Chat
        A chat instance using the specified provider.

    Raises
    ------
    ValueError
        If no valid provider is specified either through parameters or
        environment variables.
    """
    if provider is not DEPRECATED:
        warn_deprecated_param("provider")

    if model is not DEPRECATED:
        if provider is DEPRECATED:
            raise ValueError(
                "The `model` parameter is deprecated and cannot be used without the `provider` parameter. "
                "Use `provider_model` instead."
            )
        warn_deprecated_param("model")

    if provider_model is None:
        provider_model = os.environ.get("CHATLAS_CHAT_PROVIDER_MODEL")

    # Backwards compatibility: construct from old env vars as a fallback
    if provider_model is None:
        env_provider = get_legacy_env_var("CHATLAS_CHAT_PROVIDER", provider)
        env_model = get_legacy_env_var("CHATLAS_CHAT_MODEL", model)

        if env_provider:
            provider_model = env_provider
            if env_model:
                provider_model += f"/{env_model}"

    # Fall back to OpenAI if nothing is specified
    if provider_model is None:
        provider_model = "openai"

    if "/" in provider_model:
        the_provider, the_model = provider_model.split("/", 1)
    else:
        the_provider, the_model = provider_model, None

    if the_provider not in _provider_chat_model_map:
        raise ValueError(
            f"Provider name '{the_provider}' is not a known chatlas provider: "
            f"{', '.join(_provider_chat_model_map.keys())}"
        )

    # `system_prompt`, `turns` and `model` always come from `ChatAuto()`
    base_args = {
        "system_prompt": system_prompt,
        "turns": None,
        "model": the_model,
    }

    # Environment kwargs, used only if no kwargs provided
    env_kwargs = {}
    if not kwargs:
        env_kwargs = orjson.loads(os.environ.get("CHATLAS_CHAT_ARGS", "{}"))

    final_kwargs = {**env_kwargs, **kwargs, **base_args}
    final_kwargs = {k: v for k, v in final_kwargs.items() if v is not None}

    return _provider_chat_model_map[the_provider](**final_kwargs)


def get_legacy_env_var(
    env_var_name: str,
    default: str | DEPRECATED_TYPE,
) -> str | None:
    env_value = os.environ.get(env_var_name)
    if env_value:
        warnings.warn(
            f"The '{env_var_name}' environment variable is deprecated. "
            "Use 'CHATLAS_CHAT_PROVIDER_MODEL' instead.",
            DeprecationWarning,
            stacklevel=3,
        )
        return env_value
    elif isinstance(default, DEPRECATED_TYPE):
        return None
    else:
        return default


def warn_deprecated_param(param_name: str, stacklevel: int = 3) -> None:
    warnings.warn(
        f"The '{param_name}' parameter is deprecated. Use 'provider_model' instead.",
        DeprecationWarning,
        stacklevel=stacklevel,
    )
````
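Given the deprecation handling in `ChatAuto()` above, the migration path for existing callers looks roughly like this (a sketch; the model names are illustrative):

```python
from chatlas import ChatAuto

# Pre-0.12.0 style: still works, but now emits a DeprecationWarning
chat = ChatAuto(provider="anthropic", model="claude-3-7-sonnet-20250219")

# 0.12.0+ style: provider and model in one "{provider}/{model}" string
chat = ChatAuto("anthropic/claude-3-7-sonnet-20250219")

# The "/{model}" part is optional; the provider's default model is used
chat = ChatAuto("bedrock-anthropic")
```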
chatlas-0.13.0/chatlas/_batch_chat.py (new file, +211 lines)

````python
"""
Batch chat processing for submitting multiple requests simultaneously.

This module provides functionality for submitting multiple chat requests
in batches to providers that support it (currently OpenAI and Anthropic).
Batch processing can take up to 24 hours but offers significant cost savings
(up to 50% less than regular requests).
"""

from __future__ import annotations

import copy
from pathlib import Path
from typing import TypeVar, Union

from pydantic import BaseModel

from ._batch_job import BatchJob, ContentT
from ._chat import Chat

ChatT = TypeVar("ChatT", bound=Chat)
BaseModelT = TypeVar("BaseModelT", bound=BaseModel)


def batch_chat(
    chat: ChatT,
    prompts: list[ContentT] | list[list[ContentT]],
    path: Union[str, Path],
    wait: bool = True,
) -> list[ChatT | None]:
    """
    Submit multiple chat requests in a batch.

    This function allows you to submit multiple chat requests simultaneously
    using provider batch APIs (currently OpenAI and Anthropic). Batch processing
    can take up to 24 hours but offers significant cost savings.

    Parameters
    ----------
    chat
        Chat instance to use for the batch
    prompts
        List of prompts to process. Each can be a string or list of strings.
    path
        Path to file (with .json extension) to store batch state
    wait
        If True, wait for batch to complete. If False, return None if incomplete.

    Returns
    -------
    List of Chat objects (one per prompt) if complete, None if wait=False and incomplete.
    Individual Chat objects may be None if their request failed.

    Example
    -------

    ```python
    from chatlas import ChatOpenAI

    chat = ChatOpenAI()
    prompts = [
        "What's the capital of France?",
        "What's the capital of Germany?",
        "What's the capital of Italy?",
    ]

    chats = batch_chat(chat, prompts, "capitals.json")
    for i, result_chat in enumerate(chats):
        if result_chat:
            print(f"Prompt {i + 1}: {result_chat.get_last_turn().text}")
    ```
    """
    job = BatchJob(chat, prompts, path, wait=wait)
    job.step_until_done()

    chats = []
    assistant_turns = job.result_turns()
    for user, assistant in zip(job.user_turns, assistant_turns):
        if assistant is not None:
            new_chat = copy.deepcopy(chat)
            new_chat.add_turn(user)
            new_chat.add_turn(assistant)
            chats.append(new_chat)
        else:
            chats.append(None)

    return chats


def batch_chat_text(
    chat: Chat,
    prompts: list[ContentT] | list[list[ContentT]],
    path: Union[str, Path],
    wait: bool = True,
) -> list[str | None]:
    """
    Submit multiple chat requests in a batch and return text responses.

    This is a convenience function that returns just the text of the responses
    rather than full Chat objects.

    Parameters
    ----------
    chat
        Chat instance to use for the batch
    prompts
        List of prompts to process
    path
        Path to file (with .json extension) to store batch state
    wait
        If True, wait for batch to complete

    Return
    ------
    List of text responses (or None for failed requests)
    """
    chats = batch_chat(chat, prompts, path, wait=wait)

    texts = []
    for x in chats:
        if x is None:
            texts.append(None)
            continue
        last_turn = x.get_last_turn()
        if last_turn is None:
            texts.append(None)
            continue
        texts.append(last_turn.text)

    return texts


def batch_chat_structured(
    chat: Chat,
    prompts: list[ContentT] | list[list[ContentT]],
    path: Union[str, Path],
    data_model: type[BaseModelT],
    wait: bool = True,
) -> list[BaseModelT | None]:
    """
    Submit multiple structured data requests in a batch.

    Parameters
    ----------
    chat
        Chat instance to use for the batch
    prompts
        List of prompts to process
    path
        Path to file (with .json extension) to store batch state
    data_model
        Pydantic model class for structured responses
    wait
        If True, wait for batch to complete

    Return
    ------
    List of structured data objects (or None for failed requests)
    """
    job = BatchJob(chat, prompts, path, data_model=data_model, wait=wait)
    result = job.step_until_done()

    if result is None:
        return []

    res: list[BaseModelT | None] = []
    assistant_turns = job.result_turns()
    for turn in assistant_turns:
        if turn is None:
            res.append(None)
        else:
            json = chat._extract_turn_json(turn)
            model = data_model.model_validate(json)
            res.append(model)

    return res


def batch_chat_completed(
    chat: Chat,
    prompts: list[ContentT] | list[list[ContentT]],
    path: Union[str, Path],
) -> bool:
    """
    Check if a batch job is completed without waiting.

    Parameters
    ----------
    chat
        Chat instance used for the batch
    prompts
        List of prompts used for the batch
    path
        Path to batch state file

    Returns
    -------
    True if batch is complete, False otherwise
    """
    job = BatchJob(chat, prompts, path, wait=False)
    stage = job.stage

    if stage == "submitting":
        return False
    elif stage == "waiting":
        status = job._poll()
        return not status.working
    elif stage == "retrieving" or stage == "done":
        return True
    else:
        raise ValueError(f"Unknown batch stage: {stage}")
````
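Since the batch state is persisted to `path`, these functions also support a non-blocking workflow: submit once with `wait=False`, then poll with `batch_chat_completed()` and collect results later. A sketch, assuming (per the docstrings above) that `batch_chat(wait=False)` returns None while the batch is incomplete and that re-running with the same arguments resumes from the saved state rather than re-submitting:

```python
from chatlas import ChatOpenAI, batch_chat, batch_chat_completed

chat = ChatOpenAI()
prompts = [
    "What's the capital of France?",
    "What's the capital of Germany?",
]

# First call submits the batch; with wait=False it returns immediately.
results = batch_chat(chat, prompts, "capitals.json", wait=False)

if results is None:
    # Still processing; check progress later without re-submitting.
    print("done?", batch_chat_completed(chat, prompts, "capitals.json"))
else:
    for r in results:
        print(r.get_last_turn().text if r else "<request failed>")
```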