chatlas 0.9.2__tar.gz → 0.10.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of chatlas might be problematic. Click here for more details.
- {chatlas-0.9.2 → chatlas-0.10.0}/CHANGELOG.md +20 -0
- chatlas-0.10.0/CLAUDE.md +167 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/PKG-INFO +3 -3
- {chatlas-0.9.2 → chatlas-0.10.0}/README.md +2 -2
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/__init__.py +12 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_chat.py +2 -2
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_provider_anthropic.py +2 -3
- chatlas-0.10.0/chatlas/_provider_cloudflare.py +165 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_provider_databricks.py +11 -0
- chatlas-0.10.0/chatlas/_provider_deepseek.py +171 -0
- chatlas-0.10.0/chatlas/_provider_huggingface.py +155 -0
- chatlas-0.10.0/chatlas/_provider_mistral.py +181 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_provider_openai.py +9 -5
- chatlas-0.10.0/chatlas/_provider_openrouter.py +149 -0
- chatlas-0.10.0/chatlas/_provider_portkey.py +123 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_tokens.py +5 -5
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_typing_extensions.py +3 -3
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_version.py +16 -3
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/types/anthropic/_client.py +1 -1
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/types/anthropic/_client_bedrock.py +1 -1
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/types/openai/_client.py +1 -1
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/types/openai/_client_azure.py +1 -1
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/types/openai/_submit.py +3 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/_quarto.yml +8 -1
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/get-started/debug.qmd +3 -3
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/get-started/models.qmd +22 -12
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/get-started/monitor.qmd +3 -5
- {chatlas-0.9.2 → chatlas-0.10.0}/scripts/_generate_google_types.py +2 -1
- {chatlas-0.9.2 → chatlas-0.10.0}/scripts/_generate_openai_types.py +3 -3
- {chatlas-0.9.2 → chatlas-0.10.0}/scripts/_utils.py +19 -1
- chatlas-0.10.0/tests/test_provider_bedrock.py +73 -0
- chatlas-0.10.0/tests/test_provider_cloudflare.py +79 -0
- chatlas-0.10.0/tests/test_provider_deepseek.py +62 -0
- chatlas-0.10.0/tests/test_provider_huggingface.py +103 -0
- chatlas-0.10.0/tests/test_provider_mistral.py +73 -0
- chatlas-0.10.0/tests/test_provider_openrouter.py +68 -0
- chatlas-0.10.0/tests/test_provider_portkey.py +91 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_tokens.py +32 -2
- chatlas-0.9.2/tests/test_provider_bedrock.py +0 -72
- {chatlas-0.9.2 → chatlas-0.10.0}/.github/workflows/check-update-types.yml +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/.github/workflows/docs-publish.yml +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/.github/workflows/release.yml +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/.github/workflows/test.yml +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/.github/workflows/update-pricing.yml +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/.gitignore +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/.vscode/extensions.json +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/.vscode/settings.json +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/LICENSE +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/Makefile +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_auto.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_callbacks.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_content.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_content_image.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_content_pdf.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_display.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_interpolate.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_live_render.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_logging.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_mcp_manager.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_merge.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_provider.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_provider_github.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_provider_google.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_provider_groq.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_provider_ollama.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_provider_perplexity.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_provider_snowflake.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_tokens_old.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_tools.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_turn.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/_utils.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/data/prices.json +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/py.typed +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/types/__init__.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/types/anthropic/__init__.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/types/anthropic/_submit.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/types/google/__init__.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/types/google/_client.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/types/google/_submit.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/chatlas/types/openai/__init__.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/.gitignore +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/_extensions/machow/interlinks/.gitignore +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/_extensions/machow/interlinks/_extension.yml +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/_extensions/machow/interlinks/interlinks.lua +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/_sidebar.yml +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/congressional-assets.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/get-started/async.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/get-started/chat.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/get-started/chatbots.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/get-started/parameters.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/get-started/stream.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/get-started/structured-data.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/get-started/system-prompt.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/get-started/tools.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/chat-app.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/chat-console.mp4 +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/chat-console.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/chat-notebook.mp4 +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/chat-parameters.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/chatbot-gradio.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/chatbot-shiny.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/chatbot-streamlit.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/chatbot-textual.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/chatlas-hello.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/client-parameters.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/congressional-assets.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/hello-chat-console.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/model-parameters.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/model-type-hints.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/posit-logo.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/shiny-mcp-run-python.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/shiny-tool-call-display.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/shiny-tool-call-map.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/tool-calling-right.svg +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/images/tool-calling-wrong.svg +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/index.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/logos/hero/hero-old.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/logos/hero/hero.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/logos/hex/logo.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/logos/small/logo.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/misc/RAG.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/misc/examples.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/misc/mcp-tools.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/misc/vocabulary.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/Chat.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/ChatAnthropic.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/ChatAuto.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/ChatAzureOpenAI.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/ChatBedrockAnthropic.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/ChatDatabricks.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/ChatGithub.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/ChatGoogle.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/ChatGroq.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/ChatOllama.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/ChatOpenAI.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/ChatPerplexity.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/ChatSnowflake.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/ChatVertex.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/Provider.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/Tool.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/ToolRejectError.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/Turn.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/content_image_file.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/content_image_plot.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/content_image_url.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/content_pdf_file.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/content_pdf_url.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/image_file.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/image_plot.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/image_url.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/index.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/interpolate.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/interpolate_file.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/token_usage.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.ChatResponse.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.ChatResponseAsync.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.Content.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.ContentImage.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.ContentImageInline.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.ContentImageRemote.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.ContentJson.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.ContentText.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.ContentToolRequest.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.ContentToolResult.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.ImageContentTypes.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.MISSING.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.MISSING_TYPE.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.SubmitInputArgsT.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/reference/types.TokenUsage.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/structured-data/article-summary.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/structured-data/classification.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/structured-data/entity-recognition.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/structured-data/multi-modal.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/structured-data/sentiment-analysis.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/styles.scss +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/tool-calling/approval.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/tool-calling/displays.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/tool-calling/how-it-works.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/docs/why-chatlas.qmd +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/pyproject.toml +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/pytest.ini +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/scripts/_generate_anthropic_types.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/scripts/main.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/__init__.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/__snapshots__/test_chat.ambr +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/apples.pdf +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/conftest.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/images/dice.png +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/mcp_servers/http_add.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/mcp_servers/http_current_date.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/mcp_servers/stdio_current_date.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/mcp_servers/stdio_subtract_multiply.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_auto.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_callbacks.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_chat.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_content.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_content_html.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_content_image.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_content_pdf.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_content_tools.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_interpolate.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_mcp_client.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_provider_anthropic.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_provider_azure.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_provider_databricks.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_provider_google.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_provider_openai.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_provider_snowflake.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_set_model_params.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_tool_from_mcp.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_tools_enhanced.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_turns.py +0 -0
- {chatlas-0.9.2 → chatlas-0.10.0}/tests/test_utils_merge.py +0 -0
|
@@ -7,6 +7,26 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
|
|
|
7
7
|
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
|
8
8
|
-->
|
|
9
9
|
|
|
10
|
+
## [0.10.0] - 2025-08-19
|
|
11
|
+
|
|
12
|
+
### New features
|
|
13
|
+
|
|
14
|
+
* Added `ChatCloudflare()` for chatting via [Cloudflare AI](https://developers.cloudflare.com/workers-ai/get-started/rest-api/). (#150)
|
|
15
|
+
* Added `ChatDeepSeek()` for chatting via [DeepSeek](https://www.deepseek.com/). (#147)
|
|
16
|
+
* Added `ChatOpenRouter()` for chatting via [Open Router](https://openrouter.ai/). (#148)
|
|
17
|
+
* Added `ChatHuggingFace()` for chatting via [Hugging Face](https://huggingface.co/). (#144)
|
|
18
|
+
* Added `ChatMistral()` for chatting via [Mistral AI](https://mistral.ai/). (#145)
|
|
19
|
+
* Added `ChatPortkey()` for chatting via [Portkey AI](https://portkey.ai/). (#143)
|
|
20
|
+
|
|
21
|
+
### Changes
|
|
22
|
+
|
|
23
|
+
* `ChatAnthropic()` and `ChatBedrockAnthropic()` now default to Claude Sonnet 4.0.
|
|
24
|
+
|
|
25
|
+
### Bug fixes
|
|
26
|
+
|
|
27
|
+
* Fixed an issue where chatting with some models was leading to `KeyError: 'cached_input'`. (#149)
|
|
28
|
+
|
|
29
|
+
|
|
10
30
|
## [0.9.2] - 2025-08-08
|
|
11
31
|
|
|
12
32
|
### Improvements
|
chatlas-0.10.0/CLAUDE.md
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
# CLAUDE.md
|
|
2
|
+
|
|
3
|
+
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
|
4
|
+
|
|
5
|
+
## Development Commands
|
|
6
|
+
|
|
7
|
+
The project uses `uv` for package management and Make for common tasks:
|
|
8
|
+
|
|
9
|
+
- **Setup environment**: `make setup` (installs all dependencies with `uv sync --all-extras`)
|
|
10
|
+
- **Run tests**: `make check-tests` or `uv run pytest`
|
|
11
|
+
- **Type checking**: `make check-types` or `uv run pyright`
|
|
12
|
+
- **Linting/formatting**: `make check-format` (check) or `make format` (fix)
|
|
13
|
+
- **Full checks**: `make check` (runs format, type, and test checks)
|
|
14
|
+
- **Build package**: `make build` (creates dist/ with built package)
|
|
15
|
+
- **Run single test**: `uv run pytest tests/test_specific_file.py::TestClass::test_method -v`
|
|
16
|
+
- **Update snapshots**: `make update-snaps` (for syrupy snapshot tests)
|
|
17
|
+
- **Documentation**: `make docs` (build) or `make docs-preview` (serve locally)
|
|
18
|
+
|
|
19
|
+
## Project Architecture
|
|
20
|
+
|
|
21
|
+
### Core Components
|
|
22
|
+
|
|
23
|
+
**Chat System**: The main `Chat` class in `_chat.py` manages conversation state and provider interactions. It's a generic class that works with different providers through the `Provider` abstract base class.
|
|
24
|
+
|
|
25
|
+
**Provider Pattern**: All LLM providers (OpenAI, Anthropic, Google, etc.) inherit from `Provider` in `_provider.py`. Each provider (e.g., `_provider_openai.py`) implements:
|
|
26
|
+
- Model-specific parameter handling
|
|
27
|
+
- API client configuration
|
|
28
|
+
- Request/response transformation
|
|
29
|
+
- Tool calling integration
|
|
30
|
+
|
|
31
|
+
**Content System**: The `_content.py` module defines structured content types:
|
|
32
|
+
- `ContentText`: Plain text messages
|
|
33
|
+
- `ContentImage`: Image content (inline, remote, or file-based)
|
|
34
|
+
- `ContentToolRequest`/`ContentToolResult`: Tool interaction messages
|
|
35
|
+
- `ContentJson`: Structured data responses
|
|
36
|
+
|
|
37
|
+
**Tool System**: Tools are defined in `_tools.py` and allow LLMs to call Python functions. The system supports:
|
|
38
|
+
- Function registration with automatic schema generation
|
|
39
|
+
- Tool approval workflows
|
|
40
|
+
- MCP (Model Context Protocol) server integration via `_mcp_manager.py`
|
|
41
|
+
|
|
42
|
+
**Turn Management**: `Turn` objects in `_turn.py` represent individual conversation exchanges, containing sequences of `Content` objects.
|
|
43
|
+
|
|
44
|
+
### Key Patterns
|
|
45
|
+
|
|
46
|
+
1. **Provider Abstraction**: All providers implement the same interface but handle model-specific details internally
|
|
47
|
+
2. **Generic Typing**: Heavy use of TypeVars and generics for type safety across providers
|
|
48
|
+
3. **Streaming Support**: Both sync and async streaming responses via `ChatResponse`/`ChatResponseAsync`
|
|
49
|
+
4. **Content-Based Messaging**: All communication uses structured `Content` objects rather than raw strings
|
|
50
|
+
5. **Tool Integration**: Seamless function calling with automatic JSON schema generation from Python type hints
|
|
51
|
+
|
|
52
|
+
### Testing Structure
|
|
53
|
+
|
|
54
|
+
- Tests are organized by component (e.g., `test_provider_openai.py`, `test_tools.py`)
|
|
55
|
+
- Snapshot testing with `syrupy` for response validation
|
|
56
|
+
- MCP server tests use local test servers in `tests/mcp_servers/`
|
|
57
|
+
- Async tests configured via `pytest.ini` with `asyncio_mode=strict`
|
|
58
|
+
|
|
59
|
+
### Documentation
|
|
60
|
+
|
|
61
|
+
Documentation is built with Quarto and quartodoc:
|
|
62
|
+
- API reference generated from docstrings in `chatlas/` modules
|
|
63
|
+
- Guides and examples in `docs/` as `.qmd` files
|
|
64
|
+
- Type definitions in `chatlas/types/` provide provider-specific parameter types
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
## Adding New Providers
|
|
68
|
+
|
|
69
|
+
When implementing a new LLM provider, follow this systematic approach:
|
|
70
|
+
|
|
71
|
+
### 1. Research Phase
|
|
72
|
+
- **Check ellmer first**: Look in `../ellmer/R/provider-*.R` for existing implementations
|
|
73
|
+
- **Identify base provider**: Most providers inherit from either `OpenAIProvider` (for OpenAI-compatible APIs) or implement `Provider` directly
|
|
74
|
+
- **Check existing patterns**: Review similar providers in `chatlas/_provider_*.py`
|
|
75
|
+
|
|
76
|
+
### 2. Implementation Steps
|
|
77
|
+
1. **Create provider file**: `chatlas/_provider_[name].py`
|
|
78
|
+
- Use PascalCase for class names (e.g., `MistralProvider`)
|
|
79
|
+
- Use snake_case for function names (e.g., `ChatMistral`)
|
|
80
|
+
- Follow existing docstring patterns with Prerequisites, Examples, Parameters, Returns sections
|
|
81
|
+
|
|
82
|
+
2. **Provider class structure**:
|
|
83
|
+
```python
|
|
84
|
+
class [Name]Provider(OpenAIProvider): # or Provider if custom
|
|
85
|
+
def __init__(self, ...):
|
|
86
|
+
super().__init__(...)
|
|
87
|
+
# Provider-specific initialization
|
|
88
|
+
|
|
89
|
+
def _chat_perform_args(self, ...):
|
|
90
|
+
# Customize request parameters if needed
|
|
91
|
+
kwargs = super()._chat_perform_args(...)
|
|
92
|
+
# Apply provider-specific modifications
|
|
93
|
+
return kwargs
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
3. **Chat function signature**:
|
|
97
|
+
```python
|
|
98
|
+
def Chat[Name](
|
|
99
|
+
*,
|
|
100
|
+
system_prompt: Optional[str] = None,
|
|
101
|
+
model: Optional[str] = None,
|
|
102
|
+
api_key: Optional[str] = None,
|
|
103
|
+
base_url: str = "https://...",
|
|
104
|
+
seed: int | None | MISSING_TYPE = MISSING,
|
|
105
|
+
kwargs: Optional["ChatClientArgs"] = None,
|
|
106
|
+
) -> Chat["SubmitInputArgs", ChatCompletion]:
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
### 3. Testing Setup
|
|
110
|
+
1. **Create test file**: `tests/test_provider_[name].py`
|
|
111
|
+
2. **Add environment variable skip pattern**:
|
|
112
|
+
```python
|
|
113
|
+
import os
|
|
114
|
+
import pytest
|
|
115
|
+
|
|
116
|
+
do_test = os.getenv("TEST_[NAME]", "true")
|
|
117
|
+
if do_test.lower() == "false":
|
|
118
|
+
pytest.skip("Skipping [Name] tests", allow_module_level=True)
|
|
119
|
+
```
|
|
120
|
+
3. **Use standard test patterns**:
|
|
121
|
+
- `test_[name]_simple_request()`
|
|
122
|
+
- `test_[name]_simple_streaming_request()`
|
|
123
|
+
- `test_[name]_respects_turns_interface()`
|
|
124
|
+
- `test_[name]_tool_variations()` (if supported)
|
|
125
|
+
- `test_data_extraction()`
|
|
126
|
+
- `test_[name]_images()` (if vision supported)
|
|
127
|
+
|
|
128
|
+
### 4. Package Integration
|
|
129
|
+
1. **Update `chatlas/__init__.py`**:
|
|
130
|
+
- Add import: `from ._provider_[name] import Chat[Name]`
|
|
131
|
+
- Add to `__all__` tuple: `"Chat[Name]"`
|
|
132
|
+
|
|
133
|
+
2. **Run validation**:
|
|
134
|
+
```bash
|
|
135
|
+
uv run pyright chatlas/_provider_[name].py
|
|
136
|
+
TEST_[NAME]=false uv run pytest tests/test_provider_[name].py -v
|
|
137
|
+
uv run python -c "from chatlas import Chat[Name]; print('Import successful')"
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
### 5. Provider-Specific Customizations
|
|
141
|
+
|
|
142
|
+
**OpenAI-Compatible Providers**:
|
|
143
|
+
- Inherit from `OpenAIProvider`
|
|
144
|
+
- Override `_chat_perform_args()` for API differences
|
|
145
|
+
- Common customizations: remove `stream_options`, adjust parameter names, modify headers
|
|
146
|
+
|
|
147
|
+
**Custom API Providers**:
|
|
148
|
+
- Inherit from `Provider` directly
|
|
149
|
+
- Implement all abstract methods: `chat_perform()`, `chat_perform_async()`, `stream_text()`, etc.
|
|
150
|
+
- Handle model-specific response formats
|
|
151
|
+
|
|
152
|
+
### 6. Common Patterns
|
|
153
|
+
- **Environment variables**: Use `[PROVIDER]_API_KEY` format
|
|
154
|
+
- **Default models**: Use provider's recommended general-purpose model
|
|
155
|
+
- **Seed handling**: `seed = 1014 if is_testing() else None` when MISSING
|
|
156
|
+
- **Error handling**: Provider APIs often return different error formats
|
|
157
|
+
- **Rate limiting**: Consider implementing client-side throttling for providers that need it
|
|
158
|
+
|
|
159
|
+
### 7. Documentation Requirements
|
|
160
|
+
- Include provider description and prerequisites
|
|
161
|
+
- Document known limitations (tool calling, vision support, etc.)
|
|
162
|
+
- Provide working examples with environment variable usage
|
|
163
|
+
- Note any special model requirements (e.g., vision models for images)
|
|
164
|
+
|
|
165
|
+
## Connections to ellmer
|
|
166
|
+
|
|
167
|
+
This project is the Python equivalent of the R package ellmer. The source code for ellmer is available in a sibling directory to this project. Before implementing new features or bug fixes in chatlas, it may be useful to consult the ellmer codebase to: (1) check whether the feature/fix already exists on the R side and (2) make sure the projects are aligned in terms of stylistic approaches. Note also that ellmer itself has a CLAUDE.md file which has a useful overview of the project.
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: chatlas
|
|
3
|
-
Version: 0.
|
|
3
|
+
Version: 0.10.0
|
|
4
4
|
Summary: A simple and consistent interface for chatting with LLMs
|
|
5
5
|
Project-URL: Homepage, https://posit-dev.github.io/chatlas
|
|
6
6
|
Project-URL: Documentation, https://posit-dev.github.io/chatlas
|
|
@@ -79,7 +79,7 @@ Provides-Extra: vertex
|
|
|
79
79
|
Requires-Dist: google-genai>=1.14.0; extra == 'vertex'
|
|
80
80
|
Description-Content-Type: text/markdown
|
|
81
81
|
|
|
82
|
-
# chatlas <a href="https://posit-dev.github.io/chatlas"><img src="
|
|
82
|
+
# chatlas <a href="https://posit-dev.github.io/chatlas"><img src="https://posit-dev.github.io/chatlas/logos/hex/logo.png" align="right" height="138" alt="chatlas website" /></a>
|
|
83
83
|
|
|
84
84
|
<p>
|
|
85
85
|
<!-- badges start -->
|
|
@@ -135,7 +135,7 @@ chat.chat("How's the weather in San Francisco?")
|
|
|
135
135
|
```
|
|
136
136
|
|
|
137
137
|
|
|
138
|
-
<img src="
|
|
138
|
+
<img src="https://posit-dev.github.io/chatlas/images/chatlas-hello.png" alt="Model response output to the user query: 'How's the weather in San Francisco?'" width="67%" style="display: block; margin-left: auto; margin-right: auto">
|
|
139
139
|
|
|
140
140
|
|
|
141
141
|
Learn more at <https://posit-dev.github.io/chatlas>
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
# chatlas <a href="https://posit-dev.github.io/chatlas"><img src="
|
|
1
|
+
# chatlas <a href="https://posit-dev.github.io/chatlas"><img src="https://posit-dev.github.io/chatlas/logos/hex/logo.png" align="right" height="138" alt="chatlas website" /></a>
|
|
2
2
|
|
|
3
3
|
<p>
|
|
4
4
|
<!-- badges start -->
|
|
@@ -54,7 +54,7 @@ chat.chat("How's the weather in San Francisco?")
|
|
|
54
54
|
```
|
|
55
55
|
|
|
56
56
|
|
|
57
|
-
<img src="
|
|
57
|
+
<img src="https://posit-dev.github.io/chatlas/images/chatlas-hello.png" alt="Model response output to the user query: 'How's the weather in San Francisco?'" width="67%" style="display: block; margin-left: auto; margin-right: auto">
|
|
58
58
|
|
|
59
59
|
|
|
60
60
|
Learn more at <https://posit-dev.github.io/chatlas>
|
|
@@ -7,13 +7,19 @@ from ._content_pdf import content_pdf_file, content_pdf_url
|
|
|
7
7
|
from ._interpolate import interpolate, interpolate_file
|
|
8
8
|
from ._provider import Provider
|
|
9
9
|
from ._provider_anthropic import ChatAnthropic, ChatBedrockAnthropic
|
|
10
|
+
from ._provider_cloudflare import ChatCloudflare
|
|
10
11
|
from ._provider_databricks import ChatDatabricks
|
|
12
|
+
from ._provider_deepseek import ChatDeepSeek
|
|
11
13
|
from ._provider_github import ChatGithub
|
|
12
14
|
from ._provider_google import ChatGoogle, ChatVertex
|
|
13
15
|
from ._provider_groq import ChatGroq
|
|
16
|
+
from ._provider_huggingface import ChatHuggingFace
|
|
17
|
+
from ._provider_mistral import ChatMistral
|
|
14
18
|
from ._provider_ollama import ChatOllama
|
|
15
19
|
from ._provider_openai import ChatAzureOpenAI, ChatOpenAI
|
|
20
|
+
from ._provider_openrouter import ChatOpenRouter
|
|
16
21
|
from ._provider_perplexity import ChatPerplexity
|
|
22
|
+
from ._provider_portkey import ChatPortkey
|
|
17
23
|
from ._provider_snowflake import ChatSnowflake
|
|
18
24
|
from ._tokens import token_usage
|
|
19
25
|
from ._tools import Tool, ToolRejectError
|
|
@@ -28,14 +34,20 @@ __all__ = (
|
|
|
28
34
|
"ChatAnthropic",
|
|
29
35
|
"ChatAuto",
|
|
30
36
|
"ChatBedrockAnthropic",
|
|
37
|
+
"ChatCloudflare",
|
|
31
38
|
"ChatDatabricks",
|
|
39
|
+
"ChatDeepSeek",
|
|
32
40
|
"ChatGithub",
|
|
33
41
|
"ChatGoogle",
|
|
34
42
|
"ChatGroq",
|
|
43
|
+
"ChatHuggingFace",
|
|
44
|
+
"ChatMistral",
|
|
35
45
|
"ChatOllama",
|
|
36
46
|
"ChatOpenAI",
|
|
47
|
+
"ChatOpenRouter",
|
|
37
48
|
"ChatAzureOpenAI",
|
|
38
49
|
"ChatPerplexity",
|
|
50
|
+
"ChatPortkey",
|
|
39
51
|
"ChatSnowflake",
|
|
40
52
|
"ChatVertex",
|
|
41
53
|
"Chat",
|
|
@@ -395,8 +395,8 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
|
|
|
395
395
|
)
|
|
396
396
|
|
|
397
397
|
input_token_price = price_token["input"] / 1e6
|
|
398
|
-
output_token_price = price_token
|
|
399
|
-
cached_token_price = price_token
|
|
398
|
+
output_token_price = price_token.get("output", 0) / 1e6
|
|
399
|
+
cached_token_price = price_token.get("cached_input", 0) / 1e6
|
|
400
400
|
|
|
401
401
|
if len(turns_tokens) == 0:
|
|
402
402
|
return 0.0
|
|
@@ -163,7 +163,7 @@ def ChatAnthropic(
|
|
|
163
163
|
"""
|
|
164
164
|
|
|
165
165
|
if model is None:
|
|
166
|
-
model = log_model_default("claude-
|
|
166
|
+
model = log_model_default("claude-sonnet-4-0")
|
|
167
167
|
|
|
168
168
|
return Chat(
|
|
169
169
|
provider=AnthropicProvider(
|
|
@@ -742,8 +742,7 @@ def ChatBedrockAnthropic(
|
|
|
742
742
|
"""
|
|
743
743
|
|
|
744
744
|
if model is None:
|
|
745
|
-
|
|
746
|
-
model = log_model_default("anthropic.claude-3-5-sonnet-20241022-v2:0")
|
|
745
|
+
model = log_model_default("us.anthropic.claude-sonnet-4-20250514-v1:0")
|
|
747
746
|
|
|
748
747
|
return Chat(
|
|
749
748
|
provider=AnthropicBedrockProvider(
|
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from typing import TYPE_CHECKING, Optional
|
|
5
|
+
|
|
6
|
+
from ._chat import Chat
|
|
7
|
+
from ._logging import log_model_default
|
|
8
|
+
from ._provider_openai import OpenAIProvider
|
|
9
|
+
from ._utils import MISSING, MISSING_TYPE, is_testing
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from ._provider_openai import ChatCompletion
|
|
13
|
+
from .types.openai import ChatClientArgs, SubmitInputArgs
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def ChatCloudflare(
    *,
    account: Optional[str] = None,
    system_prompt: Optional[str] = None,
    model: Optional[str] = None,
    api_key: Optional[str] = None,
    seed: Optional[int] | MISSING_TYPE = MISSING,
    kwargs: Optional["ChatClientArgs"] = None,
) -> Chat["SubmitInputArgs", ChatCompletion]:
    """
    Chat with a model hosted on Cloudflare Workers AI.

    Cloudflare Workers AI hosts a variety of open-source AI models.

    Prerequisites
    -------------

    ::: {.callout-note}
    ## API credentials

    To use the Cloudflare API, you must have an Account ID and an Access Token,
    which you can obtain by following the instructions at
    <https://developers.cloudflare.com/workers-ai/get-started/rest-api/>.
    :::

    Examples
    --------

    ```python
    import os
    from chatlas import ChatCloudflare

    chat = ChatCloudflare(
        api_key=os.getenv("CLOUDFLARE_API_KEY"),
        account=os.getenv("CLOUDFLARE_ACCOUNT_ID"),
    )
    chat.chat("What is the capital of France?")
    ```

    Known limitations
    -----------------

    - Tool calling does not appear to work.
    - Images don't appear to work.

    Parameters
    ----------
    account
        The Cloudflare account ID. You generally should not supply this directly,
        but instead set the `CLOUDFLARE_ACCOUNT_ID` environment variable.
    system_prompt
        A system prompt to set the behavior of the assistant.
    model
        The model to use for the chat. The default, None, will pick a reasonable
        default, and warn you about it. We strongly recommend explicitly choosing
        a model for all but the most casual use.
    api_key
        The API key to use for authentication. You generally should not supply
        this directly, but instead set the `CLOUDFLARE_API_KEY` environment
        variable.
    seed
        Optional integer seed that the model uses to try and make output more
        reproducible.
    kwargs
        Additional arguments to pass to the `openai.OpenAI()` client constructor.

    Returns
    -------
    Chat
        A chat object that retains the state of the conversation.

    Note
    ----
    This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`) with
    the defaults tweaked for Cloudflare.

    Note
    ----
    Pasting credentials into a chat constructor (e.g.,
    `ChatCloudflare(api_key="...", account="...")`) is the simplest way to get
    started, and is fine for interactive use, but is problematic for code that
    may be shared with others.

    Instead, consider using environment variables or a configuration file to manage
    your credentials. One popular way to manage credentials is to use a `.env` file
    to store your credentials, and then use the `python-dotenv` package to load them
    into your environment.

    ```shell
    pip install python-dotenv
    ```

    ```shell
    # .env
    CLOUDFLARE_API_KEY=...
    CLOUDFLARE_ACCOUNT_ID=...
    ```

    ```python
    from chatlas import ChatCloudflare
    from dotenv import load_dotenv

    load_dotenv()
    chat = ChatCloudflare()
    chat.console()
    ```

    Another, more general, solution is to load your environment variables into the shell
    before starting Python (maybe in a `.bashrc`, `.zshrc`, etc. file):

    ```shell
    export CLOUDFLARE_API_KEY=...
    export CLOUDFLARE_ACCOUNT_ID=...
    ```
    """
    # Model catalog: https://developers.cloudflare.com/workers-ai/models/
    # `@cf` appears to be part of the model name
    if model is None:
        model = log_model_default("@cf/meta/llama-3.3-70b-instruct-fp8-fast")

    # Fall back to environment variables for credentials not passed explicitly.
    if api_key is None:
        api_key = os.getenv("CLOUDFLARE_API_KEY")

    if account is None:
        account = os.getenv("CLOUDFLARE_ACCOUNT_ID")

    # The account ID is embedded in the base URL below, so it must be present.
    if account is None:
        raise ValueError(
            "Cloudflare account ID is required. Set the CLOUDFLARE_ACCOUNT_ID "
            "environment variable or pass the `account` parameter."
        )

    # Pin the seed under test so recorded responses are reproducible.
    if isinstance(seed, MISSING_TYPE):
        seed = 1014 if is_testing() else None

    # OpenAI-compatible endpoint, per
    # https://developers.cloudflare.com/workers-ai/configuration/open-ai-compatibility/
    cloudflare_api = "https://api.cloudflare.com/client/v4/accounts"
    base_url = f"{cloudflare_api}/{account}/ai/v1/"

    return Chat(
        provider=OpenAIProvider(
            api_key=api_key,
            model=model,
            base_url=base_url,
            seed=seed,
            name="Cloudflare",
            kwargs=kwargs,
        ),
        system_prompt=system_prompt,
    )
|
|
@@ -127,3 +127,14 @@ class DatabricksProvider(OpenAIProvider):
|
|
|
127
127
|
api_key="no-token", # A placeholder to pass validations, this will not be used
|
|
128
128
|
http_client=httpx.AsyncClient(auth=client._client.auth),
|
|
129
129
|
)
|
|
130
|
+
|
|
131
|
+
# Databricks doesn't support stream_options
|
|
132
|
+
def _chat_perform_args(
|
|
133
|
+
self, stream, turns, tools, data_model=None, kwargs=None
|
|
134
|
+
) -> "SubmitInputArgs":
|
|
135
|
+
kwargs2 = super()._chat_perform_args(stream, turns, tools, data_model, kwargs)
|
|
136
|
+
|
|
137
|
+
if "stream_options" in kwargs2:
|
|
138
|
+
del kwargs2["stream_options"]
|
|
139
|
+
|
|
140
|
+
return kwargs2
|
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from typing import TYPE_CHECKING, Optional, cast
|
|
5
|
+
|
|
6
|
+
from ._chat import Chat
|
|
7
|
+
from ._logging import log_model_default
|
|
8
|
+
from ._provider_openai import OpenAIProvider
|
|
9
|
+
from ._turn import Turn
|
|
10
|
+
from ._utils import MISSING, MISSING_TYPE, is_testing
|
|
11
|
+
|
|
12
|
+
if TYPE_CHECKING:
|
|
13
|
+
from openai.types.chat import ChatCompletion, ChatCompletionMessageParam
|
|
14
|
+
|
|
15
|
+
from .types.openai import ChatClientArgs, SubmitInputArgs
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def ChatDeepSeek(
    *,
    system_prompt: Optional[str] = None,
    model: Optional[str] = None,
    api_key: Optional[str] = None,
    base_url: str = "https://api.deepseek.com",
    seed: Optional[int] | MISSING_TYPE = MISSING,
    kwargs: Optional["ChatClientArgs"] = None,
) -> Chat["SubmitInputArgs", ChatCompletion]:
    """
    Chat with a model hosted on DeepSeek.

    DeepSeek is a platform for AI inference with competitive pricing
    and performance.

    Prerequisites
    -------------

    ::: {.callout-note}
    ## API key

    Sign up at <https://platform.deepseek.com> to get an API key.
    :::

    Examples
    --------

    ```python
    import os
    from chatlas import ChatDeepSeek

    chat = ChatDeepSeek(api_key=os.getenv("DEEPSEEK_API_KEY"))
    chat.chat("What is the capital of France?")
    ```

    Known limitations
    -----------------

    * Structured data extraction is not supported.
    * Images are not supported.

    Parameters
    ----------
    system_prompt
        A system prompt to set the behavior of the assistant.
    model
        The model to use for the chat. The default, None, will pick a reasonable
        default, and warn you about it. We strongly recommend explicitly choosing
        a model for all but the most casual use.
    api_key
        The API key to use for authentication. You generally should not supply
        this directly, but instead set the `DEEPSEEK_API_KEY` environment variable.
    base_url
        The base URL to the endpoint; the default uses DeepSeek's API.
    seed
        Optional integer seed that DeepSeek uses to try and make output more
        reproducible.
    kwargs
        Additional arguments to pass to the `openai.OpenAI()` client constructor.

    Returns
    -------
    Chat
        A chat object that retains the state of the conversation.

    Note
    ----
    This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`) with
    the defaults tweaked for DeepSeek.

    Note
    ----
    Pasting an API key into a chat constructor (e.g., `ChatDeepSeek(api_key="...")`)
    is the simplest way to get started, and is fine for interactive use, but is
    problematic for code that may be shared with others.

    Instead, consider using environment variables or a configuration file to manage
    your credentials. One popular way to manage credentials is to use a `.env` file
    to store your credentials, and then use the `python-dotenv` package to load them
    into your environment.

    ```shell
    pip install python-dotenv
    ```

    ```shell
    # .env
    DEEPSEEK_API_KEY=...
    ```

    ```python
    from chatlas import ChatDeepSeek
    from dotenv import load_dotenv

    load_dotenv()
    chat = ChatDeepSeek()
    chat.console()
    ```

    Another, more general, solution is to load your environment variables into the shell
    before starting Python (maybe in a `.bashrc`, `.zshrc`, etc. file):

    ```shell
    export DEEPSEEK_API_KEY=...
    ```
    """
    # Warn when falling back to the library-chosen default model.
    if model is None:
        model = log_model_default("deepseek-chat")

    # Prefer an explicit key; otherwise read it from the environment.
    if api_key is None:
        api_key = os.environ.get("DEEPSEEK_API_KEY")

    # Pin the seed under test so recorded responses are reproducible.
    if isinstance(seed, MISSING_TYPE):
        seed = 1014 if is_testing() else None

    provider = DeepSeekProvider(
        api_key=api_key,
        model=model,
        base_url=base_url,
        seed=seed,
        name="DeepSeek",
        kwargs=kwargs,
    )

    return Chat(provider=provider, system_prompt=system_prompt)
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
class DeepSeekProvider(OpenAIProvider):
    """OpenAI-compatible provider with DeepSeek-specific message formatting."""

    @staticmethod
    def _as_message_param(turns: list[Turn]) -> list["ChatCompletionMessageParam"]:
        from openai.types.chat import (
            ChatCompletionAssistantMessageParam,
            ChatCompletionUserMessageParam,
        )

        params = OpenAIProvider._as_message_param(turns)

        # DeepSeek expects message content as a plain string, so collapse any
        # list-of-parts content into a single concatenated text string.
        for idx, message in enumerate(params):
            if message["role"] not in ("assistant", "user"):
                continue
            message = cast(
                ChatCompletionAssistantMessageParam
                | ChatCompletionUserMessageParam,
                message,
            )
            parts = message.get("content", None)
            if isinstance(parts, list):
                params[idx]["content"] = "".join(
                    part.get("text", "") for part in parts
                )

        return params
|