chatlas 0.8.1__tar.gz → 0.9.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: the registry has flagged this version of chatlas; see the registry listing for details.

Files changed (248)
  1. {chatlas-0.8.1 → chatlas-0.9.1}/.github/workflows/check-update-types.yml +1 -1
  2. chatlas-0.9.1/.github/workflows/update-pricing.yml +38 -0
  3. {chatlas-0.8.1 → chatlas-0.9.1}/.gitignore +0 -3
  4. {chatlas-0.8.1 → chatlas-0.9.1}/CHANGELOG.md +54 -0
  5. chatlas-0.9.1/PKG-INFO +141 -0
  6. chatlas-0.9.1/README.md +60 -0
  7. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/__init__.py +2 -1
  8. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_anthropic.py +79 -45
  9. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_auto.py +3 -12
  10. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_chat.py +774 -148
  11. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_content.py +149 -29
  12. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_databricks.py +4 -14
  13. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_github.py +21 -25
  14. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_google.py +71 -32
  15. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_groq.py +15 -18
  16. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_interpolate.py +3 -4
  17. chatlas-0.9.1/chatlas/_mcp_manager.py +306 -0
  18. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_ollama.py +14 -18
  19. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_openai.py +74 -39
  20. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_perplexity.py +14 -18
  21. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_provider.py +78 -8
  22. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_snowflake.py +29 -18
  23. chatlas-0.9.1/chatlas/_tokens.py +167 -0
  24. chatlas-0.9.1/chatlas/_tools.py +351 -0
  25. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_turn.py +2 -18
  26. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_utils.py +27 -1
  27. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_version.py +2 -2
  28. chatlas-0.9.1/chatlas/data/prices.json +264 -0
  29. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/types/anthropic/_submit.py +2 -0
  30. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/types/openai/_client.py +1 -0
  31. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/types/openai/_client_azure.py +1 -0
  32. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/types/openai/_submit.py +4 -1
  33. {chatlas-0.8.1 → chatlas-0.9.1}/docs/_quarto.yml +63 -32
  34. {chatlas-0.8.1 → chatlas-0.9.1}/docs/_sidebar.yml +8 -0
  35. chatlas-0.9.1/docs/get-started/async.qmd +35 -0
  36. chatlas-0.9.1/docs/get-started/chat.qmd +230 -0
  37. chatlas-0.9.1/docs/get-started/chatbots.qmd +304 -0
  38. chatlas-0.9.1/docs/get-started/debug.qmd +44 -0
  39. chatlas-0.9.1/docs/get-started/models.qmd +66 -0
  40. chatlas-0.9.1/docs/get-started/monitor.qmd +112 -0
  41. chatlas-0.9.1/docs/get-started/parameters.qmd +37 -0
  42. chatlas-0.9.1/docs/get-started/stream.qmd +111 -0
  43. chatlas-0.9.1/docs/get-started/structured-data.qmd +231 -0
  44. chatlas-0.9.1/docs/get-started/system-prompt.qmd +147 -0
  45. chatlas-0.9.1/docs/get-started/tools.qmd +125 -0
  46. chatlas-0.9.1/docs/images/chat-app.png +0 -0
  47. chatlas-0.9.1/docs/images/chat-console.mp4 +0 -0
  48. chatlas-0.9.1/docs/images/chat-console.png +0 -0
  49. chatlas-0.9.1/docs/images/chat-notebook.mp4 +0 -0
  50. chatlas-0.9.1/docs/images/chat-parameters.png +0 -0
  51. chatlas-0.9.1/docs/images/chatbot-gradio.png +0 -0
  52. chatlas-0.9.1/docs/images/chatbot-shiny.png +0 -0
  53. chatlas-0.9.1/docs/images/chatbot-streamlit.png +0 -0
  54. chatlas-0.9.1/docs/images/chatbot-textual.png +0 -0
  55. chatlas-0.9.1/docs/images/chatlas-hello.png +0 -0
  56. chatlas-0.9.1/docs/images/client-parameters.png +0 -0
  57. chatlas-0.9.1/docs/images/hello-chat-console.png +0 -0
  58. chatlas-0.9.1/docs/images/model-parameters.png +0 -0
  59. chatlas-0.9.1/docs/images/model-type-hints.png +0 -0
  60. chatlas-0.9.1/docs/images/shiny-mcp-run-python.png +0 -0
  61. chatlas-0.9.1/docs/images/shiny-tool-call-display.png +0 -0
  62. chatlas-0.9.1/docs/images/shiny-tool-call-map.png +0 -0
  63. chatlas-0.9.1/docs/index.qmd +121 -0
  64. chatlas-0.9.1/docs/logos/hero/hero-old.png +0 -0
  65. chatlas-0.9.1/docs/logos/hero/hero.png +0 -0
  66. chatlas-0.9.1/docs/logos/hex/logo.png +0 -0
  67. chatlas-0.9.1/docs/logos/small/logo.png +0 -0
  68. chatlas-0.9.1/docs/misc/RAG.qmd +218 -0
  69. chatlas-0.9.1/docs/misc/examples.qmd +43 -0
  70. chatlas-0.9.1/docs/misc/mcp-tools.qmd +245 -0
  71. chatlas-0.9.1/docs/misc/vocabulary.qmd +46 -0
  72. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/Chat.qmd +115 -55
  73. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/ChatAnthropic.qmd +2 -2
  74. chatlas-0.9.1/docs/reference/ChatAuto.qmd +78 -0
  75. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/ChatAzureOpenAI.qmd +0 -9
  76. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/ChatBedrockAnthropic.qmd +2 -2
  77. chatlas-0.9.1/docs/reference/ChatDatabricks.qmd +63 -0
  78. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/ChatGithub.qmd +0 -6
  79. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/ChatGoogle.qmd +1 -2
  80. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/ChatGroq.qmd +0 -6
  81. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/ChatOllama.qmd +0 -6
  82. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/ChatOpenAI.qmd +0 -6
  83. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/ChatPerplexity.qmd +0 -6
  84. chatlas-0.9.1/docs/reference/ChatSnowflake.qmd +62 -0
  85. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/ChatVertex.qmd +1 -2
  86. chatlas-0.9.1/docs/reference/Tool.qmd +74 -0
  87. chatlas-0.9.1/docs/reference/ToolRejectError.qmd +51 -0
  88. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/Turn.qmd +9 -1
  89. chatlas-0.9.1/docs/reference/content_pdf_file.qmd +22 -0
  90. chatlas-0.9.1/docs/reference/content_pdf_url.qmd +22 -0
  91. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/index.qmd +13 -0
  92. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/interpolate.qmd +1 -1
  93. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/interpolate_file.qmd +1 -1
  94. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/types.ChatResponse.qmd +1 -1
  95. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/types.ChatResponseAsync.qmd +1 -1
  96. chatlas-0.9.1/docs/reference/types.ContentImageInline.qmd +18 -0
  97. chatlas-0.9.1/docs/reference/types.ContentImageRemote.qmd +17 -0
  98. chatlas-0.9.1/docs/reference/types.ContentJson.qmd +16 -0
  99. chatlas-0.9.1/docs/reference/types.ContentText.qmd +7 -0
  100. chatlas-0.9.1/docs/reference/types.ContentToolRequest.qmd +33 -0
  101. chatlas-0.9.1/docs/reference/types.ContentToolResult.qmd +57 -0
  102. chatlas-0.8.1/docs/examples/third-party-testing.txt → chatlas-0.9.1/docs/structured-data/article-summary.qmd +64 -0
  103. chatlas-0.9.1/docs/structured-data/classification.qmd +35 -0
  104. chatlas-0.9.1/docs/structured-data/entity-recognition.qmd +36 -0
  105. chatlas-0.9.1/docs/structured-data/multi-modal.qmd +94 -0
  106. chatlas-0.9.1/docs/structured-data/sentiment-analysis.qmd +50 -0
  107. chatlas-0.9.1/docs/styles.scss +93 -0
  108. chatlas-0.9.1/docs/tool-calling/approval.qmd +108 -0
  109. chatlas-0.9.1/docs/tool-calling/displays.qmd +176 -0
  110. chatlas-0.9.1/docs/tool-calling/how-it-works.qmd +28 -0
  111. chatlas-0.9.1/docs/why-chatlas.qmd +68 -0
  112. {chatlas-0.8.1 → chatlas-0.9.1}/pyproject.toml +4 -1
  113. {chatlas-0.8.1 → chatlas-0.9.1}/scripts/_utils.py +8 -2
  114. {chatlas-0.8.1 → chatlas-0.9.1}/tests/__snapshots__/test_chat.ambr +1 -1
  115. {chatlas-0.8.1 → chatlas-0.9.1}/tests/conftest.py +20 -12
  116. chatlas-0.9.1/tests/mcp_servers/http_add.py +13 -0
  117. chatlas-0.9.1/tests/mcp_servers/http_current_date.py +16 -0
  118. chatlas-0.9.1/tests/mcp_servers/stdio_current_date.py +16 -0
  119. chatlas-0.9.1/tests/mcp_servers/stdio_subtract_multiply.py +18 -0
  120. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_chat.py +81 -15
  121. chatlas-0.9.1/tests/test_content_html.py +214 -0
  122. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_content_tools.py +46 -14
  123. chatlas-0.9.1/tests/test_mcp_client.py +544 -0
  124. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_provider_anthropic.py +12 -14
  125. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_provider_azure.py +1 -0
  126. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_provider_google.py +73 -37
  127. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_provider_openai.py +5 -0
  128. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_provider_snowflake.py +7 -3
  129. chatlas-0.9.1/tests/test_set_model_params.py +578 -0
  130. chatlas-0.9.1/tests/test_tokens.py +113 -0
  131. chatlas-0.9.1/tests/test_tool_from_mcp.py +443 -0
  132. chatlas-0.9.1/tests/test_tools_enhanced.py +590 -0
  133. chatlas-0.9.1/tests/test_turns.py +92 -0
  134. chatlas-0.8.1/PKG-INFO +0 -383
  135. chatlas-0.8.1/README.md +0 -304
  136. chatlas-0.8.1/chatlas/_tokens.py +0 -87
  137. chatlas-0.8.1/chatlas/_tools.py +0 -192
  138. chatlas-0.8.1/docs/get-started.qmd +0 -108
  139. chatlas-0.8.1/docs/images/logo.png +0 -0
  140. chatlas-0.8.1/docs/index.py +0 -25
  141. chatlas-0.8.1/docs/logos/favicon/android-chrome-192x192.png +0 -0
  142. chatlas-0.8.1/docs/logos/favicon/android-chrome-512x512.png +0 -0
  143. chatlas-0.8.1/docs/logos/favicon/apple-touch-icon.png +0 -0
  144. chatlas-0.8.1/docs/logos/favicon/favicon-16x16.png +0 -0
  145. chatlas-0.8.1/docs/logos/favicon/favicon-32x32.png +0 -0
  146. chatlas-0.8.1/docs/logos/favicon/favicon.ico +0 -0
  147. chatlas-0.8.1/docs/logos/icon/brand-yml-icon-black.png +0 -0
  148. chatlas-0.8.1/docs/logos/icon/brand-yml-icon-black.svg +0 -12
  149. chatlas-0.8.1/docs/logos/icon/brand-yml-icon-color.png +0 -0
  150. chatlas-0.8.1/docs/logos/icon/brand-yml-icon-color.svg +0 -36
  151. chatlas-0.8.1/docs/logos/icon/brand-yml-icon-white.png +0 -0
  152. chatlas-0.8.1/docs/logos/icon/brand-yml-icon-white.svg +0 -12
  153. chatlas-0.8.1/docs/logos/tall/brand-yml-tall-black.png +0 -0
  154. chatlas-0.8.1/docs/logos/tall/brand-yml-tall-black.svg +0 -35
  155. chatlas-0.8.1/docs/logos/tall/brand-yml-tall-color.png +0 -0
  156. chatlas-0.8.1/docs/logos/tall/brand-yml-tall-color.svg +0 -81
  157. chatlas-0.8.1/docs/logos/tall/brand-yml-tall-white.png +0 -0
  158. chatlas-0.8.1/docs/logos/tall/brand-yml-tall-white.svg +0 -35
  159. chatlas-0.8.1/docs/logos/wide/brand-yml-wide-black.png +0 -0
  160. chatlas-0.8.1/docs/logos/wide/brand-yml-wide-black.svg +0 -35
  161. chatlas-0.8.1/docs/logos/wide/brand-yml-wide-color.png +0 -0
  162. chatlas-0.8.1/docs/logos/wide/brand-yml-wide-color.svg +0 -81
  163. chatlas-0.8.1/docs/logos/wide/brand-yml-wide-large-black.png +0 -0
  164. chatlas-0.8.1/docs/logos/wide/brand-yml-wide-large-color.png +0 -0
  165. chatlas-0.8.1/docs/logos/wide/brand-yml-wide-large-white.png +0 -0
  166. chatlas-0.8.1/docs/logos/wide/brand-yml-wide-white.png +0 -0
  167. chatlas-0.8.1/docs/logos/wide/brand-yml-wide-white.svg +0 -35
  168. chatlas-0.8.1/docs/prompt-design.qmd +0 -344
  169. chatlas-0.8.1/docs/rag.qmd +0 -78
  170. chatlas-0.8.1/docs/reference/Tool.qmd +0 -17
  171. chatlas-0.8.1/docs/reference/types.ContentImageInline.qmd +0 -18
  172. chatlas-0.8.1/docs/reference/types.ContentImageRemote.qmd +0 -17
  173. chatlas-0.8.1/docs/reference/types.ContentJson.qmd +0 -16
  174. chatlas-0.8.1/docs/reference/types.ContentText.qmd +0 -7
  175. chatlas-0.8.1/docs/reference/types.ContentToolRequest.qmd +0 -19
  176. chatlas-0.8.1/docs/reference/types.ContentToolResult.qmd +0 -20
  177. chatlas-0.8.1/docs/structured-data.qmd +0 -339
  178. chatlas-0.8.1/docs/styles.scss +0 -16
  179. chatlas-0.8.1/docs/tool-calling.qmd +0 -110
  180. chatlas-0.8.1/docs/web-apps.qmd +0 -77
  181. chatlas-0.8.1/tests/test_tokens.py +0 -72
  182. chatlas-0.8.1/tests/test_turns.py +0 -38
  183. {chatlas-0.8.1 → chatlas-0.9.1}/.github/workflows/docs-publish.yml +0 -0
  184. {chatlas-0.8.1 → chatlas-0.9.1}/.github/workflows/release.yml +0 -0
  185. {chatlas-0.8.1 → chatlas-0.9.1}/.github/workflows/test.yml +0 -0
  186. {chatlas-0.8.1 → chatlas-0.9.1}/.vscode/extensions.json +0 -0
  187. {chatlas-0.8.1 → chatlas-0.9.1}/.vscode/settings.json +0 -0
  188. {chatlas-0.8.1 → chatlas-0.9.1}/LICENSE +0 -0
  189. {chatlas-0.8.1 → chatlas-0.9.1}/Makefile +0 -0
  190. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_callbacks.py +0 -0
  191. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_content_image.py +0 -0
  192. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_content_pdf.py +0 -0
  193. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_display.py +0 -0
  194. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_live_render.py +0 -0
  195. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_logging.py +0 -0
  196. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_merge.py +0 -0
  197. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_tokens_old.py +0 -0
  198. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_typing_extensions.py +0 -0
  199. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/py.typed +0 -0
  200. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/types/__init__.py +0 -0
  201. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/types/anthropic/__init__.py +0 -0
  202. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/types/anthropic/_client.py +0 -0
  203. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/types/anthropic/_client_bedrock.py +0 -0
  204. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/types/google/__init__.py +0 -0
  205. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/types/google/_client.py +0 -0
  206. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/types/google/_submit.py +0 -0
  207. {chatlas-0.8.1 → chatlas-0.9.1}/chatlas/types/openai/__init__.py +0 -0
  208. {chatlas-0.8.1 → chatlas-0.9.1}/docs/.gitignore +0 -0
  209. {chatlas-0.8.1 → chatlas-0.9.1}/docs/_extensions/machow/interlinks/.gitignore +0 -0
  210. {chatlas-0.8.1 → chatlas-0.9.1}/docs/_extensions/machow/interlinks/_extension.yml +0 -0
  211. {chatlas-0.8.1 → chatlas-0.9.1}/docs/_extensions/machow/interlinks/interlinks.lua +0 -0
  212. {chatlas-0.8.1 → chatlas-0.9.1}/docs/congressional-assets.png +0 -0
  213. {chatlas-0.8.1 → chatlas-0.9.1}/docs/images/congressional-assets.png +0 -0
  214. {chatlas-0.8.1 → chatlas-0.9.1}/docs/images/posit-logo.png +0 -0
  215. {chatlas-0.8.1 → chatlas-0.9.1}/docs/images/tool-calling-right.svg +0 -0
  216. {chatlas-0.8.1 → chatlas-0.9.1}/docs/images/tool-calling-wrong.svg +0 -0
  217. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/Provider.qmd +0 -0
  218. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/content_image_file.qmd +0 -0
  219. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/content_image_plot.qmd +0 -0
  220. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/content_image_url.qmd +0 -0
  221. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/image_file.qmd +0 -0
  222. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/image_plot.qmd +0 -0
  223. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/image_url.qmd +0 -0
  224. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/token_usage.qmd +0 -0
  225. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/types.Content.qmd +0 -0
  226. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/types.ContentImage.qmd +0 -0
  227. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/types.ImageContentTypes.qmd +0 -0
  228. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/types.MISSING.qmd +0 -0
  229. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/types.MISSING_TYPE.qmd +0 -0
  230. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/types.SubmitInputArgsT.qmd +0 -0
  231. {chatlas-0.8.1 → chatlas-0.9.1}/docs/reference/types.TokenUsage.qmd +0 -0
  232. {chatlas-0.8.1 → chatlas-0.9.1}/pytest.ini +0 -0
  233. {chatlas-0.8.1 → chatlas-0.9.1}/scripts/_generate_anthropic_types.py +0 -0
  234. {chatlas-0.8.1 → chatlas-0.9.1}/scripts/_generate_google_types.py +0 -0
  235. {chatlas-0.8.1 → chatlas-0.9.1}/scripts/_generate_openai_types.py +0 -0
  236. {chatlas-0.8.1 → chatlas-0.9.1}/scripts/main.py +0 -0
  237. {chatlas-0.8.1 → chatlas-0.9.1}/tests/__init__.py +0 -0
  238. {chatlas-0.8.1 → chatlas-0.9.1}/tests/apples.pdf +0 -0
  239. {chatlas-0.8.1 → chatlas-0.9.1}/tests/images/dice.png +0 -0
  240. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_auto.py +0 -0
  241. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_callbacks.py +0 -0
  242. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_content.py +0 -0
  243. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_content_image.py +0 -0
  244. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_content_pdf.py +0 -0
  245. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_interpolate.py +0 -0
  246. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_provider_bedrock.py +0 -0
  247. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_provider_databricks.py +0 -0
  248. {chatlas-0.8.1 → chatlas-0.9.1}/tests/test_utils_merge.py +0 -0
{chatlas-0.8.1 → chatlas-0.9.1}/.github/workflows/check-update-types.yml
@@ -1,4 +1,4 @@
- name: Test - Python
+ name: Check Provider Types

  on:
    workflow_dispatch:
chatlas-0.9.1/.github/workflows/update-pricing.yml ADDED
@@ -0,0 +1,38 @@
+ name: Update Pricing
+
+ on:
+   workflow_dispatch:
+   pull_request:
+     types: [opened, synchronize, reopened, ready_for_review]
+
+ jobs:
+   check-pricing:
+     name: Check for pricing updates in Ellmer
+     runs-on: ubuntu-latest
+
+     steps:
+
+       - name: Checkout current prices.json in chatlas
+         uses: actions/checkout@v4
+         with:
+           sparse-checkout: /chatlas/data/prices.json
+           sparse-checkout-cone-mode: false
+           path: main
+
+       - name: Get Ellmer prices.json
+         uses: actions/checkout@v4
+         with:
+           sparse-checkout: /data-raw/prices.json
+           sparse-checkout-cone-mode: false
+           repository: tidyverse/ellmer
+           path: ellmer
+
+       - name: Check for differences
+         run: |
+           echo "Checking diff between prices.json"
+           git diff --no-index --stat ellmer/data-raw/prices.json main/chatlas/data/prices.json
+           if [[ -n $(git diff --no-index --stat ellmer/data-raw/prices.json main/chatlas/data/prices.json) ]]; then
+             echo "Changes detected:"
+             echo "::error::Ellmer's prices.json does not match the current Chatlas prices.json"
+             exit 1
+           fi
{chatlas-0.8.1 → chatlas-0.9.1}/.gitignore
@@ -10,8 +10,5 @@ sandbox/

  /.luarc.json

- # Automatically generated by docs/index.py
- docs/index.qmd
-
  # setuptools_scm
  chatlas/_version.py
{chatlas-0.8.1 → chatlas-0.9.1}/CHANGELOG.md
@@ -7,6 +7,60 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
  -->

+ ## [0.9.1] - 2025-07-09
+
+ ### Bug fixes
+
+ * Fixed an issue where `.chat()` wasn't streaming output properly in (the latest build of) Positron's Jupyter notebook. (#131)
+
+ * Needless warnings and errors are no longer thrown when model pricing info is unavailable. (#132)
+
+ ## [0.9.0] - 2025-07-02
+
+ ### New features
+
+ * `Chat` gains a handful of new methods:
+   * `.register_mcp_tools_http_stream_async()` and `.register_mcp_tools_stdio_async()`: for registering tools from an [MCP server](https://modelcontextprotocol.io/). (#39)
+   * `.get_tools()` and `.set_tools()`: for fine-grained control over registered tools. (#39)
+   * `.set_model_params()`: for setting common LLM parameters in a model-agnostic fashion. (#127)
+   * `.get_cost()`: to get the estimated cost of the chat. Only popular models are supported, but you can also supply your own token prices. (#106)
+   * `.add_turn()`: to add `Turn`(s) to the current chat history. (#126)
+ * Tool functions passed to `.register_tool()` can now `yield` numerous results. (#39)
+ * A `ContentToolResultImage` content class was added for returning images from tools. It currently only works with `ChatAnthropic`. (#39)
+ * A `Tool` can now be constructed from a pre-existing tool schema (via a new `__init__` method). (#39)
+ * The `Chat.app()` method gains a `host` parameter. (#122)
+ * `ChatGithub()` now supports the more standard `GITHUB_TOKEN` environment variable for storing the API key. (#123)
+
+ ### Changes
+
+ #### Breaking Changes
+
+ * `Chat` constructors (`ChatOpenAI()`, `ChatAnthropic()`, etc.) no longer have a `turns` keyword parameter. Use the `.set_turns()` method instead to set the (initial) chat history. (#126)
+ * `Chat`'s `.tokens()` methods have been removed in favor of `.get_tokens()`, which returns both cumulative tokens in the turn and discrete tokens. (#106)
+
+ #### Other Changes
+
+ * `Tool`'s constructor no longer takes a function as input. Use the new `.from_func()` method instead to create a `Tool` from a function. (#39)
+ * `.register_tool()` now throws an exception when the tool has the same name as an already registered tool. Set the new `force` parameter to `True` to force the registration. (#39)
+
+ ### Improvements
+
+ * `ChatGoogle()` and `ChatVertex()` now default to Gemini 2.5 (instead of 2.0). (#125)
+ * `ChatOpenAI()` and `ChatGithub()` now default to GPT-4.1 (instead of 4o). (#115)
+ * `ChatAnthropic()` now supports `content_image_url()`. (#112)
+ * HTML styling improvements for `ContentToolResult` and `ContentToolRequest`. (#39)
+ * `Chat`'s representation now includes cost information if it can be calculated. (#106)
+ * `token_usage()` includes cost if it can be calculated. (#106)
+
+ ### Bug fixes
+
+ * Fixed an issue where `httpx` client customization (e.g., `ChatOpenAI(kwargs={"http_client": httpx.Client()})`) wasn't working as expected. (#108)
+
+ ### Developer APIs
+
+ * The base `Provider` class now includes a `name` and `model` property. In order for them to work properly, provider implementations should pass a `name` and `model` along to the `__init__()` method. (#106)
+ * `Provider` implementations must implement two new abstract methods: `translate_model_params()` and `supported_model_params()`.
+
  ## [0.8.1] - 2025-05-30

  * Fixed `@overload` definitions for `.stream()` and `.stream_async()`.
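To make the 0.9.0 entries above concrete, here is a minimal sketch of the new `Chat` surface. It is illustrative only and assembled from the changelog wording rather than the released API, so exact signatures, defaults, and the model name shown are assumptions.

```python
from chatlas import ChatOpenAI

chat = ChatOpenAI(model="gpt-4.1", system_prompt="Be terse.")

# New in 0.9.0: set common LLM parameters in a provider-agnostic way.
chat.set_model_params(temperature=0.2, max_tokens=512)

def add(x: int, y: int) -> int:
    "Add two numbers."
    return x + y

# Re-registering a tool with an existing name now raises unless force=True.
chat.register_tool(add)
chat.register_tool(add, force=True)

chat.chat("What is 2 + 2?")

# New in 0.9.0: token counts and, for popular models, an estimated cost.
print(chat.get_tokens())
print(chat.get_cost())
```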
chatlas-0.9.1/PKG-INFO ADDED
@@ -0,0 +1,141 @@
+ Metadata-Version: 2.4
+ Name: chatlas
+ Version: 0.9.1
+ Summary: A simple and consistent interface for chatting with LLMs
+ Project-URL: Homepage, https://posit-dev.github.io/chatlas
+ Project-URL: Documentation, https://posit-dev.github.io/chatlas
+ Project-URL: Repository, https://github.com/posit-dev/chatlas
+ Project-URL: Issues, https://github.com/posit-dev/chatlas/issues/
+ Project-URL: Changelog, https://github.com/posit-dev/chatlas/blob/main/CHANGELOG.md
+ Author-email: Carson Sievert <carson@posit.co>
+ License-Expression: MIT
+ License-File: LICENSE
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Python: >=3.9
+ Requires-Dist: jinja2
+ Requires-Dist: openai
+ Requires-Dist: orjson
+ Requires-Dist: pydantic>=2.0
+ Requires-Dist: requests
+ Requires-Dist: rich
+ Provides-Extra: anthropic
+ Requires-Dist: anthropic; extra == 'anthropic'
+ Provides-Extra: azure-openai
+ Provides-Extra: bedrock-anthropic
+ Requires-Dist: anthropic[bedrock]; extra == 'bedrock-anthropic'
+ Provides-Extra: databricks
+ Requires-Dist: databricks-sdk; extra == 'databricks'
+ Provides-Extra: dev
+ Requires-Dist: anthropic[bedrock]; extra == 'dev'
+ Requires-Dist: databricks-sdk; extra == 'dev'
+ Requires-Dist: google-genai>=1.14.0; extra == 'dev'
+ Requires-Dist: htmltools; extra == 'dev'
+ Requires-Dist: matplotlib; extra == 'dev'
+ Requires-Dist: numpy>1.24.4; extra == 'dev'
+ Requires-Dist: openai; extra == 'dev'
+ Requires-Dist: pillow; extra == 'dev'
+ Requires-Dist: python-dotenv; extra == 'dev'
+ Requires-Dist: ruff>=0.6.5; extra == 'dev'
+ Requires-Dist: shiny; extra == 'dev'
+ Requires-Dist: snowflake-ml-python>=1.8.4; extra == 'dev'
+ Requires-Dist: tenacity; extra == 'dev'
+ Requires-Dist: tiktoken; extra == 'dev'
+ Requires-Dist: torch; (python_version <= '3.11') and extra == 'dev'
+ Provides-Extra: docs
+ Requires-Dist: griffe>=1; extra == 'docs'
+ Requires-Dist: ipykernel; extra == 'docs'
+ Requires-Dist: ipywidgets; extra == 'docs'
+ Requires-Dist: nbclient; extra == 'docs'
+ Requires-Dist: nbformat; extra == 'docs'
+ Requires-Dist: numpy; extra == 'docs'
+ Requires-Dist: pandas; extra == 'docs'
+ Requires-Dist: pyyaml; extra == 'docs'
+ Requires-Dist: quartodoc>=0.7; extra == 'docs'
+ Requires-Dist: sentence-transformers; extra == 'docs'
+ Provides-Extra: github
+ Provides-Extra: google
+ Requires-Dist: google-genai>=1.14.0; extra == 'google'
+ Provides-Extra: groq
+ Provides-Extra: mcp
+ Requires-Dist: mcp>=1.4.0; (python_version >= '3.10') and extra == 'mcp'
+ Provides-Extra: ollama
+ Provides-Extra: openai
+ Provides-Extra: perplexity
+ Provides-Extra: snowflake
+ Requires-Dist: snowflake-ml-python; extra == 'snowflake'
+ Provides-Extra: test
+ Requires-Dist: pyright>=1.1.379; extra == 'test'
+ Requires-Dist: pytest-asyncio; extra == 'test'
+ Requires-Dist: pytest>=8.3.2; extra == 'test'
+ Requires-Dist: syrupy>=4; extra == 'test'
+ Provides-Extra: vertex
+ Requires-Dist: google-genai>=1.14.0; extra == 'vertex'
+ Description-Content-Type: text/markdown
+
+ # chatlas <a href="https://posit-dev.github.io/chatlas"><img src="docs/logos/hex/logo.png" align="right" height="138" alt="chatlas website" /></a>
+
+ <p>
+ <!-- badges start -->
+ <a href="https://pypi.org/project/chatlas/"><img alt="PyPI" src="https://img.shields.io/pypi/v/chatlas?logo=python&logoColor=white&color=orange"></a>
+ <a href="https://choosealicense.com/licenses/mit/"><img src="https://img.shields.io/badge/License-MIT-blue.svg" alt="MIT License"></a>
+ <a href="https://pypi.org/project/chatlas"><img src="https://img.shields.io/pypi/pyversions/chatlas.svg" alt="versions"></a>
+ <a href="https://github.com/posit-dev/chatlas"><img src="https://github.com/posit-dev/chatlas/actions/workflows/test.yml/badge.svg?branch=main" alt="Python Tests"></a>
+ <!-- badges end -->
+ </p>
+
+ Your friendly guide to building LLM chat apps in Python with less effort and more clarity.
+
+ ## Install
+
+ Install the latest stable release [from PyPI](https://pypi.org/project/chatlas/):
+
+ ```bash
+ pip install -U chatlas
+ ```
+
+ Or, install the latest development version from GitHub:
+
+ ```bash
+ pip install -U git+https://github.com/posit-dev/chatlas
+ ```
+
+ ## Quick start
+
+ Get started in 3 simple steps:
+
+ 1. Choose a model provider, such as [ChatOpenAI](https://posit-dev.github.io/chatlas/reference/ChatOpenAI.html) or [ChatAnthropic](https://posit-dev.github.io/chatlas/reference/ChatAnthropic.html).
+ 2. Visit the provider's [reference](https://posit-dev.github.io/chatlas/reference) page to get set up with the necessary credentials.
+ 3. Create the relevant `Chat` client and start chatting!
+
+ ```python
+ from chatlas import ChatOpenAI
+
+ # Optional (but recommended) model and system_prompt
+ chat = ChatOpenAI(
+     model="gpt-4.1-mini",
+     system_prompt="You are a helpful assistant.",
+ )
+
+ # Optional tool registration
+ def get_current_weather(lat: float, lng: float):
+     "Get the current weather for a given location."
+     return "sunny"
+
+ chat.register_tool(get_current_weather)
+
+ # Send user prompt to the model for a response.
+ chat.chat("How's the weather in San Francisco?")
+ ```
+
+
+ <img src="docs/images/chatlas-hello.png" alt="Model response output to the user query: 'How's the weather in San Francisco?'" width="67%" style="display: block; margin-left: auto; margin-right: auto">
+
+
+ Learn more at <https://posit-dev.github.io/chatlas>
chatlas-0.9.1/README.md ADDED
@@ -0,0 +1,60 @@
+ # chatlas <a href="https://posit-dev.github.io/chatlas"><img src="docs/logos/hex/logo.png" align="right" height="138" alt="chatlas website" /></a>
+
+ <p>
+ <!-- badges start -->
+ <a href="https://pypi.org/project/chatlas/"><img alt="PyPI" src="https://img.shields.io/pypi/v/chatlas?logo=python&logoColor=white&color=orange"></a>
+ <a href="https://choosealicense.com/licenses/mit/"><img src="https://img.shields.io/badge/License-MIT-blue.svg" alt="MIT License"></a>
+ <a href="https://pypi.org/project/chatlas"><img src="https://img.shields.io/pypi/pyversions/chatlas.svg" alt="versions"></a>
+ <a href="https://github.com/posit-dev/chatlas"><img src="https://github.com/posit-dev/chatlas/actions/workflows/test.yml/badge.svg?branch=main" alt="Python Tests"></a>
+ <!-- badges end -->
+ </p>
+
+ Your friendly guide to building LLM chat apps in Python with less effort and more clarity.
+
+ ## Install
+
+ Install the latest stable release [from PyPI](https://pypi.org/project/chatlas/):
+
+ ```bash
+ pip install -U chatlas
+ ```
+
+ Or, install the latest development version from GitHub:
+
+ ```bash
+ pip install -U git+https://github.com/posit-dev/chatlas
+ ```
+
+ ## Quick start
+
+ Get started in 3 simple steps:
+
+ 1. Choose a model provider, such as [ChatOpenAI](https://posit-dev.github.io/chatlas/reference/ChatOpenAI.html) or [ChatAnthropic](https://posit-dev.github.io/chatlas/reference/ChatAnthropic.html).
+ 2. Visit the provider's [reference](https://posit-dev.github.io/chatlas/reference) page to get set up with the necessary credentials.
+ 3. Create the relevant `Chat` client and start chatting!
+
+ ```python
+ from chatlas import ChatOpenAI
+
+ # Optional (but recommended) model and system_prompt
+ chat = ChatOpenAI(
+     model="gpt-4.1-mini",
+     system_prompt="You are a helpful assistant.",
+ )
+
+ # Optional tool registration
+ def get_current_weather(lat: float, lng: float):
+     "Get the current weather for a given location."
+     return "sunny"
+
+ chat.register_tool(get_current_weather)
+
+ # Send user prompt to the model for a response.
+ chat.chat("How's the weather in San Francisco?")
+ ```
+
+
+ <img src="docs/images/chatlas-hello.png" alt="Model response output to the user query: 'How's the weather in San Francisco?'" width="67%" style="display: block; margin-left: auto; margin-right: auto">
+
+
+ Learn more at <https://posit-dev.github.io/chatlas>
{chatlas-0.8.1 → chatlas-0.9.1}/chatlas/__init__.py
@@ -2,7 +2,7 @@ from . import types
  from ._anthropic import ChatAnthropic, ChatBedrockAnthropic
  from ._auto import ChatAuto
  from ._chat import Chat
- from ._content import ContentToolRequest, ContentToolResult
+ from ._content import ContentToolRequest, ContentToolResult, ContentToolResultImage
  from ._content_image import content_image_file, content_image_plot, content_image_url
  from ._content_pdf import content_pdf_file, content_pdf_url
  from ._databricks import ChatDatabricks
@@ -46,6 +46,7 @@ __all__ = (
      "content_pdf_url",
      "ContentToolRequest",
      "ContentToolResult",
+     "ContentToolResultImage",
      "interpolate",
      "interpolate_file",
      "Provider",
{chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_anthropic.py
@@ -17,12 +17,15 @@ from ._content import (
      ContentText,
      ContentToolRequest,
      ContentToolResult,
+     ContentToolResultImage,
+     ContentToolResultResource,
  )
  from ._logging import log_model_default
- from ._provider import Provider
+ from ._provider import Provider, StandardModelParamNames, StandardModelParams
  from ._tokens import tokens_log
  from ._tools import Tool, basemodel_to_param_schema
- from ._turn import Turn, normalize_turns, user_turn
+ from ._turn import Turn, user_turn
+ from ._utils import split_http_client_kwargs

  if TYPE_CHECKING:
      from anthropic.types import (
@@ -58,7 +61,6 @@ else:
  def ChatAnthropic(
      *,
      system_prompt: Optional[str] = None,
-     turns: Optional[list[Turn]] = None,
      model: "Optional[ModelParam]" = None,
      api_key: Optional[str] = None,
      max_tokens: int = 4096,
@@ -104,13 +106,6 @@ def ChatAnthropic(
      ----------
      system_prompt
          A system prompt to set the behavior of the assistant.
-     turns
-         A list of turns to start the chat with (i.e., continuing a previous
-         conversation). If not provided, the conversation begins from scratch. Do
-         not provide non-None values for both `turns` and `system_prompt`. Each
-         message in the list should be a dictionary with at least `role` (usually
-         `system`, `user`, or `assistant`, but `tool` is also possible). Normally
-         there is also a `content` field, which is a string.
      model
          The model to use for the chat. The default, None, will pick a reasonable
          default, and warn you about it. We strongly recommend explicitly
@@ -177,22 +172,23 @@ def ChatAnthropic(
              max_tokens=max_tokens,
              kwargs=kwargs,
          ),
-         turns=normalize_turns(
-             turns or [],
-             system_prompt,
-         ),
+         system_prompt=system_prompt,
      )


- class AnthropicProvider(Provider[Message, RawMessageStreamEvent, Message]):
+ class AnthropicProvider(
+     Provider[Message, RawMessageStreamEvent, Message, "SubmitInputArgs"]
+ ):
      def __init__(
          self,
          *,
-         max_tokens: int,
+         max_tokens: int = 4096,
          model: str,
-         api_key: str | None,
+         api_key: Optional[str] = None,
+         name: str = "Anthropic",
          kwargs: Optional["ChatClientArgs"] = None,
      ):
+         super().__init__(name=name, model=model)
          try:
              from anthropic import Anthropic, AsyncAnthropic
          except ImportError:
@@ -200,8 +196,6 @@ class AnthropicProvider(Provider[Message, RawMessageStreamEvent, Message]):
                  "`ChatAnthropic()` requires the `anthropic` package. "
                  "You can install it with 'pip install anthropic'."
              )
-
-         self._model = model
          self._max_tokens = max_tokens

          kwargs_full: "ChatClientArgs" = {
@@ -209,9 +203,11 @@ class AnthropicProvider(Provider[Message, RawMessageStreamEvent, Message]):
              **(kwargs or {}),
          }

+         sync_kwargs, async_kwargs = split_http_client_kwargs(kwargs_full)
+
          # TODO: worth bringing in sync types?
-         self._client = Anthropic(**kwargs_full)  # type: ignore
-         self._async_client = AsyncAnthropic(**kwargs_full)
+         self._client = Anthropic(**sync_kwargs)  # type: ignore
+         self._async_client = AsyncAnthropic(**async_kwargs)

      @overload
      def chat_perform(
@@ -301,7 +297,7 @@ class AnthropicProvider(Provider[Message, RawMessageStreamEvent, Message]):
              """Extract structured data"""
              pass

-         data_model_tool = Tool(_structured_tool_call)
+         data_model_tool = Tool.from_func(_structured_tool_call)

          data_model_tool.schema["function"]["parameters"] = {
              "type": "object",
@@ -322,7 +318,7 @@ class AnthropicProvider(Provider[Message, RawMessageStreamEvent, Message]):
          kwargs_full: "SubmitInputArgs" = {
              "stream": stream,
              "messages": self._as_message_params(turns),
-             "model": self._model,
+             "model": self.model,
              "max_tokens": self._max_tokens,
              "tools": tool_schemas,
              **(kwargs or {}),
@@ -435,6 +431,34 @@ class AnthropicProvider(Provider[Message, RawMessageStreamEvent, Message]):

          return {arg: kwargs[arg] for arg in args_to_keep if arg in kwargs}

+     def translate_model_params(self, params: StandardModelParams) -> "SubmitInputArgs":
+         res: "SubmitInputArgs" = {}
+         if "temperature" in params:
+             res["temperature"] = params["temperature"]
+
+         if "top_p" in params:
+             res["top_p"] = params["top_p"]
+
+         if "top_k" in params:
+             res["top_k"] = params["top_k"]
+
+         if "max_tokens" in params:
+             res["max_tokens"] = params["max_tokens"]
+
+         if "stop_sequences" in params:
+             res["stop_sequences"] = params["stop_sequences"]
+
+         return res
+
+     def supported_model_params(self) -> set[StandardModelParamNames]:
+         return {
+             "temperature",
+             "top_p",
+             "top_k",
+             "max_tokens",
+             "stop_sequences",
+         }
+
      def _as_message_params(self, turns: list[Turn]) -> list["MessageParam"]:
          messages: list["MessageParam"] = []
          for turn in turns:
@@ -473,10 +497,13 @@ class AnthropicProvider(Provider[Message, RawMessageStreamEvent, Message]):
                  },
              }
          elif isinstance(content, ContentImageRemote):
-             raise NotImplementedError(
-                 "Remote images aren't supported by Anthropic (Claude). "
-                 "Consider downloading the image and using content_image_file() instead."
-             )
+             return {
+                 "type": "image",
+                 "source": {
+                     "type": "url",
+                     "url": content.url,
+                 },
+             }
          elif isinstance(content, ContentToolRequest):
              return {
                  "type": "tool_use",
@@ -490,8 +517,26 @@ class AnthropicProvider(Provider[Message, RawMessageStreamEvent, Message]):
                  "tool_use_id": content.id,
                  "is_error": content.error is not None,
              }
-             # Anthropic supports non-text contents like ImageBlockParam
-             res["content"] = content.get_model_value()  # type: ignore
+
+             if isinstance(content, ContentToolResultImage):
+                 res["content"] = [
+                     {
+                         "type": "image",
+                         "source": {
+                             "type": "base64",
+                             "media_type": content.mime_type,
+                             "data": content.value,
+                         },
+                     }
+                 ]
+             elif isinstance(content, ContentToolResultResource):
+                 raise NotImplementedError(
+                     "ContentToolResultResource is not currently supported by Anthropic."
+                 )
+             else:
+                 # Anthropic supports non-text contents like ImageBlockParam
+                 res["content"] = content.get_model_value()  # type: ignore
+
              return res

          raise ValueError(f"Unknown content type: {type(content)}")
@@ -565,7 +610,6 @@ def ChatBedrockAnthropic(
      aws_session_token: Optional[str] = None,
      base_url: Optional[str] = None,
      system_prompt: Optional[str] = None,
-     turns: Optional[list[Turn]] = None,
      kwargs: Optional["ChatBedrockClientArgs"] = None,
  ) -> Chat["SubmitInputArgs", Message]:
      """
@@ -631,13 +675,6 @@ def ChatBedrockAnthropic(
          `f"https://bedrock-runtime.{aws_region}.amazonaws.com"`.
      system_prompt
          A system prompt to set the behavior of the assistant.
-     turns
-         A list of turns to start the chat with (i.e., continuing a previous
-         conversation). If not provided, the conversation begins from scratch. Do
-         not provide non-None values for both `turns` and `system_prompt`. Each
-         message in the list should be a dictionary with at least `role` (usually
-         `system`, `user`, or `assistant`, but `tool` is also possible). Normally
-         there is also a `content` field, which is a string.
      kwargs
          Additional arguments to pass to the `anthropic.AnthropicBedrock()`
          client constructor.
@@ -711,10 +748,7 @@ def ChatBedrockAnthropic(
              base_url=base_url,
              kwargs=kwargs,
          ),
-         turns=normalize_turns(
-             turns or [],
-             system_prompt,
-         ),
+         system_prompt=system_prompt,
      )


@@ -728,10 +762,13 @@ class AnthropicBedrockProvider(AnthropicProvider):
          aws_region: str | None,
          aws_profile: str | None,
          aws_session_token: str | None,
-         max_tokens: int,
+         max_tokens: int = 4096,
          base_url: str | None,
+         name: str = "AnthropicBedrock",
          kwargs: Optional["ChatBedrockClientArgs"] = None,
      ):
+         super().__init__(name=name, model=model, max_tokens=max_tokens)
+
          try:
              from anthropic import AnthropicBedrock, AsyncAnthropicBedrock
          except ImportError:
@@ -740,9 +777,6 @@ class AnthropicBedrockProvider(AnthropicProvider):
                  "Install it with `pip install anthropic[bedrock]`."
              )

-         self._model = model
-         self._max_tokens = max_tokens
-
          kwargs_full: "ChatBedrockClientArgs" = {
              "aws_secret_key": aws_secret_key,
              "aws_access_key": aws_access_key,
{chatlas-0.8.1 → chatlas-0.9.1}/chatlas/_auto.py
@@ -15,7 +15,6 @@ from ._ollama import ChatOllama
  from ._openai import ChatAzureOpenAI, ChatOpenAI
  from ._perplexity import ChatPerplexity
  from ._snowflake import ChatSnowflake
- from ._turn import Turn

  AutoProviders = Literal[
      "anthropic",
@@ -50,7 +49,6 @@ _provider_chat_model_map: dict[AutoProviders, Callable[..., Chat]] = {

  def ChatAuto(
      system_prompt: Optional[str] = None,
-     turns: Optional[list[Turn]] = None,
      *,
      provider: Optional[AutoProviders] = None,
      model: Optional[str] = None,
@@ -111,6 +109,8 @@ def ChatAuto(

      Parameters
      ----------
+     system_prompt
+         A system prompt to set the behavior of the assistant.
      provider
          The name of the default chat provider to use. Providers are strings
          formatted in kebab-case, e.g. to use `ChatBedrockAnthropic` set
@@ -123,15 +123,6 @@ def ChatAuto(
          The name of the default model to use. This value can also be provided
          via the `CHATLAS_CHAT_MODEL` environment variable, which takes
          precedence over `model` when set.
-     system_prompt
-         A system prompt to set the behavior of the assistant.
-     turns
-         A list of turns to start the chat with (i.e., continuing a previous
-         conversation). If not provided, the conversation begins from scratch. Do
-         not provide non-`None` values for both `turns` and `system_prompt`. Each
-         message in the list should be a dictionary with at least `role` (usually
-         `system`, `user`, or `assistant`, but `tool` is also possible). Normally
-         there is also a `content` field, which is a string.
      **kwargs
          Additional keyword arguments to pass to the Chat constructor. See the
          documentation for each provider for more details on the available
@@ -169,7 +160,7 @@ def ChatAuto(
          )

      # `system_prompt` and `turns` always come from `ChatAuto()`
-     base_args = {"system_prompt": system_prompt, "turns": turns}
+     base_args = {"system_prompt": system_prompt}

      if env_model := os.environ.get("CHATLAS_CHAT_MODEL"):
          model = env_model
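Finally, the `ChatAuto()` hunks above drop the `turns` parameter but keep the environment-variable override for the model. A brief sketch of the resulting usage follows; the provider and model values shown are placeholders, not recommendations from the package.

```python
import os

from chatlas import ChatAuto

# Per the docstring above, CHATLAS_CHAT_MODEL takes precedence over `model`.
os.environ["CHATLAS_CHAT_MODEL"] = "claude-3-7-sonnet-latest"  # placeholder value

chat = ChatAuto(
    system_prompt="You are a helpful assistant.",
    provider="anthropic",  # kebab-case provider name, e.g. "bedrock-anthropic"
)
chat.chat("Hello!")
```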