chatlas 0.11.1__tar.gz → 0.12.0__tar.gz

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.


Files changed (171)
  1. {chatlas-0.11.1 → chatlas-0.12.0}/.vscode/settings.json +9 -2
  2. {chatlas-0.11.1 → chatlas-0.12.0}/CHANGELOG.md +23 -6
  3. {chatlas-0.11.1 → chatlas-0.12.0}/PKG-INFO +1 -1
  4. chatlas-0.12.0/chatlas/_auto.py +290 -0
  5. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_chat.py +10 -1
  6. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_content.py +11 -6
  7. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_tools.py +11 -3
  8. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_version.py +2 -2
  9. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/types/anthropic/_submit.py +2 -2
  10. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/types/openai/_client.py +2 -2
  11. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/types/openai/_client_azure.py +2 -2
  12. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/types/openai/_submit.py +2 -2
  13. {chatlas-0.11.1 → chatlas-0.12.0}/docs/get-started/models.qmd +28 -7
  14. {chatlas-0.11.1 → chatlas-0.12.0}/scripts/_generate_openai_types.py +14 -3
  15. {chatlas-0.11.1 → chatlas-0.12.0}/tests/conftest.py +2 -2
  16. chatlas-0.12.0/tests/test_auto.py +266 -0
  17. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_tools_enhanced.py +131 -0
  18. chatlas-0.11.1/chatlas/_auto.py +0 -178
  19. chatlas-0.11.1/tests/test_auto.py +0 -109
  20. {chatlas-0.11.1 → chatlas-0.12.0}/.github/workflows/check-update-types.yml +0 -0
  21. {chatlas-0.11.1 → chatlas-0.12.0}/.github/workflows/docs-publish.yml +0 -0
  22. {chatlas-0.11.1 → chatlas-0.12.0}/.github/workflows/release.yml +0 -0
  23. {chatlas-0.11.1 → chatlas-0.12.0}/.github/workflows/test.yml +0 -0
  24. {chatlas-0.11.1 → chatlas-0.12.0}/.github/workflows/update-pricing.yml +0 -0
  25. {chatlas-0.11.1 → chatlas-0.12.0}/.gitignore +0 -0
  26. {chatlas-0.11.1 → chatlas-0.12.0}/.vscode/extensions.json +0 -0
  27. {chatlas-0.11.1 → chatlas-0.12.0}/CLAUDE.md +0 -0
  28. {chatlas-0.11.1 → chatlas-0.12.0}/LICENSE +0 -0
  29. {chatlas-0.11.1 → chatlas-0.12.0}/Makefile +0 -0
  30. {chatlas-0.11.1 → chatlas-0.12.0}/README.md +0 -0
  31. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/__init__.py +0 -0
  32. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_callbacks.py +0 -0
  33. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_content_image.py +0 -0
  34. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_content_pdf.py +0 -0
  35. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_display.py +0 -0
  36. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_interpolate.py +0 -0
  37. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_live_render.py +0 -0
  38. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_logging.py +0 -0
  39. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_mcp_manager.py +0 -0
  40. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_merge.py +0 -0
  41. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider.py +0 -0
  42. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_anthropic.py +0 -0
  43. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_cloudflare.py +0 -0
  44. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_databricks.py +0 -0
  45. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_deepseek.py +0 -0
  46. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_github.py +0 -0
  47. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_google.py +0 -0
  48. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_groq.py +0 -0
  49. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_huggingface.py +0 -0
  50. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_mistral.py +0 -0
  51. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_ollama.py +0 -0
  52. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_openai.py +0 -0
  53. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_openrouter.py +0 -0
  54. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_perplexity.py +0 -0
  55. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_portkey.py +0 -0
  56. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_provider_snowflake.py +0 -0
  57. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_tokens.py +0 -0
  58. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_tokens_old.py +0 -0
  59. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_turn.py +0 -0
  60. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_typing_extensions.py +0 -0
  61. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/_utils.py +0 -0
  62. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/data/prices.json +0 -0
  63. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/py.typed +0 -0
  64. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/types/__init__.py +0 -0
  65. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/types/anthropic/__init__.py +0 -0
  66. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/types/anthropic/_client.py +0 -0
  67. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/types/anthropic/_client_bedrock.py +0 -0
  68. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/types/google/__init__.py +0 -0
  69. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/types/google/_client.py +0 -0
  70. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/types/google/_submit.py +0 -0
  71. {chatlas-0.11.1 → chatlas-0.12.0}/chatlas/types/openai/__init__.py +0 -0
  72. {chatlas-0.11.1 → chatlas-0.12.0}/docs/.gitignore +0 -0
  73. {chatlas-0.11.1 → chatlas-0.12.0}/docs/_extensions/machow/interlinks/.gitignore +0 -0
  74. {chatlas-0.11.1 → chatlas-0.12.0}/docs/_extensions/machow/interlinks/_extension.yml +0 -0
  75. {chatlas-0.11.1 → chatlas-0.12.0}/docs/_extensions/machow/interlinks/interlinks.lua +0 -0
  76. {chatlas-0.11.1 → chatlas-0.12.0}/docs/_quarto.yml +0 -0
  77. {chatlas-0.11.1 → chatlas-0.12.0}/docs/_sidebar.yml +0 -0
  78. {chatlas-0.11.1 → chatlas-0.12.0}/docs/congressional-assets.png +0 -0
  79. {chatlas-0.11.1 → chatlas-0.12.0}/docs/get-started/async.qmd +0 -0
  80. {chatlas-0.11.1 → chatlas-0.12.0}/docs/get-started/chat.qmd +0 -0
  81. {chatlas-0.11.1 → chatlas-0.12.0}/docs/get-started/chatbots.qmd +0 -0
  82. {chatlas-0.11.1 → chatlas-0.12.0}/docs/get-started/debug.qmd +0 -0
  83. {chatlas-0.11.1 → chatlas-0.12.0}/docs/get-started/monitor.qmd +0 -0
  84. {chatlas-0.11.1 → chatlas-0.12.0}/docs/get-started/parameters.qmd +0 -0
  85. {chatlas-0.11.1 → chatlas-0.12.0}/docs/get-started/stream.qmd +0 -0
  86. {chatlas-0.11.1 → chatlas-0.12.0}/docs/get-started/structured-data.qmd +0 -0
  87. {chatlas-0.11.1 → chatlas-0.12.0}/docs/get-started/system-prompt.qmd +0 -0
  88. {chatlas-0.11.1 → chatlas-0.12.0}/docs/get-started/tools.qmd +0 -0
  89. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/chat-app.png +0 -0
  90. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/chat-console.mp4 +0 -0
  91. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/chat-console.png +0 -0
  92. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/chat-notebook.mp4 +0 -0
  93. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/chat-parameters.png +0 -0
  94. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/chatbot-gradio.png +0 -0
  95. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/chatbot-shiny.png +0 -0
  96. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/chatbot-streamlit.png +0 -0
  97. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/chatbot-textual.png +0 -0
  98. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/chatlas-hello.png +0 -0
  99. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/client-parameters.png +0 -0
  100. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/congressional-assets.png +0 -0
  101. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/hello-chat-console.png +0 -0
  102. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/model-parameters.png +0 -0
  103. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/model-type-hints.png +0 -0
  104. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/posit-logo.png +0 -0
  105. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/shiny-mcp-run-python.png +0 -0
  106. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/shiny-tool-call-display.png +0 -0
  107. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/shiny-tool-call-map.png +0 -0
  108. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/tool-calling-right.svg +0 -0
  109. {chatlas-0.11.1 → chatlas-0.12.0}/docs/images/tool-calling-wrong.svg +0 -0
  110. {chatlas-0.11.1 → chatlas-0.12.0}/docs/index.qmd +0 -0
  111. {chatlas-0.11.1 → chatlas-0.12.0}/docs/logos/hero/hero-old.png +0 -0
  112. {chatlas-0.11.1 → chatlas-0.12.0}/docs/logos/hero/hero.png +0 -0
  113. {chatlas-0.11.1 → chatlas-0.12.0}/docs/logos/hex/logo.png +0 -0
  114. {chatlas-0.11.1 → chatlas-0.12.0}/docs/logos/small/logo.png +0 -0
  115. {chatlas-0.11.1 → chatlas-0.12.0}/docs/misc/RAG.qmd +0 -0
  116. {chatlas-0.11.1 → chatlas-0.12.0}/docs/misc/examples.qmd +0 -0
  117. {chatlas-0.11.1 → chatlas-0.12.0}/docs/misc/mcp-tools.qmd +0 -0
  118. {chatlas-0.11.1 → chatlas-0.12.0}/docs/misc/vocabulary.qmd +0 -0
  119. {chatlas-0.11.1 → chatlas-0.12.0}/docs/structured-data/article-summary.qmd +0 -0
  120. {chatlas-0.11.1 → chatlas-0.12.0}/docs/structured-data/classification.qmd +0 -0
  121. {chatlas-0.11.1 → chatlas-0.12.0}/docs/structured-data/entity-recognition.qmd +0 -0
  122. {chatlas-0.11.1 → chatlas-0.12.0}/docs/structured-data/multi-modal.qmd +0 -0
  123. {chatlas-0.11.1 → chatlas-0.12.0}/docs/structured-data/sentiment-analysis.qmd +0 -0
  124. {chatlas-0.11.1 → chatlas-0.12.0}/docs/styles.scss +0 -0
  125. {chatlas-0.11.1 → chatlas-0.12.0}/docs/tool-calling/approval.qmd +0 -0
  126. {chatlas-0.11.1 → chatlas-0.12.0}/docs/tool-calling/displays.qmd +0 -0
  127. {chatlas-0.11.1 → chatlas-0.12.0}/docs/tool-calling/how-it-works.qmd +0 -0
  128. {chatlas-0.11.1 → chatlas-0.12.0}/docs/why-chatlas.qmd +0 -0
  129. {chatlas-0.11.1 → chatlas-0.12.0}/pyproject.toml +0 -0
  130. {chatlas-0.11.1 → chatlas-0.12.0}/pytest.ini +0 -0
  131. {chatlas-0.11.1 → chatlas-0.12.0}/scripts/_generate_anthropic_types.py +0 -0
  132. {chatlas-0.11.1 → chatlas-0.12.0}/scripts/_generate_google_types.py +0 -0
  133. {chatlas-0.11.1 → chatlas-0.12.0}/scripts/_utils.py +0 -0
  134. {chatlas-0.11.1 → chatlas-0.12.0}/scripts/main.py +0 -0
  135. {chatlas-0.11.1 → chatlas-0.12.0}/tests/__init__.py +0 -0
  136. {chatlas-0.11.1 → chatlas-0.12.0}/tests/__snapshots__/test_chat.ambr +0 -0
  137. {chatlas-0.11.1 → chatlas-0.12.0}/tests/apples.pdf +0 -0
  138. {chatlas-0.11.1 → chatlas-0.12.0}/tests/images/dice.png +0 -0
  139. {chatlas-0.11.1 → chatlas-0.12.0}/tests/mcp_servers/http_add.py +0 -0
  140. {chatlas-0.11.1 → chatlas-0.12.0}/tests/mcp_servers/http_current_date.py +0 -0
  141. {chatlas-0.11.1 → chatlas-0.12.0}/tests/mcp_servers/stdio_current_date.py +0 -0
  142. {chatlas-0.11.1 → chatlas-0.12.0}/tests/mcp_servers/stdio_subtract_multiply.py +0 -0
  143. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_callbacks.py +0 -0
  144. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_chat.py +0 -0
  145. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_content.py +0 -0
  146. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_content_html.py +0 -0
  147. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_content_image.py +0 -0
  148. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_content_pdf.py +0 -0
  149. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_content_tools.py +0 -0
  150. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_interpolate.py +0 -0
  151. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_mcp_client.py +0 -0
  152. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_provider_anthropic.py +0 -0
  153. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_provider_azure.py +0 -0
  154. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_provider_bedrock.py +0 -0
  155. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_provider_cloudflare.py +0 -0
  156. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_provider_databricks.py +0 -0
  157. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_provider_deepseek.py +0 -0
  158. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_provider_github.py +0 -0
  159. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_provider_google.py +0 -0
  160. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_provider_huggingface.py +0 -0
  161. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_provider_mistral.py +0 -0
  162. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_provider_openai.py +0 -0
  163. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_provider_openrouter.py +0 -0
  164. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_provider_portkey.py +0 -0
  165. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_provider_snowflake.py +0 -0
  166. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_register_tool_models.py +0 -0
  167. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_set_model_params.py +0 -0
  168. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_tokens.py +0 -0
  169. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_tool_from_mcp.py +0 -0
  170. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_turns.py +0 -0
  171. {chatlas-0.11.1 → chatlas-0.12.0}/tests/test_utils_merge.py +0 -0
@@ -7,5 +7,12 @@
  },
  "editor.defaultFormatter": "charliermarsh.ruff",
  },
- "flake8.args": ["--max-line-length=120"]
- }
+ "flake8.args": [
+   "--max-line-length=120"
+ ],
+ "python.testing.pytestArgs": [
+   "tests"
+ ],
+ "python.testing.unittestEnabled": false,
+ "python.testing.pytestEnabled": true
+ }
@@ -7,6 +7,24 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
  -->
  
+ ## [0.12.0] - 2025-09-08
+
+ ### Breaking changes
+
+ * `ChatAuto()`'s first (optional) positional parameter has changed from `system_prompt` to `provider_model`, and `system_prompt` is now a keyword parameter. As a result, you may need to change `ChatAuto("[system prompt]")` -> `ChatAuto(system_prompt="[system prompt]")`. In addition, the `provider` and `model` keyword arguments are now deprecated, but continue to work with a warning, as are the previous `CHATLAS_CHAT_PROVIDER` and `CHATLAS_CHAT_MODEL` environment variables. (#159)
+
+ ### New features
+
+ * `ChatAuto()`'s new `provider_model` takes both provider and model in a single string in the format `"{provider}/{model}"`, e.g. `"openai/gpt-5"`. If not provided, `ChatAuto()` looks for the `CHATLAS_CHAT_PROVIDER_MODEL` environment variable, defaulting to `"openai"` if neither are provided. Unlike previous versions of `ChatAuto()`, the environment variables are now used *only if function arguments are not provided*. In other words, if `provider_model` is given, the `CHATLAS_CHAT_PROVIDER_MODEL` environment variable is ignored. Similarly, `CHATLAS_CHAT_ARGS` are only used if no `kwargs` are provided. This improves interactive use cases, makes it easier to introduce application-specific environment variables, and puts more control in the hands of the developer. (#159)
+ * The `.register_tool()` method now:
+     * Accepts a `Tool` instance as input. This is primarily useful for binding things like `annotations` to the `Tool` in one place, and registering it in another. (#172)
+     * Supports function parameter names that start with an underscore. (#174)
+ * The `ToolAnnotations` type gains an `extra` key field -- providing a place for providing additional information that other consumers of tool annotations (e.g., [shinychat](https://posit-dev.github.io/shinychat/)) may make use of.
+
+ ### Bug fixes
+
+ * `ChatAuto()` now supports recently added providers such as `ChatCloudflare()`, `ChatDeepseek()`, `ChatHuggingFace()`, etc. (#159)
+
  ## [0.11.1] - 2025-08-29
  
  ### New features
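To make the `ChatAuto()` changes in the 0.12.0 entry above concrete, here is a minimal before/after sketch; the system prompt and model name are placeholders for illustration, not values prescribed by this release:

```python
from chatlas import ChatAuto

# Before 0.12.0: the system prompt was the first positional argument, and
# provider/model were separate keyword arguments (now deprecated).
chat = ChatAuto("You are a helpful assistant.", provider="anthropic")

# From 0.12.0: provider and model are a single "{provider}/{model}" string,
# and the system prompt must be passed by keyword.
chat = ChatAuto(
    "anthropic/claude-sonnet-4-0",
    system_prompt="You are a helpful assistant.",
)
```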
@@ -22,7 +40,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
  
  * `.register_tool(annotations=annotations)` drops support for `mcp.types.ToolAnnotations()` and instead expects a dictionary of the same info. (#164)
  
-
  ## [0.11.0] - 2025-08-26
  
  ### New features
@@ -42,7 +59,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
  
  ### New features
  
- * Added `ChatCloudflare()` for chatting via [Cloudflare AI](https://developers.cloudflare.com/workers-ai/get-started/rest-api/). (#150)
+ * Added `ChatCloudflare()` for chatting via [Cloudflare AI](https://developers.cloudflare.com/workers-ai/get-started/rest-api/). (#150)
  * Added `ChatDeepSeek()` for chatting via [DeepSeek](https://www.deepseek.com/). (#147)
  * Added `ChatOpenRouter()` for chatting via [Open Router](https://openrouter.ai/). (#148)
  * Added `ChatHuggingFace()` for chatting via [Hugging Face](https://huggingface.co/). (#144)
@@ -78,7 +95,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
  
  ### New features
  
- * `Chat` gains a handful of new methods:
+ * `Chat` gains a handful of new methods:
      * `.register_mcp_tools_http_stream_async()` and `.register_mcp_tools_stdio_async()`: for registering tools from a [MCP server](https://modelcontextprotocol.io/). (#39)
      * `.get_tools()` and `.set_tools()`: for fine-grained control over registered tools. (#39)
      * `.set_model_params()`: for setting common LLM parameters in a model-agnostic fashion. (#127)
@@ -87,7 +104,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
  * Tool functions passed to `.register_tool()` can now `yield` numerous results. (#39)
  * A `ContentToolResultImage` content class was added for returning images from tools. It is currently only works with `ChatAnthropic`. (#39)
  * A `Tool` can now be constructed from a pre-existing tool schema (via a new `__init__` method). (#39)
- * The `Chat.app()` method gains a `host` parameter. (#122)
+ * The `Chat.app()` method gains a `host` parameter. (#122)
  * `ChatGithub()` now supports the more standard `GITHUB_TOKEN` environment variable for storing the API key. (#123)
  
  ### Changes
@@ -149,7 +166,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
  
  ## [0.7.1] - 2025-05-10
  
- * Added `openai` as a hard dependency, making installation easier for a wide range of use cases. (#91)
+ * Added `openai` as a hard dependency, making installation easier for a wide range of use cases. (#91)
  
  ## [0.7.0] - 2025-04-22
  
@@ -159,7 +176,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
  * `.stream()` and `.stream_async()` gain a `content` argument. Set this to `"all"` to include `ContentToolResult`/`ContentToolRequest` objects in the stream. (#75)
  * `ContentToolResult`/`ContentToolRequest` are now exported to `chatlas` namespace. (#75)
  * `ContentToolResult`/`ContentToolRequest` gain a `.tagify()` method so they render sensibly in a Shiny app. (#75)
- * A tool can now return a `ContentToolResult`. This is useful for:
+ * A tool can now return a `ContentToolResult`. This is useful for:
      * Specifying the format used for sending the tool result to the chat model (`model_format`). (#87)
      * Custom rendering of the tool result (by overriding relevant methods in a subclass). (#75)
  * `Chat` gains a new `.current_display` property. When a `.chat()` or `.stream()` is currently active, this property returns an object with a `.echo()` method (to echo new content to the display). This is primarily useful for displaying custom content during a tool call. (#79)
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: chatlas
- Version: 0.11.1
+ Version: 0.12.0
  Summary: A simple and consistent interface for chatting with LLMs
  Project-URL: Homepage, https://posit-dev.github.io/chatlas
  Project-URL: Documentation, https://posit-dev.github.io/chatlas
@@ -0,0 +1,290 @@
+ from __future__ import annotations
+
+ import os
+ import warnings
+ from typing import Callable, Literal, Optional
+
+ import orjson
+
+ from ._chat import Chat
+ from ._provider_anthropic import ChatAnthropic, ChatBedrockAnthropic
+ from ._provider_cloudflare import ChatCloudflare
+ from ._provider_databricks import ChatDatabricks
+ from ._provider_deepseek import ChatDeepSeek
+ from ._provider_github import ChatGithub
+ from ._provider_google import ChatGoogle, ChatVertex
+ from ._provider_groq import ChatGroq
+ from ._provider_huggingface import ChatHuggingFace
+ from ._provider_mistral import ChatMistral
+ from ._provider_ollama import ChatOllama
+ from ._provider_openai import ChatAzureOpenAI, ChatOpenAI
+ from ._provider_openrouter import ChatOpenRouter
+ from ._provider_perplexity import ChatPerplexity
+ from ._provider_portkey import ChatPortkey
+ from ._provider_snowflake import ChatSnowflake
+ from ._utils import MISSING_TYPE as DEPRECATED_TYPE
+
+ AutoProviders = Literal[
+     "anthropic",
+     "bedrock-anthropic",
+     "cloudflare",
+     "databricks",
+     "deep-seek",
+     "github",
+     "google",
+     "groq",
+     "hugging-face",
+     "mistral",
+     "ollama",
+     "openai",
+     "azure-openai",
+     "open-router",
+     "perplexity",
+     "portkey",
+     "snowflake",
+     "vertex",
+ ]
+
+ _provider_chat_model_map: dict[AutoProviders, Callable[..., Chat]] = {
+     "anthropic": ChatAnthropic,
+     "bedrock-anthropic": ChatBedrockAnthropic,
+     "cloudflare": ChatCloudflare,
+     "databricks": ChatDatabricks,
+     "deep-seek": ChatDeepSeek,
+     "github": ChatGithub,
+     "google": ChatGoogle,
+     "groq": ChatGroq,
+     "hugging-face": ChatHuggingFace,
+     "mistral": ChatMistral,
+     "ollama": ChatOllama,
+     "openai": ChatOpenAI,
+     "azure-openai": ChatAzureOpenAI,
+     "open-router": ChatOpenRouter,
+     "perplexity": ChatPerplexity,
+     "portkey": ChatPortkey,
+     "snowflake": ChatSnowflake,
+     "vertex": ChatVertex,
+ }
+
+ DEPRECATED = DEPRECATED_TYPE()
+
+
+ def ChatAuto(
+     provider_model: Optional[str] = None,
+     *,
+     system_prompt: Optional[str] = None,
+     provider: AutoProviders | DEPRECATED_TYPE = DEPRECATED,
+     model: str | DEPRECATED_TYPE = DEPRECATED,
+     **kwargs,
+ ) -> Chat:
+     """
+     Chat with any provider.
+
+     This is a generic interface to all the other `Chat*()` functions, allowing
+     you to pick the provider (and model) with a simple string.
+
+     Prerequisites
+     -------------
+
+     ::: {.callout-note}
+     ## API key
+
+     Follow the instructions for the specific provider to obtain an API key.
+     :::
+
+     ::: {.callout-note}
+     ## Python requirements
+
+     Follow the instructions for the specific provider to install the required
+     Python packages.
+     :::
+
+     Examples
+     --------
+
+     `ChatAuto()` makes it easy to switch between different chat providers and models.
+
+     ```python
+     import pandas as pd
+     from chatlas import ChatAuto
+
+     # Default provider (OpenAI) & model
+     chat = ChatAuto()
+     print(chat.provider.name)
+     print(chat.provider.model)
+
+     # Different provider (Anthropic) & default model
+     chat = ChatAuto("anthropic")
+
+     # List models available through the provider
+     models = chat.list_models()
+     print(pd.DataFrame(models))
+
+     # Choose specific provider/model (Claude Sonnet 4)
+     chat = ChatAuto("anthropic/claude-sonnet-4-0")
+     ```
+
+     The default provider/model can also be controlled through an environment variable:
+
+     ```bash
+     export CHATLAS_CHAT_PROVIDER_MODEL="anthropic/claude-sonnet-4-0"
+     ```
+
+     ```python
+     from chatlas import ChatAuto
+
+     chat = ChatAuto()
+     print(chat.provider.name)  # anthropic
+     print(chat.provider.model)  # claude-sonnet-4-0
+     ```
+
+     For application-specific configurations, consider defining your own environment variables:
+
+     ```bash
+     export MYAPP_PROVIDER_MODEL="google/gemini-2.5-flash"
+     ```
+
+     And passing them to `ChatAuto()` as an alternative way to configure the provider/model:
+
+     ```python
+     import os
+     from chatlas import ChatAuto
+
+     chat = ChatAuto(os.getenv("MYAPP_PROVIDER_MODEL"))
+     print(chat.provider.name)  # google
+     print(chat.provider.model)  # gemini-2.5-flash
+     ```
+
+     Parameters
+     ----------
+     provider_model
+         The name of the provider and model to use in the format
+         `"{provider}/{model}"`. Providers are strings formatted in kebab-case,
+         e.g. to use `ChatBedrockAnthropic` set `provider="bedrock-anthropic"`,
+         and models are the provider-specific model names, e.g.
+         `"claude-3-7-sonnet-20250219"`. The `/{model}` portion may also be
+         omitted, in which case, the default model for that provider will be
+         used.
+
+         If no value is provided, the `CHATLAS_CHAT_PROVIDER_MODEL` environment
+         variable will be consulted for a fallback value. If this variable is also
+         not set, a default value of `"openai"` is used.
+     system_prompt
+         A system prompt to set the behavior of the assistant.
+     provider
+         Deprecated; use `provider_model` instead.
+     model
+         Deprecated; use `provider_model` instead.
+     **kwargs
+         Additional keyword arguments to pass to the `Chat` constructor. See the
+         documentation for each provider for more details on the available
+         options.
+
+         These arguments can also be provided via the `CHATLAS_CHAT_ARGS`
+         environment variable as a JSON string. When any additional arguments are
+         provided to `ChatAuto()`, the env var is ignored.
+
+         Note that `system_prompt` and `turns` can't be set via environment variables.
+         They must be provided/set directly to/on `ChatAuto()`.
+
+     Note
+     ----
+     If you want to work with a specific provider, but don't know what models are
+     available (or the exact model name), use
+     `ChatAuto('provider_name').list_models()` to list available models. Another
+     option is to use the provider more directly (e.g., `ChatAnthropic()`). There,
+     the `model` parameter may have type hints for available models.
+
+     Returns
+     -------
+     Chat
+         A chat instance using the specified provider.
+
+     Raises
+     ------
+     ValueError
+         If no valid provider is specified either through parameters or
+         environment variables.
+     """
+     if provider is not DEPRECATED:
+         warn_deprecated_param("provider")
+
+     if model is not DEPRECATED:
+         if provider is DEPRECATED:
+             raise ValueError(
+                 "The `model` parameter is deprecated and cannot be used without the `provider` parameter. "
+                 "Use `provider_model` instead."
+             )
+         warn_deprecated_param("model")
+
+     if provider_model is None:
+         provider_model = os.environ.get("CHATLAS_CHAT_PROVIDER_MODEL")
+
+     # Backwards compatibility: construct from old env vars as a fallback
+     if provider_model is None:
+         env_provider = get_legacy_env_var("CHATLAS_CHAT_PROVIDER", provider)
+         env_model = get_legacy_env_var("CHATLAS_CHAT_MODEL", model)
+
+         if env_provider:
+             provider_model = env_provider
+             if env_model:
+                 provider_model += f"/{env_model}"
+
+     # Fall back to OpenAI if nothing is specified
+     if provider_model is None:
+         provider_model = "openai"
+
+     if "/" in provider_model:
+         the_provider, the_model = provider_model.split("/", 1)
+     else:
+         the_provider, the_model = provider_model, None
+
+     if the_provider not in _provider_chat_model_map:
+         raise ValueError(
+             f"Provider name '{the_provider}' is not a known chatlas provider: "
+             f"{', '.join(_provider_chat_model_map.keys())}"
+         )
+
+     # `system_prompt`, `turns` and `model` always come from `ChatAuto()`
+     base_args = {
+         "system_prompt": system_prompt,
+         "turns": None,
+         "model": the_model,
+     }
+
+     # Environment kwargs, used only if no kwargs provided
+     env_kwargs = {}
+     if not kwargs:
+         env_kwargs = orjson.loads(os.environ.get("CHATLAS_CHAT_ARGS", "{}"))
+
+     final_kwargs = {**env_kwargs, **kwargs, **base_args}
+     final_kwargs = {k: v for k, v in final_kwargs.items() if v is not None}
+
+     return _provider_chat_model_map[the_provider](**final_kwargs)
+
+
+ def get_legacy_env_var(
+     env_var_name: str,
+     default: str | DEPRECATED_TYPE,
+ ) -> str | None:
+     env_value = os.environ.get(env_var_name)
+     if env_value:
+         warnings.warn(
+             f"The '{env_var_name}' environment variable is deprecated. "
+             "Use 'CHATLAS_CHAT_PROVIDER_MODEL' instead.",
+             DeprecationWarning,
+             stacklevel=3,
+         )
+         return env_value
+     elif isinstance(default, DEPRECATED_TYPE):
+         return None
+     else:
+         return default
+
+
+ def warn_deprecated_param(param_name: str, stacklevel: int = 3) -> None:
+     warnings.warn(
+         f"The '{param_name}' parameter is deprecated. Use 'provider_model' instead.",
+         DeprecationWarning,
+         stacklevel=stacklevel,
+     )
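As a companion to the `_auto.py` source above, here is a short sketch of the new precedence rules between arguments and environment variables; the extra kwargs (`max_tokens`, `seed`) are assumed provider keyword arguments used purely for illustration:

```python
import os

from chatlas import ChatAuto

# Environment variables act only as fallbacks in 0.12.0.
os.environ["CHATLAS_CHAT_PROVIDER_MODEL"] = "anthropic/claude-sonnet-4-0"
os.environ["CHATLAS_CHAT_ARGS"] = '{"max_tokens": 1024}'  # assumed ChatAnthropic kwarg

chat = ChatAuto()  # provider, model, and extra kwargs all come from the env vars

# Explicit arguments win: CHATLAS_CHAT_PROVIDER_MODEL is ignored here, and
# CHATLAS_CHAT_ARGS is skipped because a kwarg was passed directly.
chat = ChatAuto("openai", seed=42)
```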
@@ -1535,7 +1535,7 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
  
      def register_tool(
          self,
-         func: Callable[..., Any] | Callable[..., Awaitable[Any]],
+         func: Callable[..., Any] | Callable[..., Awaitable[Any]] | Tool,
          *,
          force: bool = False,
          name: Optional[str] = None,
@@ -1629,6 +1629,15 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
          ValueError
              If a tool with the same name already exists and `force` is `False`.
          """
+         if isinstance(func, Tool):
+             name = name or func.name
+             annotations = annotations or func.annotations
+             if model is not None:
+                 func = Tool.from_func(
+                     func.func, name=name, model=model, annotations=annotations
+                 )
+             func = func.func
+
          tool = Tool.from_func(func, name=name, model=model, annotations=annotations)
          if tool.name in self._tools and not force:
              raise ValueError(
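A minimal sketch of the new `Tool`-instance form of `.register_tool()` shown above. The `add` function and its annotations are illustrative; `Tool.from_func()` and the annotations dictionary follow the patterns that appear elsewhere in this diff, and it is assumed that `Tool` is importable from the top-level `chatlas` namespace:

```python
from chatlas import ChatOpenAI, Tool

def add(x: int, y: int) -> int:
    """Add two numbers."""
    return x + y

# Bind annotations to the Tool in one place...
add_tool = Tool.from_func(add, annotations={"title": "Adder", "readOnlyHint": True})

# ...and register the pre-built Tool instance elsewhere (new in 0.12.0).
chat = ChatOpenAI()
chat.register_tool(add_tool)
```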
@@ -6,7 +6,7 @@ from typing import TYPE_CHECKING, Any, Literal, Optional, Union
  import orjson
  from pydantic import BaseModel, ConfigDict
  
- from ._typing_extensions import NotRequired, TypedDict
+ from ._typing_extensions import TypedDict
  
  if TYPE_CHECKING:
      from ._tools import Tool
@@ -24,16 +24,16 @@ class ToolAnnotations(TypedDict, total=False):
      received from untrusted servers.
      """
  
-     title: NotRequired[str]
+     title: str
      """A human-readable title for the tool."""
  
-     readOnlyHint: NotRequired[bool]
+     readOnlyHint: bool
      """
      If true, the tool does not modify its environment.
      Default: false
      """
  
-     destructiveHint: NotRequired[bool]
+     destructiveHint: bool
      """
      If true, the tool may perform destructive updates to its environment.
      If false, the tool performs only additive updates.
@@ -41,7 +41,7 @@ class ToolAnnotations(TypedDict, total=False):
      Default: true
      """
  
-     idempotentHint: NotRequired[bool]
+     idempotentHint: bool
      """
      If true, calling the tool repeatedly with the same arguments
      will have no additional effect on the its environment.
@@ -49,7 +49,7 @@ class ToolAnnotations(TypedDict, total=False):
      Default: false
      """
  
-     openWorldHint: NotRequired[bool]
+     openWorldHint: bool
      """
      If true, this tool may interact with an "open world" of external
      entities. If false, the tool's domain of interaction is closed.
@@ -58,6 +58,11 @@ class ToolAnnotations(TypedDict, total=False):
      Default: true
      """
  
+     extra: dict[str, Any]
+     """
+     Additional metadata about the tool.
+     """
+
  
  ImageContentTypes = Literal[
      "image/png",
@@ -326,13 +326,21 @@ def func_to_basemodel(func: Callable) -> type[BaseModel]:
              )
              annotation = Any
  
+         # create_model() will error if the field name starts with `_` (since Pydantic
+         # uses this to indicate private fields). We can work around this by using an alias.
+         alias = None
+         if name.startswith("_"):
+             field_name, alias = (name.lstrip("_"), name)
+         else:
+             field_name, alias = (name, None)
+
          if param.default != inspect.Parameter.empty:
-             field = Field(default=param.default)
+             field = Field(default=param.default, alias=alias)
          else:
-             field = Field()
+             field = Field(alias=alias)
  
          # Add the field to our fields dict
-         fields[name] = (annotation, field)
+         fields[field_name] = (annotation, field)
  
      return create_model(func.__name__, **fields)
  
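To illustrate the aliasing workaround above from the caller's side, a small sketch of a tool whose parameter name starts with an underscore; the function itself is hypothetical:

```python
from chatlas import ChatOpenAI

def lookup(_id: str) -> str:
    """Look up a record by its identifier."""
    return f"record {_id}"

chat = ChatOpenAI()
# Prior to 0.12.0 this raised an error from Pydantic's create_model();
# the leading underscore is now handled via a field alias.
chat.register_tool(lookup)
```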
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID
  
- __version__ = version = '0.11.1'
- __version_tuple__ = version_tuple = (0, 11, 1)
+ __version__ = version = '0.12.0'
+ __version_tuple__ = version_tuple = (0, 12, 0)
  
  __commit_id__ = commit_id = None
@@ -3,7 +3,7 @@
  # ---------------------------------------------------------
  
  
- from typing import Iterable, Literal, Mapping, Optional, TypedDict, Union
+ from typing import Iterable, Literal, Mapping, Optional, Sequence, TypedDict, Union
  
  import anthropic
  import anthropic.types.message_param
@@ -48,7 +48,7 @@ class SubmitInputArgs(TypedDict, total=False):
          str,
      ]
      service_tier: Union[Literal["auto", "standard_only"], anthropic.NotGiven]
-     stop_sequences: Union[list[str], anthropic.NotGiven]
+     stop_sequences: Union[Sequence[str], anthropic.NotGiven]
      stream: Union[Literal[False], Literal[True], anthropic.NotGiven]
      system: Union[
          str,
@@ -3,14 +3,14 @@
  # ---------------------------------------------------------
  
  
- from typing import Mapping, Optional, TypedDict, Union
+ from typing import Awaitable, Callable, Mapping, Optional, TypedDict, Union
  
  import httpx
  import openai
  
  
  class ChatClientArgs(TypedDict, total=False):
-     api_key: str | None
+     api_key: Union[str, Callable[[], Awaitable[str]], None]
      organization: str | None
      project: str | None
      webhook_secret: str | None
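The widened `api_key` type above comes from the `openai` client (these TypedDicts are generated from `AsyncOpenAI.__init__`, per the generator script later in this diff). A sketch of how it might be used through `ChatOpenAI()`'s client `kwargs`, assuming the underlying async client is the one that accepts a callable key provider:

```python
from chatlas import ChatOpenAI

async def fetch_api_key() -> str:
    # Hypothetical: pull a short-lived key from a secret manager.
    return "sk-..."

# ChatClientArgs.api_key now also admits a zero-argument async callable.
chat = ChatOpenAI(kwargs={"api_key": fetch_api_key})
```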
@@ -2,7 +2,7 @@
  # Do not modify this file. It was generated by `scripts/generate_typed_dicts.py`.
  # ---------------------------------------------------------
  
- from typing import Mapping, Optional, TypedDict
+ from typing import Awaitable, Callable, Mapping, Optional, TypedDict, Union
  
  import httpx
  import openai
@@ -12,7 +12,7 @@ class ChatAzureClientArgs(TypedDict, total=False):
      azure_endpoint: str | None
      azure_deployment: str | None
      api_version: str | None
-     api_key: str | None
+     api_key: Union[str, Callable[[], Awaitable[str]], None]
      azure_ad_token: str | None
      organization: str | None
      project: str | None
@@ -3,7 +3,7 @@
  # ---------------------------------------------------------
  
  
- from typing import Iterable, Literal, Mapping, Optional, TypedDict, Union
+ from typing import Iterable, Literal, Mapping, Optional, Sequence, TypedDict, Union
  
  import openai
  import openai.types.chat.chat_completion_allowed_tool_choice_param
@@ -148,7 +148,7 @@ class SubmitInputArgs(TypedDict, total=False):
      service_tier: Union[
          Literal["auto", "default", "flex", "scale", "priority"], None, openai.NotGiven
      ]
-     stop: Union[str, None, list[str], openai.NotGiven]
+     stop: Union[str, None, Sequence[str], openai.NotGiven]
      store: Union[bool, None, openai.NotGiven]
      stream: Union[Literal[False], None, Literal[True], openai.NotGiven]
      stream_options: Union[
@@ -64,12 +64,38 @@ If you're using `chatlas` inside your organisation, you'll be limited to what yo
  - `ChatOllama()`, which uses [Ollama](https://ollama.com), allows you to run models on your own computer. The biggest models you can run locally aren't as good as the state of the art hosted models, but they also don't share your data and and are effectively free.
  
  
- ### Auto complete
+ ### Model type hints
  
- Some providers like `ChatOpenAI()` and `ChatAnthropic()` provide autocompletion for the `model` parameter. This makes it quick and easy to find the right model id -- just enter `model=""` and you'll get a list of available models to choose from (assuming your IDE supports type hints).
+ Some providers like `ChatOpenAI()` and `ChatAnthropic()` provide type hints for the `model` parameter. This makes it quick and easy to find the right model id -- just enter `model=""` and you'll get a list of available models to choose from (assuming your IDE supports type hints).
  
  ![Screenshot of model autocompletion](/images/model-type-hints.png){class="shadow rounded mb-3" width="67%" }
  
+ ::: callout-tip
+ If the provider doesn't provide these type hints, try using the `.list_models()` method (mentioned below) to find available models.
+ :::
+
+
+ ### Auto provider
+
+ [`ChatAuto()`](../reference/ChatAuto.qmd) provides access to any provider/model combination through one simple string.
+ This makes for a nice interactive/prototyping experience, where you can quickly switch between different models and providers, and leverage `chatlas`' smart defaults:
+
+ ```python
+ from chatlas import ChatAuto
+
+ # Default provider (OpenAI) & model
+ chat = ChatAuto()
+ print(chat.provider.name)
+ print(chat.provider.model)
+
+ # Different provider (Anthropic) & default model
+ chat = ChatAuto("anthropic")
+
+ # Choose specific provider/model (Claude Sonnet 4)
+ chat = ChatAuto("anthropic/claude-sonnet-4-0")
+ ```
+
+
  ### Listing model info
  
  Most providers support the `.list_models()` method, which returns detailed information about all available models, including model IDs, pricing, and metadata. This is particularly useful for:
@@ -107,8 +133,3 @@ Different providers may include different metadata fields in the model informati
  
  - **`id`**: Model identifier to use with the `Chat` constructor
  - **`input`/`output`/`cached_input`**: Token pricing in USD per million tokens
-
-
- ### Auto provider
-
- [`ChatAuto()`](../reference/ChatAuto.qmd) is a special model provider that allows one to configure the model provider through environment variables. This is useful for having a single, simple, script that can run on any model provider, without having to change the code.
@@ -1,10 +1,9 @@
  from pathlib import Path
  
+ from _utils import generate_typeddict_code, write_code_to_file
  from openai import AsyncAzureOpenAI, AsyncOpenAI
  from openai.resources.chat import Completions
  
- from _utils import generate_typeddict_code, write_code_to_file
-
  types_dir = Path(__file__).parent.parent / "chatlas" / "types"
  provider_dir = types_dir / "openai"
  
@@ -28,12 +27,22 @@ init_args = generate_typeddict_code(
      excluded_fields={"self"},
  )
  
+
+ # Temporary workaround for an issue where a type like
+ # Callable[[], Awaitable[str]]
+ # is getting incorrectly transpiled as
+ # Callable[Awaitable[str]]
+ def fix_callable_types(text: str):
+     return text.replace("Callable[Awaitable[str]]", "Callable[[], Awaitable[str]]")
+
+
+ init_args = fix_callable_types(init_args)
+
  write_code_to_file(
      init_args,
      provider_dir / "_client.py",
  )
  
-
  
  init_args = generate_typeddict_code(
      AsyncAzureOpenAI.__init__,
@@ -44,6 +53,8 @@ init_args = generate_typeddict_code(
      },
  )
  
+ init_args = fix_callable_types(init_args)
+
  write_code_to_file(
      init_args,
      provider_dir / "_client_azure.py",