llms-py 2.0.20__py3-none-any.whl → 3.0.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (207)
  1. llms/__init__.py +3 -1
  2. llms/db.py +359 -0
  3. llms/{ui/Analytics.mjs → extensions/analytics/ui/index.mjs} +254 -327
  4. llms/extensions/app/README.md +20 -0
  5. llms/extensions/app/__init__.py +588 -0
  6. llms/extensions/app/db.py +540 -0
  7. llms/{ui → extensions/app/ui}/Recents.mjs +99 -73
  8. llms/{ui/Sidebar.mjs → extensions/app/ui/index.mjs} +139 -68
  9. llms/extensions/app/ui/threadStore.mjs +440 -0
  10. llms/extensions/computer/README.md +96 -0
  11. llms/extensions/computer/__init__.py +59 -0
  12. llms/extensions/computer/base.py +80 -0
  13. llms/extensions/computer/bash.py +185 -0
  14. llms/extensions/computer/computer.py +523 -0
  15. llms/extensions/computer/edit.py +299 -0
  16. llms/extensions/computer/filesystem.py +542 -0
  17. llms/extensions/computer/platform.py +461 -0
  18. llms/extensions/computer/run.py +37 -0
  19. llms/extensions/core_tools/CALCULATOR.md +32 -0
  20. llms/extensions/core_tools/__init__.py +599 -0
  21. llms/extensions/core_tools/ui/codemirror/addon/edit/closebrackets.js +201 -0
  22. llms/extensions/core_tools/ui/codemirror/addon/edit/closetag.js +185 -0
  23. llms/extensions/core_tools/ui/codemirror/addon/edit/continuelist.js +101 -0
  24. llms/extensions/core_tools/ui/codemirror/addon/edit/matchbrackets.js +160 -0
  25. llms/extensions/core_tools/ui/codemirror/addon/edit/matchtags.js +66 -0
  26. llms/extensions/core_tools/ui/codemirror/addon/edit/trailingspace.js +27 -0
  27. llms/extensions/core_tools/ui/codemirror/addon/selection/active-line.js +72 -0
  28. llms/extensions/core_tools/ui/codemirror/addon/selection/mark-selection.js +119 -0
  29. llms/extensions/core_tools/ui/codemirror/addon/selection/selection-pointer.js +98 -0
  30. llms/extensions/core_tools/ui/codemirror/codemirror.css +344 -0
  31. llms/extensions/core_tools/ui/codemirror/codemirror.js +9884 -0
  32. llms/extensions/core_tools/ui/codemirror/doc/docs.css +225 -0
  33. llms/extensions/core_tools/ui/codemirror/doc/source_sans.woff +0 -0
  34. llms/extensions/core_tools/ui/codemirror/mode/clike/clike.js +942 -0
  35. llms/extensions/core_tools/ui/codemirror/mode/javascript/index.html +118 -0
  36. llms/extensions/core_tools/ui/codemirror/mode/javascript/javascript.js +962 -0
  37. llms/extensions/core_tools/ui/codemirror/mode/javascript/typescript.html +62 -0
  38. llms/extensions/core_tools/ui/codemirror/mode/python/python.js +402 -0
  39. llms/extensions/core_tools/ui/codemirror/theme/dracula.css +40 -0
  40. llms/extensions/core_tools/ui/codemirror/theme/mocha.css +135 -0
  41. llms/extensions/core_tools/ui/index.mjs +650 -0
  42. llms/extensions/gallery/README.md +61 -0
  43. llms/extensions/gallery/__init__.py +63 -0
  44. llms/extensions/gallery/db.py +243 -0
  45. llms/extensions/gallery/ui/index.mjs +482 -0
  46. llms/extensions/katex/README.md +39 -0
  47. llms/extensions/katex/__init__.py +6 -0
  48. llms/extensions/katex/ui/README.md +125 -0
  49. llms/extensions/katex/ui/contrib/auto-render.js +338 -0
  50. llms/extensions/katex/ui/contrib/auto-render.min.js +1 -0
  51. llms/extensions/katex/ui/contrib/auto-render.mjs +244 -0
  52. llms/extensions/katex/ui/contrib/copy-tex.js +127 -0
  53. llms/extensions/katex/ui/contrib/copy-tex.min.js +1 -0
  54. llms/extensions/katex/ui/contrib/copy-tex.mjs +105 -0
  55. llms/extensions/katex/ui/contrib/mathtex-script-type.js +109 -0
  56. llms/extensions/katex/ui/contrib/mathtex-script-type.min.js +1 -0
  57. llms/extensions/katex/ui/contrib/mathtex-script-type.mjs +24 -0
  58. llms/extensions/katex/ui/contrib/mhchem.js +3213 -0
  59. llms/extensions/katex/ui/contrib/mhchem.min.js +1 -0
  60. llms/extensions/katex/ui/contrib/mhchem.mjs +3109 -0
  61. llms/extensions/katex/ui/contrib/render-a11y-string.js +887 -0
  62. llms/extensions/katex/ui/contrib/render-a11y-string.min.js +1 -0
  63. llms/extensions/katex/ui/contrib/render-a11y-string.mjs +800 -0
  64. llms/extensions/katex/ui/fonts/KaTeX_AMS-Regular.ttf +0 -0
  65. llms/extensions/katex/ui/fonts/KaTeX_AMS-Regular.woff +0 -0
  66. llms/extensions/katex/ui/fonts/KaTeX_AMS-Regular.woff2 +0 -0
  67. llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Bold.ttf +0 -0
  68. llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Bold.woff +0 -0
  69. llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Bold.woff2 +0 -0
  70. llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Regular.ttf +0 -0
  71. llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Regular.woff +0 -0
  72. llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Regular.woff2 +0 -0
  73. llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Bold.ttf +0 -0
  74. llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Bold.woff +0 -0
  75. llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Bold.woff2 +0 -0
  76. llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Regular.ttf +0 -0
  77. llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Regular.woff +0 -0
  78. llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Regular.woff2 +0 -0
  79. llms/extensions/katex/ui/fonts/KaTeX_Main-Bold.ttf +0 -0
  80. llms/extensions/katex/ui/fonts/KaTeX_Main-Bold.woff +0 -0
  81. llms/extensions/katex/ui/fonts/KaTeX_Main-Bold.woff2 +0 -0
  82. llms/extensions/katex/ui/fonts/KaTeX_Main-BoldItalic.ttf +0 -0
  83. llms/extensions/katex/ui/fonts/KaTeX_Main-BoldItalic.woff +0 -0
  84. llms/extensions/katex/ui/fonts/KaTeX_Main-BoldItalic.woff2 +0 -0
  85. llms/extensions/katex/ui/fonts/KaTeX_Main-Italic.ttf +0 -0
  86. llms/extensions/katex/ui/fonts/KaTeX_Main-Italic.woff +0 -0
  87. llms/extensions/katex/ui/fonts/KaTeX_Main-Italic.woff2 +0 -0
  88. llms/extensions/katex/ui/fonts/KaTeX_Main-Regular.ttf +0 -0
  89. llms/extensions/katex/ui/fonts/KaTeX_Main-Regular.woff +0 -0
  90. llms/extensions/katex/ui/fonts/KaTeX_Main-Regular.woff2 +0 -0
  91. llms/extensions/katex/ui/fonts/KaTeX_Math-BoldItalic.ttf +0 -0
  92. llms/extensions/katex/ui/fonts/KaTeX_Math-BoldItalic.woff +0 -0
  93. llms/extensions/katex/ui/fonts/KaTeX_Math-BoldItalic.woff2 +0 -0
  94. llms/extensions/katex/ui/fonts/KaTeX_Math-Italic.ttf +0 -0
  95. llms/extensions/katex/ui/fonts/KaTeX_Math-Italic.woff +0 -0
  96. llms/extensions/katex/ui/fonts/KaTeX_Math-Italic.woff2 +0 -0
  97. llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Bold.ttf +0 -0
  98. llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Bold.woff +0 -0
  99. llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Bold.woff2 +0 -0
  100. llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Italic.ttf +0 -0
  101. llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Italic.woff +0 -0
  102. llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Italic.woff2 +0 -0
  103. llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Regular.ttf +0 -0
  104. llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Regular.woff +0 -0
  105. llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Regular.woff2 +0 -0
  106. llms/extensions/katex/ui/fonts/KaTeX_Script-Regular.ttf +0 -0
  107. llms/extensions/katex/ui/fonts/KaTeX_Script-Regular.woff +0 -0
  108. llms/extensions/katex/ui/fonts/KaTeX_Script-Regular.woff2 +0 -0
  109. llms/extensions/katex/ui/fonts/KaTeX_Size1-Regular.ttf +0 -0
  110. llms/extensions/katex/ui/fonts/KaTeX_Size1-Regular.woff +0 -0
  111. llms/extensions/katex/ui/fonts/KaTeX_Size1-Regular.woff2 +0 -0
  112. llms/extensions/katex/ui/fonts/KaTeX_Size2-Regular.ttf +0 -0
  113. llms/extensions/katex/ui/fonts/KaTeX_Size2-Regular.woff +0 -0
  114. llms/extensions/katex/ui/fonts/KaTeX_Size2-Regular.woff2 +0 -0
  115. llms/extensions/katex/ui/fonts/KaTeX_Size3-Regular.ttf +0 -0
  116. llms/extensions/katex/ui/fonts/KaTeX_Size3-Regular.woff +0 -0
  117. llms/extensions/katex/ui/fonts/KaTeX_Size3-Regular.woff2 +0 -0
  118. llms/extensions/katex/ui/fonts/KaTeX_Size4-Regular.ttf +0 -0
  119. llms/extensions/katex/ui/fonts/KaTeX_Size4-Regular.woff +0 -0
  120. llms/extensions/katex/ui/fonts/KaTeX_Size4-Regular.woff2 +0 -0
  121. llms/extensions/katex/ui/fonts/KaTeX_Typewriter-Regular.ttf +0 -0
  122. llms/extensions/katex/ui/fonts/KaTeX_Typewriter-Regular.woff +0 -0
  123. llms/extensions/katex/ui/fonts/KaTeX_Typewriter-Regular.woff2 +0 -0
  124. llms/extensions/katex/ui/index.mjs +92 -0
  125. llms/extensions/katex/ui/katex-swap.css +1230 -0
  126. llms/extensions/katex/ui/katex-swap.min.css +1 -0
  127. llms/extensions/katex/ui/katex.css +1230 -0
  128. llms/extensions/katex/ui/katex.js +19080 -0
  129. llms/extensions/katex/ui/katex.min.css +1 -0
  130. llms/extensions/katex/ui/katex.min.js +1 -0
  131. llms/extensions/katex/ui/katex.min.mjs +1 -0
  132. llms/extensions/katex/ui/katex.mjs +18547 -0
  133. llms/extensions/providers/__init__.py +22 -0
  134. llms/extensions/providers/anthropic.py +260 -0
  135. llms/extensions/providers/cerebras.py +36 -0
  136. llms/extensions/providers/chutes.py +153 -0
  137. llms/extensions/providers/google.py +559 -0
  138. llms/extensions/providers/nvidia.py +103 -0
  139. llms/extensions/providers/openai.py +154 -0
  140. llms/extensions/providers/openrouter.py +74 -0
  141. llms/extensions/providers/zai.py +182 -0
  142. llms/extensions/skills/LICENSE +202 -0
  143. llms/extensions/skills/__init__.py +130 -0
  144. llms/extensions/skills/errors.py +25 -0
  145. llms/extensions/skills/models.py +39 -0
  146. llms/extensions/skills/parser.py +178 -0
  147. llms/extensions/skills/ui/index.mjs +376 -0
  148. llms/extensions/skills/ui/skills/create-plan/SKILL.md +74 -0
  149. llms/extensions/skills/validator.py +177 -0
  150. llms/extensions/system_prompts/README.md +22 -0
  151. llms/extensions/system_prompts/__init__.py +45 -0
  152. llms/extensions/system_prompts/ui/index.mjs +276 -0
  153. llms/extensions/system_prompts/ui/prompts.json +1067 -0
  154. llms/extensions/tools/__init__.py +67 -0
  155. llms/extensions/tools/ui/index.mjs +837 -0
  156. llms/index.html +36 -62
  157. llms/llms.json +180 -879
  158. llms/main.py +4009 -912
  159. llms/providers-extra.json +394 -0
  160. llms/providers.json +1 -0
  161. llms/ui/App.mjs +176 -8
  162. llms/ui/ai.mjs +156 -20
  163. llms/ui/app.css +3768 -321
  164. llms/ui/ctx.mjs +459 -0
  165. llms/ui/index.mjs +131 -0
  166. llms/ui/lib/chart.js +14 -0
  167. llms/ui/lib/charts.mjs +16 -0
  168. llms/ui/lib/color.js +14 -0
  169. llms/ui/lib/highlight.min.mjs +1243 -0
  170. llms/ui/lib/idb.min.mjs +8 -0
  171. llms/ui/lib/marked.min.mjs +8 -0
  172. llms/ui/lib/servicestack-client.mjs +1 -0
  173. llms/ui/lib/servicestack-vue.mjs +37 -0
  174. llms/ui/lib/vue-router.min.mjs +6 -0
  175. llms/ui/lib/vue.min.mjs +13 -0
  176. llms/ui/lib/vue.mjs +18530 -0
  177. llms/ui/markdown.mjs +25 -14
  178. llms/ui/modules/chat/ChatBody.mjs +1156 -0
  179. llms/ui/{SettingsDialog.mjs → modules/chat/SettingsDialog.mjs} +74 -74
  180. llms/ui/modules/chat/index.mjs +995 -0
  181. llms/ui/modules/icons.mjs +46 -0
  182. llms/ui/modules/layout.mjs +271 -0
  183. llms/ui/modules/model-selector.mjs +811 -0
  184. llms/ui/tailwind.input.css +560 -78
  185. llms/ui/typography.css +54 -36
  186. llms/ui/utils.mjs +221 -92
  187. llms_py-3.0.18.dist-info/METADATA +49 -0
  188. llms_py-3.0.18.dist-info/RECORD +194 -0
  189. {llms_py-2.0.20.dist-info → llms_py-3.0.18.dist-info}/WHEEL +1 -1
  190. {llms_py-2.0.20.dist-info → llms_py-3.0.18.dist-info}/licenses/LICENSE +1 -2
  191. llms/ui/Avatar.mjs +0 -28
  192. llms/ui/Brand.mjs +0 -34
  193. llms/ui/ChatPrompt.mjs +0 -443
  194. llms/ui/Main.mjs +0 -740
  195. llms/ui/ModelSelector.mjs +0 -60
  196. llms/ui/ProviderIcon.mjs +0 -29
  197. llms/ui/ProviderStatus.mjs +0 -105
  198. llms/ui/SignIn.mjs +0 -64
  199. llms/ui/SystemPromptEditor.mjs +0 -31
  200. llms/ui/SystemPromptSelector.mjs +0 -36
  201. llms/ui/Welcome.mjs +0 -8
  202. llms/ui/threadStore.mjs +0 -524
  203. llms/ui.json +0 -1069
  204. llms_py-2.0.20.dist-info/METADATA +0 -931
  205. llms_py-2.0.20.dist-info/RECORD +0 -36
  206. {llms_py-2.0.20.dist-info → llms_py-3.0.18.dist-info}/entry_points.txt +0 -0
  207. {llms_py-2.0.20.dist-info → llms_py-3.0.18.dist-info}/top_level.txt +0 -0
llms/extensions/providers/__init__.py
@@ -0,0 +1,22 @@
+ from .anthropic import install_anthropic
+ from .cerebras import install_cerebras
+ from .chutes import install_chutes
+ from .google import install_google
+ from .nvidia import install_nvidia
+ from .openai import install_openai
+ from .openrouter import install_openrouter
+ from .zai import install_zai
+
+
+ def install(ctx):
+     install_anthropic(ctx)
+     install_cerebras(ctx)
+     install_chutes(ctx)
+     install_google(ctx)
+     install_nvidia(ctx)
+     install_openai(ctx)
+     install_openrouter(ctx)
+     install_zai(ctx)
+
+
+ __install__ = install
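Note on the extension convention visible in this hunk: each provider module exposes an install_<name>(ctx) hook that registers a provider class via ctx.add_provider(), and the package-level __install__ alias is the entry point the host application calls when it loads the extension. A minimal sketch of a third-party provider module following the same convention is shown below; only OpenAiCompatible, ctx.add_provider() and the __install__ hook come from this diff, while the names and endpoint URL are hypothetical placeholders.

    def install_example(ctx):
        # OpenAiCompatible is imported lazily, mirroring the provider modules in this diff
        from llms.main import OpenAiCompatible

        class ExampleProvider(OpenAiCompatible):
            sdk = "example/openai-compatible"  # hypothetical SDK tag

            def __init__(self, **kwargs):
                # Hypothetical default endpoint for an OpenAI-compatible API
                if "api" not in kwargs:
                    kwargs["api"] = "https://api.example.com/v1"
                super().__init__(**kwargs)

        ctx.add_provider(ExampleProvider)


    __install__ = install_example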
llms/extensions/providers/anthropic.py
@@ -0,0 +1,260 @@
+ import json
+ import time
+
+ import aiohttp
+
+
+ def install_anthropic(ctx):
+     from llms.main import OpenAiCompatible
+
+     class AnthropicProvider(OpenAiCompatible):
+         sdk = "@ai-sdk/anthropic"
+
+         def __init__(self, **kwargs):
+             if "api" not in kwargs:
+                 kwargs["api"] = "https://api.anthropic.com/v1"
+             super().__init__(**kwargs)
+
+             # Anthropic uses x-api-key header instead of Authorization
+             if self.api_key:
+                 self.headers = self.headers.copy()
+                 if "Authorization" in self.headers:
+                     del self.headers["Authorization"]
+                 self.headers["x-api-key"] = self.api_key
+
+             if "anthropic-version" not in self.headers:
+                 self.headers = self.headers.copy()
+                 self.headers["anthropic-version"] = "2023-06-01"
+             self.chat_url = f"{self.api}/messages"
+
+         async def chat(self, chat, context=None):
+             chat["model"] = self.provider_model(chat["model"]) or chat["model"]
+
+             chat = await self.process_chat(chat, provider_id=self.id)
+
+             # Transform OpenAI format to Anthropic format
+             anthropic_request = {
+                 "model": chat["model"],
+                 "messages": [],
+             }
+
+             # Extract system message (Anthropic uses top-level 'system' parameter)
+             system_messages = []
+             for message in chat.get("messages", []):
+                 if message.get("role") == "system":
+                     content = message.get("content", "")
+                     if isinstance(content, str):
+                         system_messages.append(content)
+                     elif isinstance(content, list):
+                         for item in content:
+                             if item.get("type") == "text":
+                                 system_messages.append(item.get("text", ""))
+
+             if system_messages:
+                 anthropic_request["system"] = "\n".join(system_messages)
+
+             # Transform messages (exclude system messages)
+             for message in chat.get("messages", []):
+                 if message.get("role") == "system":
+                     continue
+
+                 if message.get("role") == "tool":
+                     # Convert OpenAI tool response to Anthropic tool_result
+                     tool_call_id = message.get("tool_call_id")
+                     content = ctx.to_content(message.get("content", ""))
+                     if not isinstance(content, (str, list)):
+                         content = str(content)
+
+                     tool_result = {"type": "tool_result", "tool_use_id": tool_call_id, "content": content}
+
+                     # Anthropic requires tool results to be in a user message
+                     # Check if the last message was a user message, if so append to it
+                     if anthropic_request["messages"] and anthropic_request["messages"][-1]["role"] == "user":
+                         anthropic_request["messages"][-1]["content"].append(tool_result)
+                     else:
+                         anthropic_request["messages"].append({"role": "user", "content": [tool_result]})
+                     continue
+
+                 anthropic_message = {"role": message.get("role"), "content": []}
+
+                 # Handle interleaved thinking (must always be a list if present)
+                 if "thinking" in message and message["thinking"]:
+                     anthropic_message["content"].append({"type": "thinking", "thinking": message["thinking"]})
+
+                 content = message.get("content", "")
+                 if isinstance(content, str):
+                     if anthropic_message["content"] or message.get("tool_calls"):
+                         # If we have thinking or tools, we must use blocks for text
+                         if content:
+                             anthropic_message["content"].append({"type": "text", "text": content})
+                     else:
+                         anthropic_message["content"] = content
+                 elif isinstance(content, list):
+                     for item in content:
+                         if item.get("type") == "text":
+                             anthropic_message["content"].append({"type": "text", "text": item.get("text", "")})
+                         elif item.get("type") == "image_url" and "image_url" in item:
+                             # Transform OpenAI image_url format to Anthropic format
+                             image_url = item["image_url"].get("url", "")
+                             if image_url.startswith("data:"):
+                                 # Extract media type and base64 data
+                                 parts = image_url.split(";base64,", 1)
+                                 if len(parts) == 2:
+                                     media_type = parts[0].replace("data:", "")
+                                     base64_data = parts[1]
+                                     anthropic_message["content"].append(
+                                         {
+                                             "type": "image",
+                                             "source": {"type": "base64", "media_type": media_type, "data": base64_data},
+                                         }
+                                     )
+
+                 # Handle tool_calls
+                 if "tool_calls" in message and message["tool_calls"]:
+                     # specific check for content being a string and not empty, because we might have converted it above
+                     if isinstance(anthropic_message["content"], str):
+                         anthropic_message["content"] = []
+                         if content:
+                             anthropic_message["content"].append({"type": "text", "text": content})
+
+                     for tool_call in message["tool_calls"]:
+                         function = tool_call.get("function", {})
+                         tool_use = {
+                             "type": "tool_use",
+                             "id": tool_call.get("id"),
+                             "name": function.get("name"),
+                             "input": json.loads(function.get("arguments", "{}")),
+                         }
+                         anthropic_message["content"].append(tool_use)
+
+                 anthropic_request["messages"].append(anthropic_message)
+
+             # Handle max_tokens (required by Anthropic, uses max_tokens not max_completion_tokens)
+             if "max_completion_tokens" in chat:
+                 anthropic_request["max_tokens"] = chat["max_completion_tokens"]
+             elif "max_tokens" in chat:
+                 anthropic_request["max_tokens"] = chat["max_tokens"]
+             else:
+                 # Anthropic requires max_tokens, set a default
+                 anthropic_request["max_tokens"] = 4096
+
+             # Copy other supported parameters
+             if "temperature" in chat:
+                 anthropic_request["temperature"] = chat["temperature"]
+             if "top_p" in chat:
+                 anthropic_request["top_p"] = chat["top_p"]
+             if "top_k" in chat:
+                 anthropic_request["top_k"] = chat["top_k"]
+             if "stop" in chat:
+                 anthropic_request["stop_sequences"] = chat["stop"] if isinstance(chat["stop"], list) else [chat["stop"]]
+             if "stream" in chat:
+                 anthropic_request["stream"] = chat["stream"]
+             if "tools" in chat:
+                 anthropic_tools = []
+                 for tool in chat["tools"]:
+                     if tool.get("type") == "function":
+                         function = tool.get("function", {})
+                         anthropic_tool = {
+                             "name": function.get("name"),
+                             "description": function.get("description"),
+                             "input_schema": function.get("parameters"),
+                         }
+                         anthropic_tools.append(anthropic_tool)
+                 if anthropic_tools:
+                     anthropic_request["tools"] = anthropic_tools
+             if "tool_choice" in chat:
+                 anthropic_request["tool_choice"] = chat["tool_choice"]
+
+             ctx.log(f"POST {self.chat_url}")
+             ctx.log(json.dumps(anthropic_request, indent=2))
+
+             async with aiohttp.ClientSession() as session:
+                 started_at = time.time()
+                 async with session.post(
+                     self.chat_url,
+                     headers=self.headers,
+                     data=json.dumps(anthropic_request),
+                     timeout=aiohttp.ClientTimeout(total=120),
+                 ) as response:
+                     return ctx.log_json(
+                         self.to_response(await self.response_json(response), chat, started_at, context=context)
+                     )
+
+         def to_response(self, response, chat, started_at, context=None):
+             """Convert Anthropic response format to OpenAI-compatible format."""
+             if context is not None:
+                 context["providerResponse"] = response
+             # Transform Anthropic response to OpenAI format
+             ret = {
+                 "id": response.get("id", ""),
+                 "object": "chat.completion",
+                 "created": int(started_at),
+                 "model": response.get("model", ""),
+                 "choices": [],
+                 "usage": {},
+             }
+
+             # Transform content blocks to message content
+             content_parts = []
+             thinking_parts = []
+             tool_calls = []
+
+             for block in response.get("content", []):
+                 if block.get("type") == "text":
+                     content_parts.append(block.get("text", ""))
+                 elif block.get("type") == "thinking":
+                     # Store thinking blocks separately (some models include reasoning)
+                     thinking_parts.append(block.get("thinking", ""))
+                 elif block.get("type") == "tool_use":
+                     tool_call = {
+                         "id": block.get("id"),
+                         "type": "function",
+                         "function": {
+                             "name": block.get("name"),
+                             "arguments": json.dumps(block.get("input", {})),
+                         },
+                     }
+                     tool_calls.append(tool_call)
+
+             # Combine all text content
+             message_content = "\n".join(content_parts) if content_parts else ""
+
+             # Create the choice object
+             choice = {
+                 "index": 0,
+                 "message": {"role": "assistant", "content": message_content},
+                 "finish_reason": response.get("stop_reason", "stop"),
+             }
+
+             # Add thinking as metadata if present
+             if thinking_parts:
+                 choice["message"]["thinking"] = "\n".join(thinking_parts)
+
+             # Add tool_calls if present
+             if tool_calls:
+                 choice["message"]["tool_calls"] = tool_calls
+
+             ret["choices"].append(choice)
+
+             # Transform usage
+             if "usage" in response:
+                 usage = response["usage"]
+                 ret["usage"] = {
+                     "prompt_tokens": usage.get("input_tokens", 0),
+                     "completion_tokens": usage.get("output_tokens", 0),
+                     "total_tokens": usage.get("input_tokens", 0) + usage.get("output_tokens", 0),
+                 }
+
+             # Add metadata
+             if "metadata" not in ret:
+                 ret["metadata"] = {}
+             ret["metadata"]["duration"] = int(time.time() - started_at)
+
+             if chat is not None and "model" in chat:
+                 cost = self.model_cost(chat["model"])
+                 if cost and "input" in cost and "output" in cost:
+                     ret["metadata"]["pricing"] = f"{cost['input']}/{cost['output']}"
+
+             return ret
+
+     ctx.add_provider(AnthropicProvider)
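For orientation, the chat() override above rewrites an OpenAI-style request into the shape expected by Anthropic's Messages endpoint and POSTs it to {api}/messages with the x-api-key and anthropic-version: 2023-06-01 headers. As a rough sketch (the model name is a hypothetical alias and no max_tokens is supplied, so the 4096 default applies), a request like

    {
      "model": "claude-sonnet",
      "messages": [
        {"role": "system", "content": "Be brief."},
        {"role": "user", "content": "Hi"}
      ]
    }

would be sent roughly as

    {
      "model": "claude-sonnet",
      "system": "Be brief.",
      "messages": [{"role": "user", "content": "Hi"}],
      "max_tokens": 4096
    }

with system messages lifted into the top-level system field. On the way back, to_response() joins text blocks into choices[0].message.content, carries thinking and tool_use blocks over as message.thinking and message.tool_calls, and maps input_tokens/output_tokens onto the usual prompt_tokens/completion_tokens usage fields.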
llms/extensions/providers/cerebras.py
@@ -0,0 +1,36 @@
+ def install_cerebras(ctx):
+     from llms.main import OpenAiCompatible
+
+     class CerebrasProvider(OpenAiCompatible):
+         sdk = "@ai-sdk/cerebras"
+
+         def __init__(self, **kwargs):
+             if "api" not in kwargs:
+                 kwargs["api"] = "https://api.cerebras.ai/v1"
+             super().__init__(**kwargs)
+
+         async def chat(self, chat, context=None):
+             # Cerebras only supports string content for text-only models
+             clean_chat = chat.copy()
+             clean_chat["messages"] = []
+             for msg in chat.get("messages", []):
+                 new_msg = msg.copy()
+                 content = msg.get("content")
+                 if isinstance(content, list):
+                     # Check if text only
+                     is_text_only = True
+                     text_parts = []
+                     for part in content:
+                         if part.get("type") != "text":
+                             is_text_only = False
+                             break
+                         text_parts.append(part.get("text", ""))
+
+                     if is_text_only:
+                         new_msg["content"] = "".join(text_parts)
+                 clean_chat["messages"].append(new_msg)
+
+             clean_chat.pop("modalities", None)
+             return await super().chat(clean_chat, context)
+
+     ctx.add_provider(CerebrasProvider)
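In effect, a multi-part text message such as {"role": "user", "content": [{"type": "text", "text": "Hello "}, {"type": "text", "text": "world"}]} is flattened to {"role": "user", "content": "Hello world"} before being forwarded, messages containing non-text parts (such as image_url blocks) are passed through unchanged, and the unsupported modalities field is dropped from the request.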
llms/extensions/providers/chutes.py
@@ -0,0 +1,153 @@
+ import json
+ import mimetypes
+ import time
+
+ import aiohttp
+
+
+ def install_chutes(ctx):
+     from llms.main import GeneratorBase
+
+     class ChutesImage(GeneratorBase):
+         sdk = "chutes/image"
+
+         def __init__(self, **kwargs):
+             super().__init__(**kwargs)
+             self.width = int(kwargs.get("width", 1024))
+             self.height = int(kwargs.get("height", 1024))
+             self.cfg_scale = float(kwargs.get("cfg_scale", 7.5))
+             self.steps = int(kwargs.get("steps", 50))
+             self.negative_prompt = kwargs.get("negative_prompt", "blur, distortion, low quality")
+             self.gen_url = kwargs.get("api", "https://image.chutes.ai/generate")
+             self.model_resolutions = {
+                 "chutes-hidream": {
+                     "1:1": "1024x1024",
+                     "9:16": "768x1360",
+                     "16:9": "1360x768",
+                     "3:4": "880x1168",
+                     "4:3": "1168x880",
+                     "2:3": "832x1248",
+                     "3:2": "1248x832",
+                 }
+             }
+             self.model_sizes = ["chutes-hunyuan-image-3"]
+             self.model_negative_prompt = [
+                 "chroma",
+                 "qwen-image-edit-2509",
+                 "JuggernautXL-Ragnarok",
+                 "JuggernautXL",
+                 "Animij",
+                 "iLustMix",
+             ]
+
+         async def chat(self, chat, provider=None, context=None):
+             headers = {"Authorization": f"Bearer {self.api_key}"}
+             if provider is not None:
+                 headers["Authorization"] = f"Bearer {provider.api_key}"
+                 chat["model"] = provider.provider_model(chat["model"]) or chat["model"]
+
+             aspect_ratio = "1:1"
+             if "messages" in chat and len(chat["messages"]) > 0:
+                 aspect_ratio = chat["messages"][0].get("aspect_ratio", "1:1")
+             cfg_scale = self.cfg_scale
+             steps = self.steps
+             width = self.width
+             height = self.height
+             if chat["model"] == "chutes-z-image-turbo":
+                 cfg_scale = min(self.cfg_scale, 5)
+             payload = {
+                 "model": chat["model"],
+                 "prompt": ctx.last_user_prompt(chat),
+                 "guidance_scale": cfg_scale,
+                 "width": width,
+                 "height": height,
+                 "num_inference_steps": steps,
+             }
+             if chat["model"] in self.model_negative_prompt:
+                 payload["negative_prompt"] = self.negative_prompt
+
+             aspect_ratio = ctx.chat_to_aspect_ratio(chat) or "1:1"
+             dimension = ctx.app.aspect_ratios.get(aspect_ratio)
+             if dimension:
+                 w, h = dimension.split("×")
+                 width, height = int(w), int(h)
+                 payload["width"] = width
+                 payload["height"] = height
+
+             if chat["model"] in self.model_resolutions:
+                 # if models use resolution, remove width and height
+                 del payload["width"]
+                 del payload["height"]
+                 resolution = self.model_resolutions[chat["model"]][aspect_ratio]
+                 payload["resolution"] = resolution
+             elif chat["model"] in self.model_sizes:
+                 del payload["width"]
+                 del payload["height"]
+                 payload["size"] = aspect_ratio
+
+             gen_url = self.gen_url
+             if chat["model"].startswith("chutes-"):
+                 model = payload["model"]
+                 gen_url = f"https://{model}.chutes.ai/generate"
+                 del payload["model"]
+
+             ctx.log(f"POST {gen_url}")
+             ctx.log(json.dumps(payload, indent=2))
+             async with aiohttp.ClientSession() as session, session.post(
+                 gen_url, headers=headers, json=payload
+             ) as response:
+                 if response.status < 300:
+                     image_data = await response.read()
+                     content_type = response.headers.get("Content-Type")
+                     if content_type:
+                         ext = mimetypes.guess_extension(content_type)
+                         if ext:
+                             ext = ext.lstrip(".")  # remove leading dot
+                     if not ext:
+                         ext = "png"
+
+                     relative_url, info = ctx.save_image_to_cache(
+                         image_data,
+                         f"{chat['model']}.{ext}",
+                         ctx.to_file_info(
+                             chat,
+                             {
+                                 "aspect_ratio": aspect_ratio,
+                                 "width": width,
+                                 "height": height,
+                                 "cfg_scale": cfg_scale,
+                                 "steps": steps,
+                             },
+                         ),
+                     )
+                     return {
+                         "choices": [
+                             {
+                                 "message": {
+                                     "role": "assistant",
+                                     "content": self.default_content,
+                                     "images": [
+                                         {
+                                             "type": "image_url",
+                                             "image_url": {
+                                                 "url": relative_url,
+                                             },
+                                         }
+                                     ],
+                                 }
+                             }
+                         ],
+                         "created": int(time.time()),
+                     }
+                 else:
+                     text = await response.text()
+                     try:
+                         data = json.loads(text)
+                         ctx.log(data)
+                         if "detail" in data:
+                             raise Exception(data["detail"])
+                     except json.JSONDecodeError:
+                         pass
+                     raise Exception(f"Failed to generate image {response.status}")
+
+     ctx.add_provider(ChutesImage)
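To illustrate the routing above with a hypothetical request: for the chutes-hidream model at a 16:9 aspect ratio, width and height are replaced by the mapped "resolution": "1360x768", the model field is dropped from the payload, and the request is POSTed to https://chutes-hidream.chutes.ai/generate (model ids prefixed with chutes- are routed to their own subdomain), leaving a payload roughly like

    {
      "prompt": "<last user prompt>",
      "guidance_scale": 7.5,
      "num_inference_steps": 50,
      "resolution": "1360x768"
    }

assuming the default cfg_scale and steps. The returned image bytes are written to the local image cache via ctx.save_image_to_cache() and surfaced as an OpenAI-style chat response whose message carries an images list with an image_url entry pointing at the cached file.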