llms-py 2.0.20__py3-none-any.whl → 3.0.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llms/__init__.py +3 -1
- llms/db.py +359 -0
- llms/{ui/Analytics.mjs → extensions/analytics/ui/index.mjs} +254 -327
- llms/extensions/app/README.md +20 -0
- llms/extensions/app/__init__.py +589 -0
- llms/extensions/app/db.py +536 -0
- llms/{ui → extensions/app/ui}/Recents.mjs +99 -73
- llms/{ui/Sidebar.mjs → extensions/app/ui/index.mjs} +139 -68
- llms/extensions/app/ui/threadStore.mjs +433 -0
- llms/extensions/core_tools/CALCULATOR.md +32 -0
- llms/extensions/core_tools/__init__.py +637 -0
- llms/extensions/core_tools/ui/codemirror/addon/edit/closebrackets.js +201 -0
- llms/extensions/core_tools/ui/codemirror/addon/edit/closetag.js +185 -0
- llms/extensions/core_tools/ui/codemirror/addon/edit/continuelist.js +101 -0
- llms/extensions/core_tools/ui/codemirror/addon/edit/matchbrackets.js +160 -0
- llms/extensions/core_tools/ui/codemirror/addon/edit/matchtags.js +66 -0
- llms/extensions/core_tools/ui/codemirror/addon/edit/trailingspace.js +27 -0
- llms/extensions/core_tools/ui/codemirror/addon/selection/active-line.js +72 -0
- llms/extensions/core_tools/ui/codemirror/addon/selection/mark-selection.js +119 -0
- llms/extensions/core_tools/ui/codemirror/addon/selection/selection-pointer.js +98 -0
- llms/extensions/core_tools/ui/codemirror/codemirror.css +344 -0
- llms/extensions/core_tools/ui/codemirror/codemirror.js +9884 -0
- llms/extensions/core_tools/ui/codemirror/doc/docs.css +225 -0
- llms/extensions/core_tools/ui/codemirror/doc/source_sans.woff +0 -0
- llms/extensions/core_tools/ui/codemirror/mode/clike/clike.js +942 -0
- llms/extensions/core_tools/ui/codemirror/mode/javascript/index.html +118 -0
- llms/extensions/core_tools/ui/codemirror/mode/javascript/javascript.js +962 -0
- llms/extensions/core_tools/ui/codemirror/mode/javascript/typescript.html +62 -0
- llms/extensions/core_tools/ui/codemirror/mode/python/python.js +402 -0
- llms/extensions/core_tools/ui/codemirror/theme/dracula.css +40 -0
- llms/extensions/core_tools/ui/codemirror/theme/mocha.css +135 -0
- llms/extensions/core_tools/ui/index.mjs +650 -0
- llms/extensions/gallery/README.md +61 -0
- llms/extensions/gallery/__init__.py +63 -0
- llms/extensions/gallery/db.py +243 -0
- llms/extensions/gallery/ui/index.mjs +482 -0
- llms/extensions/katex/README.md +39 -0
- llms/extensions/katex/__init__.py +6 -0
- llms/extensions/katex/ui/README.md +125 -0
- llms/extensions/katex/ui/contrib/auto-render.js +338 -0
- llms/extensions/katex/ui/contrib/auto-render.min.js +1 -0
- llms/extensions/katex/ui/contrib/auto-render.mjs +244 -0
- llms/extensions/katex/ui/contrib/copy-tex.js +127 -0
- llms/extensions/katex/ui/contrib/copy-tex.min.js +1 -0
- llms/extensions/katex/ui/contrib/copy-tex.mjs +105 -0
- llms/extensions/katex/ui/contrib/mathtex-script-type.js +109 -0
- llms/extensions/katex/ui/contrib/mathtex-script-type.min.js +1 -0
- llms/extensions/katex/ui/contrib/mathtex-script-type.mjs +24 -0
- llms/extensions/katex/ui/contrib/mhchem.js +3213 -0
- llms/extensions/katex/ui/contrib/mhchem.min.js +1 -0
- llms/extensions/katex/ui/contrib/mhchem.mjs +3109 -0
- llms/extensions/katex/ui/contrib/render-a11y-string.js +887 -0
- llms/extensions/katex/ui/contrib/render-a11y-string.min.js +1 -0
- llms/extensions/katex/ui/contrib/render-a11y-string.mjs +800 -0
- llms/extensions/katex/ui/fonts/KaTeX_AMS-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_AMS-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_AMS-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Bold.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Bold.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Bold.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Bold.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Bold.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Bold.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Bold.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Bold.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Bold.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-BoldItalic.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-BoldItalic.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-BoldItalic.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Italic.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Italic.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Italic.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Math-BoldItalic.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Math-BoldItalic.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Math-BoldItalic.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Math-Italic.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Math-Italic.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Math-Italic.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Bold.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Bold.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Bold.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Italic.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Italic.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Italic.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Script-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Script-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Script-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size1-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size1-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size1-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size2-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size2-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size2-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size3-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size3-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size3-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size4-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size4-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size4-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Typewriter-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Typewriter-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Typewriter-Regular.woff2 +0 -0
- llms/extensions/katex/ui/index.mjs +92 -0
- llms/extensions/katex/ui/katex-swap.css +1230 -0
- llms/extensions/katex/ui/katex-swap.min.css +1 -0
- llms/extensions/katex/ui/katex.css +1230 -0
- llms/extensions/katex/ui/katex.js +19080 -0
- llms/extensions/katex/ui/katex.min.css +1 -0
- llms/extensions/katex/ui/katex.min.js +1 -0
- llms/extensions/katex/ui/katex.min.mjs +1 -0
- llms/extensions/katex/ui/katex.mjs +18547 -0
- llms/extensions/providers/__init__.py +22 -0
- llms/extensions/providers/anthropic.py +233 -0
- llms/extensions/providers/cerebras.py +37 -0
- llms/extensions/providers/chutes.py +153 -0
- llms/extensions/providers/google.py +481 -0
- llms/extensions/providers/nvidia.py +103 -0
- llms/extensions/providers/openai.py +154 -0
- llms/extensions/providers/openrouter.py +74 -0
- llms/extensions/providers/zai.py +182 -0
- llms/extensions/system_prompts/README.md +22 -0
- llms/extensions/system_prompts/__init__.py +45 -0
- llms/extensions/system_prompts/ui/index.mjs +280 -0
- llms/extensions/system_prompts/ui/prompts.json +1067 -0
- llms/extensions/tools/__init__.py +144 -0
- llms/extensions/tools/ui/index.mjs +706 -0
- llms/index.html +36 -62
- llms/llms.json +180 -879
- llms/main.py +3640 -899
- llms/providers-extra.json +394 -0
- llms/providers.json +1 -0
- llms/ui/App.mjs +176 -8
- llms/ui/ai.mjs +156 -20
- llms/ui/app.css +3161 -244
- llms/ui/ctx.mjs +412 -0
- llms/ui/index.mjs +131 -0
- llms/ui/lib/chart.js +14 -0
- llms/ui/lib/charts.mjs +16 -0
- llms/ui/lib/color.js +14 -0
- llms/ui/lib/highlight.min.mjs +1243 -0
- llms/ui/lib/idb.min.mjs +8 -0
- llms/ui/lib/marked.min.mjs +8 -0
- llms/ui/lib/servicestack-client.mjs +1 -0
- llms/ui/lib/servicestack-vue.mjs +37 -0
- llms/ui/lib/vue-router.min.mjs +6 -0
- llms/ui/lib/vue.min.mjs +13 -0
- llms/ui/lib/vue.mjs +18530 -0
- llms/ui/markdown.mjs +25 -14
- llms/ui/modules/chat/ChatBody.mjs +976 -0
- llms/ui/{SettingsDialog.mjs → modules/chat/SettingsDialog.mjs} +74 -74
- llms/ui/modules/chat/index.mjs +991 -0
- llms/ui/modules/icons.mjs +46 -0
- llms/ui/modules/layout.mjs +271 -0
- llms/ui/modules/model-selector.mjs +811 -0
- llms/ui/tailwind.input.css +550 -78
- llms/ui/typography.css +54 -36
- llms/ui/utils.mjs +197 -92
- llms_py-3.0.10.dist-info/METADATA +49 -0
- llms_py-3.0.10.dist-info/RECORD +177 -0
- {llms_py-2.0.20.dist-info → llms_py-3.0.10.dist-info}/licenses/LICENSE +1 -2
- llms/ui/Avatar.mjs +0 -28
- llms/ui/Brand.mjs +0 -34
- llms/ui/ChatPrompt.mjs +0 -443
- llms/ui/Main.mjs +0 -740
- llms/ui/ModelSelector.mjs +0 -60
- llms/ui/ProviderIcon.mjs +0 -29
- llms/ui/ProviderStatus.mjs +0 -105
- llms/ui/SignIn.mjs +0 -64
- llms/ui/SystemPromptEditor.mjs +0 -31
- llms/ui/SystemPromptSelector.mjs +0 -36
- llms/ui/Welcome.mjs +0 -8
- llms/ui/threadStore.mjs +0 -524
- llms/ui.json +0 -1069
- llms_py-2.0.20.dist-info/METADATA +0 -931
- llms_py-2.0.20.dist-info/RECORD +0 -36
- {llms_py-2.0.20.dist-info → llms_py-3.0.10.dist-info}/WHEEL +0 -0
- {llms_py-2.0.20.dist-info → llms_py-3.0.10.dist-info}/entry_points.txt +0 -0
- {llms_py-2.0.20.dist-info → llms_py-3.0.10.dist-info}/top_level.txt +0 -0
|
from .anthropic import install_anthropic
from .cerebras import install_cerebras
from .chutes import install_chutes
from .google import install_google
from .nvidia import install_nvidia
from .openai import install_openai
from .openrouter import install_openrouter
from .zai import install_zai


def install(ctx):
    """Register every bundled provider extension with *ctx*.

    Installers are invoked in a fixed sequence so providers are always
    registered in the same, deterministic order.
    """
    installers = (
        install_anthropic,
        install_cerebras,
        install_chutes,
        install_google,
        install_nvidia,
        install_openai,
        install_openrouter,
        install_zai,
    )
    for install_provider in installers:
        install_provider(ctx)


# Extension entry point discovered by the host application.
__install__ = install
import json
import time

import aiohttp


def install_anthropic(ctx):
    """Register an OpenAI-compatible adapter for the Anthropic Messages API on *ctx*."""
    from llms.main import OpenAiCompatible

    class AnthropicProvider(OpenAiCompatible):
        # SDK identifier advertised for this provider.
        sdk = "@ai-sdk/anthropic"

        def __init__(self, **kwargs):
            # Default to the public Anthropic endpoint unless configured otherwise.
            if "api" not in kwargs:
                kwargs["api"] = "https://api.anthropic.com/v1"
            super().__init__(**kwargs)

            # Anthropic uses x-api-key header instead of Authorization
            if self.api_key:
                # Copy before mutating: base class headers may be shared.
                self.headers = self.headers.copy()
                if "Authorization" in self.headers:
                    del self.headers["Authorization"]
                self.headers["x-api-key"] = self.api_key

            # The Messages API requires an anthropic-version header.
            if "anthropic-version" not in self.headers:
                self.headers = self.headers.copy()
                self.headers["anthropic-version"] = "2023-06-01"
            self.chat_url = f"{self.api}/messages"

        async def chat(self, chat, context=None):
            """Translate an OpenAI-style chat request to Anthropic's Messages
            API, POST it, and return an OpenAI-compatible response dict."""
            # Map the public model id to the provider-specific id when one exists.
            chat["model"] = self.provider_model(chat["model"]) or chat["model"]

            chat = await self.process_chat(chat, provider_id=self.id)

            # Transform OpenAI format to Anthropic format
            anthropic_request = {
                "model": chat["model"],
                "messages": [],
            }

            # Extract system message (Anthropic uses top-level 'system' parameter)
            system_messages = []
            for message in chat.get("messages", []):
                if message.get("role") == "system":
                    content = message.get("content", "")
                    if isinstance(content, str):
                        system_messages.append(content)
                    elif isinstance(content, list):
                        # Multi-part system content: keep only the text parts.
                        for item in content:
                            if item.get("type") == "text":
                                system_messages.append(item.get("text", ""))

            if system_messages:
                anthropic_request["system"] = "\n".join(system_messages)

            # Transform messages (exclude system messages)
            for message in chat.get("messages", []):
                if message.get("role") == "system":
                    continue

                if message.get("role") == "tool":
                    # Convert OpenAI tool response to Anthropic tool_result
                    tool_call_id = message.get("tool_call_id")
                    content = ctx.to_content(message.get("content", ""))
                    if not isinstance(content, (str, list)):
                        content = str(content)

                    tool_result = {"type": "tool_result", "tool_use_id": tool_call_id, "content": content}

                    # Anthropic requires tool results to be in a user message
                    # Check if the last message was a user message, if so append to it
                    if anthropic_request["messages"] and anthropic_request["messages"][-1]["role"] == "user":
                        anthropic_request["messages"][-1]["content"].append(tool_result)
                    else:
                        anthropic_request["messages"].append({"role": "user", "content": [tool_result]})
                    continue

                # NOTE(review): assistant messages carrying OpenAI "tool_calls"
                # are not converted to Anthropic "tool_use" content blocks here;
                # only their text content is forwarded — confirm whether
                # multi-turn tool use round-trips correctly.
                anthropic_message = {"role": message.get("role"), "content": []}

                content = message.get("content", "")
                if isinstance(content, str):
                    # Plain string content passes through unchanged.
                    anthropic_message["content"] = content
                elif isinstance(content, list):
                    for item in content:
                        if item.get("type") == "text":
                            anthropic_message["content"].append({"type": "text", "text": item.get("text", "")})
                        elif item.get("type") == "image_url" and "image_url" in item:
                            # Transform OpenAI image_url format to Anthropic format
                            image_url = item["image_url"].get("url", "")
                            if image_url.startswith("data:"):
                                # Extract media type and base64 data
                                parts = image_url.split(";base64,", 1)
                                if len(parts) == 2:
                                    media_type = parts[0].replace("data:", "")
                                    base64_data = parts[1]
                                    anthropic_message["content"].append(
                                        {
                                            "type": "image",
                                            "source": {"type": "base64", "media_type": media_type, "data": base64_data},
                                        }
                                    )
                            # NOTE(review): non-data: image URLs are silently
                            # dropped — confirm remote URLs are unsupported here.

                anthropic_request["messages"].append(anthropic_message)

            # Handle max_tokens (required by Anthropic, uses max_tokens not max_completion_tokens)
            if "max_completion_tokens" in chat:
                anthropic_request["max_tokens"] = chat["max_completion_tokens"]
            elif "max_tokens" in chat:
                anthropic_request["max_tokens"] = chat["max_tokens"]
            else:
                # Anthropic requires max_tokens, set a default
                anthropic_request["max_tokens"] = 4096

            # Copy other supported parameters
            if "temperature" in chat:
                anthropic_request["temperature"] = chat["temperature"]
            if "top_p" in chat:
                anthropic_request["top_p"] = chat["top_p"]
            if "top_k" in chat:
                anthropic_request["top_k"] = chat["top_k"]
            if "stop" in chat:
                # OpenAI "stop" may be a string or list; Anthropic wants a list.
                anthropic_request["stop_sequences"] = chat["stop"] if isinstance(chat["stop"], list) else [chat["stop"]]
            if "stream" in chat:
                anthropic_request["stream"] = chat["stream"]
            if "tools" in chat:
                # OpenAI function tools map to Anthropic tools with input_schema.
                anthropic_tools = []
                for tool in chat["tools"]:
                    if tool.get("type") == "function":
                        function = tool.get("function", {})
                        anthropic_tool = {
                            "name": function.get("name"),
                            "description": function.get("description"),
                            "input_schema": function.get("parameters"),
                        }
                        anthropic_tools.append(anthropic_tool)
                if anthropic_tools:
                    anthropic_request["tools"] = anthropic_tools
            if "tool_choice" in chat:
                # NOTE(review): passed through in OpenAI shape (e.g. "auto");
                # Anthropic expects an object like {"type": "auto"} — verify.
                anthropic_request["tool_choice"] = chat["tool_choice"]

            ctx.log(f"POST {self.chat_url}")
            ctx.log(json.dumps(anthropic_request, indent=2))

            async with aiohttp.ClientSession() as session:
                started_at = time.time()
                async with session.post(
                    self.chat_url,
                    headers=self.headers,
                    data=json.dumps(anthropic_request),
                    timeout=aiohttp.ClientTimeout(total=120),
                ) as response:
                    return ctx.log_json(
                        self.to_response(await self.response_json(response), chat, started_at, context=context)
                    )

        def to_response(self, response, chat, started_at, context=None):
            """Convert Anthropic response format to OpenAI-compatible format."""
            if context is not None:
                # Preserve the raw provider payload for callers that want it.
                context["providerResponse"] = response
            # Transform Anthropic response to OpenAI format
            ret = {
                "id": response.get("id", ""),
                "object": "chat.completion",
                "created": int(started_at),
                "model": response.get("model", ""),
                "choices": [],
                "usage": {},
            }

            # Transform content blocks to message content
            content_parts = []
            thinking_parts = []
            tool_calls = []

            for block in response.get("content", []):
                if block.get("type") == "text":
                    content_parts.append(block.get("text", ""))
                elif block.get("type") == "thinking":
                    # Store thinking blocks separately (some models include reasoning)
                    thinking_parts.append(block.get("thinking", ""))
                elif block.get("type") == "tool_use":
                    # Map Anthropic tool_use back to an OpenAI tool_call with
                    # JSON-encoded arguments.
                    tool_call = {
                        "id": block.get("id"),
                        "type": "function",
                        "function": {
                            "name": block.get("name"),
                            "arguments": json.dumps(block.get("input", {})),
                        },
                    }
                    tool_calls.append(tool_call)

            # Combine all text content
            message_content = "\n".join(content_parts) if content_parts else ""

            # Create the choice object
            # NOTE(review): Anthropic stop_reason values ("end_turn",
            # "tool_use", "max_tokens") are passed through unmapped; OpenAI
            # clients expect "stop"/"tool_calls"/"length" — confirm consumers.
            choice = {
                "index": 0,
                "message": {"role": "assistant", "content": message_content},
                "finish_reason": response.get("stop_reason", "stop"),
            }

            # Add thinking as metadata if present
            if thinking_parts:
                choice["message"]["thinking"] = "\n".join(thinking_parts)

            # Add tool_calls if present
            if tool_calls:
                choice["message"]["tool_calls"] = tool_calls

            ret["choices"].append(choice)

            # Transform usage
            if "usage" in response:
                usage = response["usage"]
                ret["usage"] = {
                    "prompt_tokens": usage.get("input_tokens", 0),
                    "completion_tokens": usage.get("output_tokens", 0),
                    "total_tokens": usage.get("input_tokens", 0) + usage.get("output_tokens", 0),
                }

            # Add metadata
            if "metadata" not in ret:
                ret["metadata"] = {}
            # Wall-clock duration of the request, in whole seconds.
            ret["metadata"]["duration"] = int(time.time() - started_at)

            if chat is not None and "model" in chat:
                cost = self.model_cost(chat["model"])
                if cost and "input" in cost and "output" in cost:
                    ret["metadata"]["pricing"] = f"{cost['input']}/{cost['output']}"

            return ret

    ctx.add_provider(AnthropicProvider)
def install_cerebras(ctx):
    """Register an OpenAI-compatible adapter for the Cerebras inference API on *ctx*."""
    from llms.main import OpenAiCompatible

    class CerebrasProvider(OpenAiCompatible):
        # SDK identifier advertised for this provider.
        sdk = "@ai-sdk/cerebras"

        def __init__(self, **kwargs):
            # Default to the public Cerebras endpoint unless configured otherwise.
            kwargs.setdefault("api", "https://api.cerebras.ai/v1")
            super().__init__(**kwargs)

        async def chat(self, chat, context=None):
            # Cerebras only supports string content for text-only models,
            # so collapse list-of-text-parts message content into a plain
            # string; messages with non-text parts are forwarded unchanged.
            clean_chat = chat.copy()
            cleaned_messages = []
            for msg in chat.get("messages", []):
                new_msg = msg.copy()
                content = msg.get("content")
                if isinstance(content, list) and all(part.get("type") == "text" for part in content):
                    new_msg["content"] = "".join(part.get("text", "") for part in content)
                cleaned_messages.append(new_msg)
            clean_chat["messages"] = cleaned_messages

            # Strip request fields the Cerebras API does not accept.
            clean_chat.pop("modalities", None)
            clean_chat.pop("systemPrompt", None)
            return await super().chat(clean_chat, context)

    ctx.add_provider(CerebrasProvider)
import json
import mimetypes
import time

import aiohttp


def install_chutes(ctx):
    """Register the Chutes image-generation provider on *ctx*."""
    from llms.main import GeneratorBase

    class ChutesImage(GeneratorBase):
        # SDK identifier advertised for this provider.
        sdk = "chutes/image"

        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            # Default generation parameters; all overridable via provider config.
            self.width = int(kwargs.get("width", 1024))
            self.height = int(kwargs.get("height", 1024))
            self.cfg_scale = float(kwargs.get("cfg_scale", 7.5))
            self.steps = int(kwargs.get("steps", 50))
            self.negative_prompt = kwargs.get("negative_prompt", "blur, distortion, low quality")
            self.gen_url = kwargs.get("api", "https://image.chutes.ai/generate")
            # Models that take a named "resolution" instead of width/height.
            self.model_resolutions = {
                "chutes-hidream": {
                    "1:1": "1024x1024",
                    "9:16": "768x1360",
                    "16:9": "1360x768",
                    "3:4": "880x1168",
                    "4:3": "1168x880",
                    "2:3": "832x1248",
                    "3:2": "1248x832",
                }
            }
            # Models that take a "size" aspect-ratio string instead of width/height.
            self.model_sizes = ["chutes-hunyuan-image-3"]
            # Models that accept a negative prompt.
            self.model_negative_prompt = [
                "chroma",
                "qwen-image-edit-2509",
                "JuggernautXL-Ragnarok",
                "JuggernautXL",
                "Animij",
                "iLustMix",
            ]

        async def chat(self, chat, provider=None, context=None):
            """Generate an image from the last user prompt in *chat* and
            return an OpenAI-style chat response whose message carries the
            cached image URL.

            Raises Exception with the API's "detail" message (or the HTTP
            status) when generation fails.
            """
            headers = {"Authorization": f"Bearer {self.api_key}"}
            if provider is not None:
                # Delegate credentials and model-id mapping to the owning provider.
                headers["Authorization"] = f"Bearer {provider.api_key}"
                chat["model"] = provider.provider_model(chat["model"]) or chat["model"]

            cfg_scale = self.cfg_scale
            steps = self.steps
            width = self.width
            height = self.height
            # z-image-turbo rejects high guidance scales; clamp to its maximum.
            if chat["model"] == "chutes-z-image-turbo":
                cfg_scale = min(self.cfg_scale, 5)
            payload = {
                "model": chat["model"],
                "prompt": ctx.last_user_prompt(chat),
                "guidance_scale": cfg_scale,
                "width": width,
                "height": height,
                "num_inference_steps": steps,
            }
            if chat["model"] in self.model_negative_prompt:
                payload["negative_prompt"] = self.negative_prompt

            # FIX: the previous version first read aspect_ratio from
            # chat["messages"][0] and then unconditionally overwrote it here;
            # the dead read has been removed.
            aspect_ratio = ctx.chat_to_aspect_ratio(chat) or "1:1"
            dimension = ctx.app.aspect_ratios.get(aspect_ratio)
            if dimension:
                # Dimensions are stored as "W×H" strings (note the U+00D7).
                w, h = dimension.split("×")
                width, height = int(w), int(h)
                payload["width"] = width
                payload["height"] = height

            if chat["model"] in self.model_resolutions:
                # These models use a named resolution instead of width/height.
                # FIX: an unmapped aspect ratio previously raised KeyError;
                # now fall back to the width/height already in the payload.
                resolution = self.model_resolutions[chat["model"]].get(aspect_ratio)
                if resolution:
                    del payload["width"]
                    del payload["height"]
                    payload["resolution"] = resolution
            elif chat["model"] in self.model_sizes:
                del payload["width"]
                del payload["height"]
                payload["size"] = aspect_ratio

            gen_url = self.gen_url
            if chat["model"].startswith("chutes-"):
                # chutes-* models are served from per-model subdomains and
                # identify the model by hostname rather than payload field.
                model = payload["model"]
                gen_url = f"https://{model}.chutes.ai/generate"
                del payload["model"]

            ctx.log(f"POST {gen_url}")
            ctx.log(json.dumps(payload, indent=2))
            async with aiohttp.ClientSession() as session, session.post(
                gen_url, headers=headers, json=payload
            ) as response:
                if response.status < 300:
                    image_data = await response.read()
                    # Derive a file extension from the Content-Type header.
                    # FIX: ext was previously unbound (NameError) when the
                    # response had no Content-Type header; default explicitly.
                    ext = None
                    content_type = response.headers.get("Content-Type")
                    if content_type:
                        ext = mimetypes.guess_extension(content_type)
                        if ext:
                            ext = ext.lstrip(".")  # remove leading dot
                    if not ext:
                        ext = "png"

                    relative_url, info = ctx.save_image_to_cache(
                        image_data,
                        f"{chat['model']}.{ext}",
                        ctx.to_file_info(
                            chat,
                            {
                                "aspect_ratio": aspect_ratio,
                                "width": width,
                                "height": height,
                                "cfg_scale": cfg_scale,
                                "steps": steps,
                            },
                        ),
                    )
                    return {
                        "choices": [
                            {
                                "message": {
                                    "role": "assistant",
                                    "content": self.default_content,
                                    "images": [
                                        {
                                            "type": "image_url",
                                            "image_url": {
                                                "url": relative_url,
                                            },
                                        }
                                    ],
                                }
                            }
                        ],
                        "created": int(time.time()),
                    }
                else:
                    # Surface the API's error detail when the body is JSON;
                    # otherwise fall through to a generic status error.
                    text = await response.text()
                    try:
                        data = json.loads(text)
                        ctx.log(data)
                        if "detail" in data:
                            raise Exception(data["detail"])
                    except json.JSONDecodeError:
                        pass
                    raise Exception(f"Failed to generate image {response.status}")

    ctx.add_provider(ChutesImage)