llms-py 2.0.9__py3-none-any.whl → 3.0.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llms/__init__.py +4 -0
- llms/__main__.py +9 -0
- llms/db.py +359 -0
- llms/extensions/analytics/ui/index.mjs +1444 -0
- llms/extensions/app/README.md +20 -0
- llms/extensions/app/__init__.py +589 -0
- llms/extensions/app/db.py +536 -0
- {llms_py-2.0.9.data/data → llms/extensions/app}/ui/Recents.mjs +100 -73
- llms_py-2.0.9.data/data/ui/Sidebar.mjs → llms/extensions/app/ui/index.mjs +150 -79
- llms/extensions/app/ui/threadStore.mjs +433 -0
- llms/extensions/core_tools/CALCULATOR.md +32 -0
- llms/extensions/core_tools/__init__.py +637 -0
- llms/extensions/core_tools/ui/codemirror/addon/edit/closebrackets.js +201 -0
- llms/extensions/core_tools/ui/codemirror/addon/edit/closetag.js +185 -0
- llms/extensions/core_tools/ui/codemirror/addon/edit/continuelist.js +101 -0
- llms/extensions/core_tools/ui/codemirror/addon/edit/matchbrackets.js +160 -0
- llms/extensions/core_tools/ui/codemirror/addon/edit/matchtags.js +66 -0
- llms/extensions/core_tools/ui/codemirror/addon/edit/trailingspace.js +27 -0
- llms/extensions/core_tools/ui/codemirror/addon/selection/active-line.js +72 -0
- llms/extensions/core_tools/ui/codemirror/addon/selection/mark-selection.js +119 -0
- llms/extensions/core_tools/ui/codemirror/addon/selection/selection-pointer.js +98 -0
- llms/extensions/core_tools/ui/codemirror/codemirror.css +344 -0
- llms/extensions/core_tools/ui/codemirror/codemirror.js +9884 -0
- llms/extensions/core_tools/ui/codemirror/doc/docs.css +225 -0
- llms/extensions/core_tools/ui/codemirror/doc/source_sans.woff +0 -0
- llms/extensions/core_tools/ui/codemirror/mode/clike/clike.js +942 -0
- llms/extensions/core_tools/ui/codemirror/mode/javascript/index.html +118 -0
- llms/extensions/core_tools/ui/codemirror/mode/javascript/javascript.js +962 -0
- llms/extensions/core_tools/ui/codemirror/mode/javascript/typescript.html +62 -0
- llms/extensions/core_tools/ui/codemirror/mode/python/python.js +402 -0
- llms/extensions/core_tools/ui/codemirror/theme/dracula.css +40 -0
- llms/extensions/core_tools/ui/codemirror/theme/mocha.css +135 -0
- llms/extensions/core_tools/ui/index.mjs +650 -0
- llms/extensions/gallery/README.md +61 -0
- llms/extensions/gallery/__init__.py +63 -0
- llms/extensions/gallery/db.py +243 -0
- llms/extensions/gallery/ui/index.mjs +482 -0
- llms/extensions/katex/README.md +39 -0
- llms/extensions/katex/__init__.py +6 -0
- llms/extensions/katex/ui/README.md +125 -0
- llms/extensions/katex/ui/contrib/auto-render.js +338 -0
- llms/extensions/katex/ui/contrib/auto-render.min.js +1 -0
- llms/extensions/katex/ui/contrib/auto-render.mjs +244 -0
- llms/extensions/katex/ui/contrib/copy-tex.js +127 -0
- llms/extensions/katex/ui/contrib/copy-tex.min.js +1 -0
- llms/extensions/katex/ui/contrib/copy-tex.mjs +105 -0
- llms/extensions/katex/ui/contrib/mathtex-script-type.js +109 -0
- llms/extensions/katex/ui/contrib/mathtex-script-type.min.js +1 -0
- llms/extensions/katex/ui/contrib/mathtex-script-type.mjs +24 -0
- llms/extensions/katex/ui/contrib/mhchem.js +3213 -0
- llms/extensions/katex/ui/contrib/mhchem.min.js +1 -0
- llms/extensions/katex/ui/contrib/mhchem.mjs +3109 -0
- llms/extensions/katex/ui/contrib/render-a11y-string.js +887 -0
- llms/extensions/katex/ui/contrib/render-a11y-string.min.js +1 -0
- llms/extensions/katex/ui/contrib/render-a11y-string.mjs +800 -0
- llms/extensions/katex/ui/fonts/KaTeX_AMS-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_AMS-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_AMS-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Bold.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Bold.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Bold.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Caligraphic-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Bold.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Bold.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Bold.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Fraktur-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Bold.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Bold.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Bold.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-BoldItalic.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-BoldItalic.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-BoldItalic.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Italic.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Italic.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Italic.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Main-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Math-BoldItalic.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Math-BoldItalic.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Math-BoldItalic.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Math-Italic.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Math-Italic.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Math-Italic.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Bold.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Bold.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Bold.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Italic.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Italic.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Italic.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_SansSerif-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Script-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Script-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Script-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size1-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size1-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size1-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size2-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size2-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size2-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size3-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size3-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size3-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size4-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size4-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Size4-Regular.woff2 +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Typewriter-Regular.ttf +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Typewriter-Regular.woff +0 -0
- llms/extensions/katex/ui/fonts/KaTeX_Typewriter-Regular.woff2 +0 -0
- llms/extensions/katex/ui/index.mjs +92 -0
- llms/extensions/katex/ui/katex-swap.css +1230 -0
- llms/extensions/katex/ui/katex-swap.min.css +1 -0
- llms/extensions/katex/ui/katex.css +1230 -0
- llms/extensions/katex/ui/katex.js +19080 -0
- llms/extensions/katex/ui/katex.min.css +1 -0
- llms/extensions/katex/ui/katex.min.js +1 -0
- llms/extensions/katex/ui/katex.min.mjs +1 -0
- llms/extensions/katex/ui/katex.mjs +18547 -0
- llms/extensions/providers/__init__.py +22 -0
- llms/extensions/providers/anthropic.py +233 -0
- llms/extensions/providers/cerebras.py +37 -0
- llms/extensions/providers/chutes.py +153 -0
- llms/extensions/providers/google.py +481 -0
- llms/extensions/providers/nvidia.py +103 -0
- llms/extensions/providers/openai.py +154 -0
- llms/extensions/providers/openrouter.py +74 -0
- llms/extensions/providers/zai.py +182 -0
- llms/extensions/system_prompts/README.md +22 -0
- llms/extensions/system_prompts/__init__.py +45 -0
- llms/extensions/system_prompts/ui/index.mjs +280 -0
- llms/extensions/system_prompts/ui/prompts.json +1067 -0
- llms/extensions/tools/__init__.py +144 -0
- llms/extensions/tools/ui/index.mjs +706 -0
- llms/index.html +58 -0
- llms/llms.json +400 -0
- llms/main.py +4407 -0
- llms/providers-extra.json +394 -0
- llms/providers.json +1 -0
- llms/ui/App.mjs +188 -0
- llms/ui/ai.mjs +217 -0
- llms/ui/app.css +7081 -0
- llms/ui/ctx.mjs +412 -0
- llms/ui/index.mjs +131 -0
- llms/ui/lib/chart.js +14 -0
- llms/ui/lib/charts.mjs +16 -0
- llms/ui/lib/color.js +14 -0
- llms/ui/lib/servicestack-vue.mjs +37 -0
- llms/ui/lib/vue.min.mjs +13 -0
- llms/ui/lib/vue.mjs +18530 -0
- {llms_py-2.0.9.data/data → llms}/ui/markdown.mjs +33 -15
- llms/ui/modules/chat/ChatBody.mjs +976 -0
- llms/ui/modules/chat/SettingsDialog.mjs +374 -0
- llms/ui/modules/chat/index.mjs +991 -0
- llms/ui/modules/icons.mjs +46 -0
- llms/ui/modules/layout.mjs +271 -0
- llms/ui/modules/model-selector.mjs +811 -0
- llms/ui/tailwind.input.css +742 -0
- {llms_py-2.0.9.data/data → llms}/ui/typography.css +133 -7
- llms/ui/utils.mjs +261 -0
- llms_py-3.0.10.dist-info/METADATA +49 -0
- llms_py-3.0.10.dist-info/RECORD +177 -0
- llms_py-3.0.10.dist-info/entry_points.txt +2 -0
- {llms_py-2.0.9.dist-info → llms_py-3.0.10.dist-info}/licenses/LICENSE +1 -2
- llms.py +0 -1402
- llms_py-2.0.9.data/data/index.html +0 -64
- llms_py-2.0.9.data/data/llms.json +0 -447
- llms_py-2.0.9.data/data/requirements.txt +0 -1
- llms_py-2.0.9.data/data/ui/App.mjs +0 -20
- llms_py-2.0.9.data/data/ui/ChatPrompt.mjs +0 -389
- llms_py-2.0.9.data/data/ui/Main.mjs +0 -680
- llms_py-2.0.9.data/data/ui/app.css +0 -3951
- llms_py-2.0.9.data/data/ui/lib/servicestack-vue.min.mjs +0 -37
- llms_py-2.0.9.data/data/ui/lib/vue.min.mjs +0 -12
- llms_py-2.0.9.data/data/ui/tailwind.input.css +0 -261
- llms_py-2.0.9.data/data/ui/threadStore.mjs +0 -273
- llms_py-2.0.9.data/data/ui/utils.mjs +0 -114
- llms_py-2.0.9.data/data/ui.json +0 -1069
- llms_py-2.0.9.dist-info/METADATA +0 -941
- llms_py-2.0.9.dist-info/RECORD +0 -30
- llms_py-2.0.9.dist-info/entry_points.txt +0 -2
- {llms_py-2.0.9.data/data → llms}/ui/fav.svg +0 -0
- {llms_py-2.0.9.data/data → llms}/ui/lib/highlight.min.mjs +0 -0
- {llms_py-2.0.9.data/data → llms}/ui/lib/idb.min.mjs +0 -0
- {llms_py-2.0.9.data/data → llms}/ui/lib/marked.min.mjs +0 -0
- /llms_py-2.0.9.data/data/ui/lib/servicestack-client.min.mjs → /llms/ui/lib/servicestack-client.mjs +0 -0
- {llms_py-2.0.9.data/data → llms}/ui/lib/vue-router.min.mjs +0 -0
- {llms_py-2.0.9.dist-info → llms_py-3.0.10.dist-info}/WHEEL +0 -0
- {llms_py-2.0.9.dist-info → llms_py-3.0.10.dist-info}/top_level.txt +0 -0
|
import base64
import json
import mimetypes
import time

import aiohttp


def install_openai(ctx):
    """Register the OpenAI chat provider and its image-generation companion."""
    from llms.main import GeneratorBase, OpenAiCompatible

    class OpenAiProvider(OpenAiCompatible):
        """OpenAI-compatible chat provider that also routes image requests."""

        sdk = "@ai-sdk/openai"

        def __init__(self, **kwargs):
            # Default to the public OpenAI endpoint unless explicitly overridden.
            if "api" not in kwargs:
                kwargs["api"] = "https://api.openai.com/v1"
            super().__init__(**kwargs)
            # Image requests are delegated to the images/generations endpoint.
            self.modalities["image"] = OpenAiGenerator(**kwargs)

    # https://platform.openai.com/docs/api-reference/images
    class OpenAiGenerator(GeneratorBase):
        """Generates images via OpenAI's /images/generations endpoint."""

        sdk = "openai/image"

        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.api = "https://api.openai.com/v1/images/generations"
            # Chat model names remapped to their image-generation equivalents.
            self.map_image_models = kwargs.get(
                "map_image_models",
                {
                    "gpt-5.1-codex-mini": "gpt-image-1-mini",
                },
            )

        def aspect_ratio_to_size(self, aspect_ratio, model):
            """Map a "W:H" aspect ratio onto the closest size the model supports.

            dall-e-2 only supports square output; dall-e-3 and the gpt-image
            models each support one landscape, one portrait, and one square size.
            """
            w, h = aspect_ratio.split(":")
            width = int(w)
            height = int(h)
            if model == "dall-e-2":
                return "1024x1024"
            if model == "dall-e-3":
                if width > height:
                    return "1792x1024"
                elif height > width:
                    return "1024x1792"
            if width > height:
                return "1536x1024"
            elif height > width:
                return "1024x1536"
            return "1024x1024"

        async def to_response(self, response, chat, started_at, context=None):
            """Convert an Images API response into a chat-completion-shaped dict.

            Decodes (b64_json) or downloads (url) each returned image, saves it
            to the local image cache, and returns a chat response whose message
            carries the cache-relative image URLs.

            `context` is accepted for parity with callers that pass it
            (previously the live path passed context= and raised TypeError).

            Raises Exception on provider errors, failed downloads, or when no
            image data is present.
            """
            # go through all image responses and save them to cache
            images = []
            if "data" in response:
                for i, item in enumerate(response["data"]):
                    image_url = item.get("url")
                    b64_json = item.get("b64_json")

                    ext = "png"
                    image_data = None

                    if b64_json:
                        image_data = base64.b64decode(b64_json)
                    elif image_url:
                        ctx.log(f"GET {image_url}")
                        async with aiohttp.ClientSession() as session, await session.get(image_url) as res:
                            if res.status == 200:
                                image_data = await res.read()
                                content_type = res.headers.get("Content-Type")
                                if content_type:
                                    ext = mimetypes.guess_extension(content_type)
                                    if ext:
                                        ext = ext.lstrip(".")  # remove leading dot
                                # Fallback if guess_extension returns None or if we want to be safe
                                if not ext:
                                    ext = "png"
                            else:
                                raise Exception(f"Failed to download image: {res.status}")

                    if image_data:
                        relative_url, info = ctx.save_image_to_cache(
                            image_data,
                            f"{chat['model']}-{i}.{ext}",
                            ctx.to_file_info(chat),
                        )
                        images.append(
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": relative_url,
                                },
                            }
                        )
                    else:
                        raise Exception("No image data found")

                return {
                    "choices": [
                        {
                            "message": {
                                "role": "assistant",
                                "content": self.default_content,
                                "images": images,
                            }
                        }
                    ]
                }
            if "error" in response:
                raise Exception(response["error"]["message"])

            ctx.log(json.dumps(response, indent=2))
            raise Exception("No 'data' field in response.")

        async def chat(self, chat, provider=None, context=None):
            """Send an image-generation request and return a chat-style response."""
            headers = self.get_headers(provider, chat)

            if chat["model"] in self.map_image_models:
                chat["model"] = self.map_image_models[chat["model"]]

            # Apply the provider's model alias BEFORE building the payload so the
            # remapped name is what actually gets sent. (Previously the remap ran
            # after the payload was built and therefore had no effect.)
            if provider is not None:
                chat["model"] = provider.provider_model(chat["model"]) or chat["model"]

            aspect_ratio = ctx.chat_to_aspect_ratio(chat) or "1:1"
            payload = {
                "model": chat["model"],
                "prompt": ctx.last_user_prompt(chat),
                "size": self.aspect_ratio_to_size(aspect_ratio, chat["model"]),
            }

            started_at = time.time()
            if ctx.MOCK:
                print("Mocking OpenAiGenerator")
                text = ctx.text_from_file(f"{ctx.MOCK_DIR}/openai-image.json")
                return await self.to_response(json.loads(text), chat, started_at, context=context)
            else:
                ctx.log(f"POST {self.api}")
                # _log(json.dumps(headers, indent=2))
                ctx.log(json.dumps(payload, indent=2))
                async with aiohttp.ClientSession() as session, session.post(
                    self.api, headers=headers, json=payload
                ) as response:
                    text = await response.text()
                    ctx.log(text[:1024] + (len(text) > 1024 and "..." or ""))
                    if response.status < 300:
                        return ctx.log_json(await self.to_response(json.loads(text), chat, started_at, context=context))
                    else:
                        raise Exception(f"Failed to generate image {response.status}")

    ctx.add_provider(OpenAiProvider)
    ctx.add_provider(OpenAiGenerator)


__install__ = install_openai
import json
import time

import aiohttp


def install_openrouter(ctx):
    """Register OpenRouter's multimodal image generator."""
    from llms.main import GeneratorBase

    # https://openrouter.ai/docs/guides/overview/multimodal/image-generation
    class OpenRouterGenerator(GeneratorBase):
        """Generates images via OpenRouter's chat completions API."""

        sdk = "openrouter/image"

        def __init__(self, **kwargs):
            super().__init__(**kwargs)

        def to_response(self, response, chat, started_at, context=None):
            """Rewrite data-URI images in the response to cached file URLs.

            Each image returned as a base64 data URI is saved to the local
            image cache and its URL replaced with a cache-relative URL.
            The (mutated) response dict is returned.

            `context` is accepted because the live path passes context=
            (previously this raised TypeError on every successful request).
            """
            # go through all image responses and save them to cache
            cost = None
            if "usage" in response and "cost" in response["usage"]:
                cost = response["usage"]["cost"]
            if context is not None and cost is not None:
                # Surface the provider-reported cost to the caller's context,
                # consistent with the Z.ai generator.
                context["cost"] = cost
            for choice in response["choices"]:
                if "message" in choice and "images" in choice["message"]:
                    for image in choice["message"]["images"]:
                        if choice["message"]["content"] == "":
                            choice["message"]["content"] = self.default_content
                        if "image_url" in image:
                            data_uri = image["image_url"]["url"]
                            if data_uri.startswith("data:"):
                                parts = data_uri.split(",", 1)
                                # e.g. "data:image/png;base64" -> "png"
                                ext = parts[0].split(";")[0].split("/")[1]
                                base64_data = parts[1]
                                model = chat["model"].split("/")[-1]
                                filename = f"{model}-{choice['index']}.{ext}"
                                relative_url, info = ctx.save_image_to_cache(
                                    base64_data, filename, ctx.to_file_info(chat, {"cost": cost})
                                )
                                image["image_url"]["url"] = relative_url

            return response

        async def chat(self, chat, provider=None, context=None):
            """Send an image-generation chat request and return the response."""
            headers = self.get_headers(provider, chat)
            if provider is not None:
                chat["model"] = provider.provider_model(chat["model"]) or chat["model"]

            started_at = time.time()
            if ctx.MOCK:
                print("Mocking OpenRouterGenerator")
                text = ctx.text_from_file(f"{ctx.MOCK_DIR}/openrouter-image.json")
                return ctx.log_json(self.to_response(json.loads(text), chat, started_at, context=context))
            else:
                chat_url = provider.chat_url
                # remove tools
                chat.pop("tools", None)
                chat = await self.process_chat(chat, provider_id=self.id)
                ctx.log(f"POST {chat_url}")
                ctx.log(provider.chat_summary(chat))
                # remove metadata if any (conflicts with some providers, e.g. Z.ai)
                metadata = chat.pop("metadata", None)

                async with aiohttp.ClientSession() as session, session.post(
                    chat_url,
                    headers=headers,
                    data=json.dumps(chat),
                    timeout=aiohttp.ClientTimeout(total=300),
                ) as response:
                    if metadata:
                        chat["metadata"] = metadata
                    return ctx.log_json(
                        self.to_response(await self.response_json(response), chat, started_at, context=context)
                    )

    ctx.add_provider(OpenRouterGenerator)
import json
import time
from typing import Optional

import aiohttp


def install_zai(ctx):
    """Register the Z.ai GLM image generator extension."""
    from llms.main import GeneratorBase

    # https://docs.z.ai/guides/image/glm-image
    class ZaiGenerator(GeneratorBase):
        """Generates images via Z.ai's /images/generations endpoint."""

        sdk = "zai/image"

        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            # Supported aspect ratios mapped to pixel sizes. Note the values
            # use "×" (multiplication sign); chat() normalizes them to "x"
            # before sending.
            self.aspect_ratios = {
                "1:1": "1280×1280",
                "2:3": "1056×1568",
                "3:2": "1568×1056",
                "3:4": "1088×1472",
                "4:3": "1472×1088",
                "4:5": "1088×1472",
                "5:4": "1472×1088",
                "9:16": "960×1728",
                "16:9": "1728×960",
                "21:9": "1728×960",
            }
            # Optional request parameters forwarded verbatim when set (see
            # the attrs list in chat()).
            self.model: str = kwargs.get("model", "glm-image")
            self.n: Optional[int] = kwargs.get("n")
            self.quality: Optional[str] = kwargs.get("quality")
            self.response_format: Optional[str] = kwargs.get("response_format")
            self.size: Optional[str] = kwargs.get("size")
            self.style: Optional[str] = kwargs.get("style")
            self.sensitive_word_check: Optional[str] = kwargs.get("sensitive_word_check")
            self.user: Optional[str] = kwargs.get("user")
            self.request_id: Optional[str] = kwargs.get("request_id")
            self.user_id: Optional[str] = kwargs.get("user_id")
            self.extra_headers: Optional[dict] = kwargs.get("extra_headers")
            self.extra_body: Optional[dict] = kwargs.get("extra_body")
            self.disable_strict_validation: Optional[bool] = kwargs.get("disable_strict_validation")
            # Request timeout in seconds; defaults to 300.
            self.timeout: Optional[float] = float(kwargs.get("timeout") or 300)
            self.watermark_enabled: Optional[bool] = kwargs.get("watermark_enabled")

        async def chat(self, chat, provider=None, context=None):
            """Generate image(s) for the last user prompt in `chat`.

            Posts to Z.ai's images/generations endpoint (or the provider's
            equivalent), downloads each returned image URL, saves it to the
            local image cache, and returns a chat-completion-shaped response
            whose message carries the cached image URLs.
            """
            headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
            if self.extra_headers:
                headers.update(self.extra_headers)

            chat_url = "https://api.z.ai/api/paas/v4/images/generations"
            if provider is not None:
                # Provider overrides auth, model alias, and endpoint.
                headers["Authorization"] = f"Bearer {provider.api_key}"
                chat["model"] = provider.provider_model(chat["model"]) or chat["model"]
                chat_url = provider.api + "/images/generations"

            # Copy any configured optional parameters into the request body.
            body = {}
            attrs = [
                "model",
                "n",
                "quality",
                "response_format",
                "size",
                "style",
                "sensitive_word_check",
                "user",
                "request_id",
                "user_id",
                "disable_strict_validation",
                "watermark_enabled",
            ]
            for attr in attrs:
                if hasattr(self, attr) and getattr(self, attr) is not None:
                    body[attr] = getattr(self, attr)

            if self.extra_body:
                body.update(self.extra_body)

            if "model" in chat:
                body["model"] = chat["model"]

            body["prompt"] = ctx.last_user_prompt(chat)

            # NOTE(review): body["size"] is always overwritten here, so a
            # configured self.size only survives via extra_body.
            aspect_ratio = ctx.chat_to_aspect_ratio(chat) or "1:1"
            size = self.aspect_ratios.get(aspect_ratio, "1280x1280").replace("×", "x")
            body["size"] = size

            username = ctx.context_to_username(context)
            if username:
                body["user"] = username

            ctx.dbg(f"ZaiProvider.chat: {chat_url}")
            ctx.dbg(json.dumps(body, indent=2))
            started_at = time.time()
            async with aiohttp.ClientSession() as session, session.post(
                chat_url,
                headers=headers,
                data=json.dumps(body),
                timeout=aiohttp.ClientTimeout(total=self.timeout),
            ) as response:
                # Example Response
                # {
                #     "created": 1768451303,
                #     "data": [
                #         {
                #             "url": "https://mfile.z.ai/1768451374203-b334959408a643a8a6c74eb104746dcb.png?ufileattname=202601151228236805d575507d4570_watermark.png"
                #         }
                #     ],
                #     "id": "202601151228236805d575507d4570",
                #     "request_id": "202601151228236805d575507d4570",
                #     "usage": {
                #         "tokens": 0,
                #         "price": 0,
                #         "cost": 0.0,
                #         "duration": 71
                #     },
                #     "timestamp": 1768451374519,
                #     "model": "GLM-Image"
                # }

                response_json = await self.response_json(response)
                duration = int(time.time() - started_at)
                usage = response_json.get("usage", {})
                if context is not None:
                    # Expose the raw provider response and cost to the caller.
                    context["providerResponse"] = response_json
                    if "cost" in usage:
                        context["cost"] = usage.get("cost")

                images = []
                for image in response_json.get("data", []):
                    url = image.get("url")
                    if not url:
                        continue
                    # download url with aiohttp (reuses the open session)
                    async with session.get(url) as image_response:
                        # NOTE: rebinds `headers` from the request headers to
                        # the image response headers; not used for requests
                        # after this point.
                        headers = image_response.headers
                        # get filename from Content-Disposition
                        # attachment; filename="202601151228236805d575507d4570_watermark.png"
                        mime_type = headers.get("Content-Type") or "image/png"
                        disposition = headers.get("Content-Disposition")
                        if disposition:
                            # NOTE(review): assumes disposition contains
                            # filename="..."; .index raises ValueError otherwise.
                            start = disposition.index('filename="') + len('filename="')
                            end = disposition.index('"', start)
                            filename = disposition[start:end]
                        else:
                            ext = mime_type.split("/")[1]
                            filename = f"{body['model'].lower()}-{response_json.get('id', int(started_at))}.{ext}"
                        image_bytes = await image_response.read()

                    info = {
                        "prompt": body["prompt"],
                        "type": mime_type,
                        "width": int(size.split("x")[0]),
                        "height": int(size.split("x")[1]),
                        "duration": duration,
                    }
                    info.update(usage)
                    cache_url, info = ctx.save_image_to_cache(
                        image_bytes, filename, image_info=info, ignore_info=True
                    )

                    images.append(
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": cache_url,
                            },
                        }
                    )

                chat_response = {
                    "choices": [{"message": {"role": "assistant", "content": self.default_content, "images": images}}],
                    "created": int(time.time()),
                    "usage": {
                        "prompt_tokens": 0,
                        "completion_tokens": 1_000_000,  # Price per image is 0.015, so 1M token is 0.015
                    },
                }
                if "cost" in usage:
                    chat_response["cost"] = usage["cost"]
                return ctx.log_json(chat_response)

    ctx.add_provider(ZaiGenerator)
@@ -0,0 +1,22 @@
|
|
|
1
|
+
# System Prompts Extension
|
|
2
|
+
|
|
3
|
+
This extension configures AI requests with a library of **over 200** awesome curated system prompts that can be selected from the UI.
|
|
4
|
+
|
|
5
|
+
## Custom System Prompts
|
|
6
|
+
|
|
7
|
+
You can also maintain your own library of system prompts. Prompts shared by all anonymous users are read from:
|
|
8
|
+
`~/.llms/user/default/system-prompts.json`
|
|
9
|
+
|
|
10
|
+
Or for signed in users at:
|
|
11
|
+
`~/.llms/user/<github-user>/system-prompts.json`
|
|
12
|
+
|
|
13
|
+
The JSON file should contain an array of Prompt objects, e.g:
|
|
14
|
+
|
|
15
|
+
```json
|
|
16
|
+
[
|
|
17
|
+
{
|
|
18
|
+
"name": "Helpful Assistant",
|
|
19
|
+
"prompt": "You are a helpful assistant."
|
|
20
|
+
}
|
|
21
|
+
]
|
|
22
|
+
```
|
|
import json
import os

from aiohttp import web

# Prompts served when no prompts.json exists on disk for the requesting user.
default_prompts = [
    {"name": "Helpful Assistant", "prompt": "You are a helpful assistant."},
]


# runs after providers are configured but before server is run
def install(ctx):
    """Register the /prompts.json endpoint serving per-user system prompts."""

    def get_user_prompts(request):
        """Return the first prompts.json found for this request's user.

        Search order: the signed-in user's file, then the shared default-user
        file, then the prompts bundled with this extension; falls back to the
        in-code default_prompts when none exist.
        """
        search_paths = []
        # check if user is signed in (Github OAuth)
        username = ctx.get_username(request)
        if username:
            search_paths.append(os.path.join(ctx.get_user_path(username), "system_prompts", "prompts.json"))
        # prompts shared by all anonymous users
        search_paths.append(os.path.join(ctx.get_user_path(), "system_prompts", "prompts.json"))
        # prompts bundled with this extension
        search_paths.append(os.path.join(ctx.path, "ui", "prompts.json"))

        for candidate in search_paths:
            if os.path.exists(candidate):
                with open(candidate, encoding="utf-8") as f:
                    return json.load(f)
        return default_prompts

    # API Handler to get prompts
    async def get_prompts(request):
        return web.json_response(get_user_prompts(request))

    ctx.add_get("prompts.json", get_prompts)


# register install extension handler
__install__ = install

__order__ = -10
|