llms-py 3.0.0b2__py3-none-any.whl → 3.0.0b4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llms/__pycache__/main.cpython-314.pyc +0 -0
- llms/index.html +2 -1
- llms/llms.json +50 -17
- llms/main.py +484 -544
- llms/providers/__pycache__/anthropic.cpython-314.pyc +0 -0
- llms/providers/__pycache__/chutes.cpython-314.pyc +0 -0
- llms/providers/__pycache__/google.cpython-314.pyc +0 -0
- llms/providers/__pycache__/nvidia.cpython-314.pyc +0 -0
- llms/providers/__pycache__/openai.cpython-314.pyc +0 -0
- llms/providers/__pycache__/openrouter.cpython-314.pyc +0 -0
- llms/providers/anthropic.py +189 -0
- llms/providers/chutes.py +152 -0
- llms/providers/google.py +306 -0
- llms/providers/nvidia.py +107 -0
- llms/providers/openai.py +159 -0
- llms/providers/openrouter.py +70 -0
- llms/providers-extra.json +356 -0
- llms/providers.json +1 -1
- llms/ui/App.mjs +132 -60
- llms/ui/ai.mjs +76 -10
- llms/ui/app.css +65 -28
- llms/ui/ctx.mjs +196 -0
- llms/ui/index.mjs +75 -171
- llms/ui/lib/charts.mjs +9 -13
- llms/ui/markdown.mjs +6 -0
- llms/ui/{Analytics.mjs → modules/analytics.mjs} +76 -64
- llms/ui/{Main.mjs → modules/chat/ChatBody.mjs} +59 -135
- llms/ui/{SettingsDialog.mjs → modules/chat/SettingsDialog.mjs} +8 -8
- llms/ui/{ChatPrompt.mjs → modules/chat/index.mjs} +242 -46
- llms/ui/modules/layout.mjs +267 -0
- llms/ui/modules/model-selector.mjs +851 -0
- llms/ui/{Recents.mjs → modules/threads/Recents.mjs} +0 -2
- llms/ui/{Sidebar.mjs → modules/threads/index.mjs} +46 -44
- llms/ui/{threadStore.mjs → modules/threads/threadStore.mjs} +10 -7
- llms/ui/utils.mjs +82 -123
- {llms_py-3.0.0b2.dist-info → llms_py-3.0.0b4.dist-info}/METADATA +1 -1
- llms_py-3.0.0b4.dist-info/RECORD +65 -0
- llms/ui/Avatar.mjs +0 -86
- llms/ui/Brand.mjs +0 -52
- llms/ui/OAuthSignIn.mjs +0 -61
- llms/ui/ProviderIcon.mjs +0 -36
- llms/ui/ProviderStatus.mjs +0 -104
- llms/ui/SignIn.mjs +0 -65
- llms/ui/Welcome.mjs +0 -8
- llms/ui/model-selector.mjs +0 -686
- llms/ui.json +0 -1069
- llms_py-3.0.0b2.dist-info/RECORD +0 -58
- {llms_py-3.0.0b2.dist-info → llms_py-3.0.0b4.dist-info}/WHEEL +0 -0
- {llms_py-3.0.0b2.dist-info → llms_py-3.0.0b4.dist-info}/entry_points.txt +0 -0
- {llms_py-3.0.0b2.dist-info → llms_py-3.0.0b4.dist-info}/licenses/LICENSE +0 -0
- {llms_py-3.0.0b2.dist-info → llms_py-3.0.0b4.dist-info}/top_level.txt +0 -0
llms/providers/nvidia.py
ADDED
@@ -0,0 +1,107 @@
+import json
+import time
+
+import aiohttp
+
+
+def install(ctx):
+    from llms.main import GeneratorBase
+
+    class NvidiaGenAi(GeneratorBase):
+        sdk = "nvidia/image"
+
+        def __init__(self, **kwargs):
+            super().__init__(**kwargs)
+            self.width = int(kwargs.get("width", 1024))
+            self.height = int(kwargs.get("height", 1024))
+            self.cfg_scale = float(kwargs.get("cfg_scale", 3))
+            self.steps = int(kwargs.get("steps", 20))
+            self.mode = kwargs.get("mode", "base")
+            self.gen_url = kwargs.get("api", "https://ai.api.nvidia.com/v1/genai")
+
+        def to_response(self, response, chat, started_at):
+            if "artifacts" in response:
+                for artifact in response["artifacts"]:
+                    base64 = artifact.get("base64")
+                    seed = artifact.get("seed")
+                    filename = f"{seed}.png"
+                    if "model" in chat:
+                        last_model = "/" in chat["model"] and chat["model"].split("/")[-1] or chat["model"]
+                        filename = f"{last_model}_{seed}.png"
+
+                    image_info = {
+                        "seed": seed,
+                    }
+                    relative_url, info = ctx.save_image_to_cache(base64, filename, image_info)
+                    return {
+                        "choices": [
+                            {
+                                "message": {
+                                    "role": "assistant",
+                                    "content": self.default_content,
+                                    "images": [
+                                        {
+                                            "type": "image_url",
+                                            "image_url": {
+                                                "url": relative_url,
+                                            },
+                                        }
+                                    ],
+                                }
+                            }
+                        ]
+                    }
+            raise Exception("No artifacts in response")
+
+        async def chat(self, chat, provider=None):
+            headers = self.get_headers(provider, chat)
+            if provider is not None:
+                chat["model"] = provider.provider_model(chat["model"]) or chat["model"]
+
+            prompt = ctx.last_user_prompt(chat)
+
+            gen_request = {
+                "prompt": prompt,
+            }
+            modalities = chat.get("modalities", ["text"])
+            if "image" in modalities:
+                image_config = chat.get("image_config", {})
+                aspect_ratio = image_config.get("aspect_ratio")
+                if aspect_ratio:
+                    dimension = ctx.app.aspect_ratios.get(aspect_ratio)
+                    if dimension:
+                        width, height = dimension.split("×")
+                        gen_request["width"] = int(width)
+                        gen_request["height"] = int(height)
+                else:
+                    gen_request["width"] = self.width
+                    gen_request["height"] = self.height
+
+            gen_request["mode"] = self.mode
+            gen_request["cfg_scale"] = self.cfg_scale
+            gen_request["steps"] = self.steps
+
+            gen_url = f"{self.gen_url}/{chat['model']}"
+            ctx.log(f"POST {gen_url}")
+            ctx.log(self.gen_summary(gen_request))
+            # remove metadata if any (conflicts with some providers, e.g. Z.ai)
+            gen_request.pop("metadata", None)
+            started_at = time.time()
+
+            if ctx.MOCK:
+                ctx.log("Mocking NvidiaGenAi")
+                text = ctx.text_from_file(f"{ctx.MOCK_DIR}/nvidia-image.json")
+                return self.to_response(json.loads(text), chat, started_at)
+            else:
+                async with aiohttp.ClientSession() as session, session.post(
+                    gen_url,
+                    headers=headers,
+                    data=json.dumps(gen_request),
+                    timeout=aiohttp.ClientTimeout(total=120),
+                ) as response:
+                    return self.to_response(await self.response_json(response), chat, started_at)
+
+    ctx.add_provider(NvidiaGenAi)
+
+
+__install__ = install
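Note: each of the new provider modules in this release follows the same plugin convention: it exposes an install(ctx) hook, registers its generator classes via ctx.add_provider(...), and aliases the hook to __install__. A minimal sketch of how a host application could drive that convention follows; the load_provider helper is hypothetical, since the real loading logic lives in llms/main.py, which this diff does not show in full.

import importlib

def load_provider(ctx, module_name: str):
    # Hypothetical loader: import a provider module by dotted name and,
    # if it exports __install__, call it with the app context so the
    # module can register itself via ctx.add_provider(...).
    module = importlib.import_module(module_name)  # e.g. "llms.providers.nvidia"
    install = getattr(module, "__install__", None)
    if callable(install):
        install(ctx)

# load_provider(app_ctx, "llms.providers.nvidia")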
llms/providers/openai.py
ADDED
@@ -0,0 +1,159 @@
+import base64
+import json
+import mimetypes
+import time
+
+import aiohttp
+
+
+def install(ctx):
+    from llms.main import GeneratorBase, OpenAiCompatible
+
+    class OpenAiProvider(OpenAiCompatible):
+        sdk = "@ai-sdk/openai"
+
+        def __init__(self, **kwargs):
+            if "api" not in kwargs:
+                kwargs["api"] = "https://api.openai.com/v1"
+            super().__init__(**kwargs)
+            self.modalities["image"] = OpenAiGenerator(**kwargs)
+
+    # https://platform.openai.com/docs/api-reference/images
+    class OpenAiGenerator(GeneratorBase):
+        sdk = "openai/image"
+
+        def __init__(self, **kwargs):
+            super().__init__(**kwargs)
+            self.api = "https://api.openai.com/v1/images/generations"
+            self.map_image_models = kwargs.get(
+                "map_image_models",
+                {
+                    "gpt-5.1-codex-mini": "gpt-image-1-mini",
+                },
+            )
+
+        def aspect_ratio_to_size(self, aspect_ratio, model):
+            w, h = aspect_ratio.split(":")
+            width = int(w)
+            height = int(h)
+            if model == "dall-e-2":
+                return "1024x1024"
+            if model == "dall-e-3":
+                if width > height:
+                    return "1792x1024"
+                elif height > width:
+                    return "1024x1792"
+            if width > height:
+                return "1536x1024"
+            elif height > width:
+                return "1024x1536"
+            return "1024x1024"
+
+        async def to_response(self, response, chat, started_at):
+            # go through all image responses and save them to cache
+            # Try to extract and save images
+            images = []
+            if "data" in response:
+                for i, item in enumerate(response["data"]):
+                    image_url = item.get("url")
+                    b64_json = item.get("b64_json")
+
+                    ext = "png"
+                    image_data = None
+
+                    if b64_json:
+                        image_data = base64.b64decode(b64_json)
+                    elif image_url:
+                        ctx.log(f"GET {image_url}")
+                        async with aiohttp.ClientSession() as session, await session.get(image_url) as res:
+                            if res.status == 200:
+                                image_data = await res.read()
+                                content_type = res.headers.get("Content-Type")
+                                if content_type:
+                                    ext = mimetypes.guess_extension(content_type)
+                                    if ext:
+                                        ext = ext.lstrip(".")  # remove leading dot
+                                # Fallback if guess_extension returns None or if we want to be safe
+                                if not ext:
+                                    ext = "png"
+                            else:
+                                raise Exception(f"Failed to download image: {res.status}")
+
+                    if image_data:
+                        relative_url, info = ctx.save_image_to_cache(
+                            image_data,
+                            f"{chat['model']}-{i}.{ext}",
+                            {
+                                "model": chat["model"],
+                                "prompt": ctx.last_user_prompt(chat),
+                            },
+                        )
+                        images.append(
+                            {
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": relative_url,
+                                },
+                            }
+                        )
+                    else:
+                        raise Exception("No image data found")
+
+                return {
+                    "choices": [
+                        {
+                            "message": {
+                                "role": "assistant",
+                                "content": self.default_content,
+                                "images": images,
+                            }
+                        }
+                    ]
+                }
+            if "error" in response:
+                raise Exception(response["error"]["message"])
+
+            ctx.log(json.dumps(response, indent=2))
+            raise Exception("No 'data' field in response.")
+
+        async def chat(self, chat, provider=None):
+            headers = self.get_headers(provider, chat)
+
+            if chat["model"] in self.map_image_models:
+                chat["model"] = self.map_image_models[chat["model"]]
+
+            aspect_ratio = "1:1"
+            if "image_config" in chat and "aspect_ratio" in chat["image_config"]:
+                aspect_ratio = chat["image_config"].get("aspect_ratio", "1:1")
+            payload = {
+                "model": chat["model"],
+                "prompt": ctx.last_user_prompt(chat),
+                "size": self.aspect_ratio_to_size(aspect_ratio, chat["model"]),
+            }
+            if provider is not None:
+                chat["model"] = provider.provider_model(chat["model"]) or chat["model"]
+
+            started_at = time.time()
+            if ctx.MOCK:
+                print("Mocking OpenAiGenerator")
+                text = ctx.text_from_file(f"{ctx.MOCK_DIR}/openai-image.json")
+                return await self.to_response(json.loads(text), chat, started_at)
+            else:
+                ctx.log(f"POST {self.api}")
+                # _log(json.dumps(headers, indent=2))
+                ctx.log(json.dumps(payload, indent=2))
+                async with aiohttp.ClientSession() as session, session.post(
+                    self.api, headers=headers, json=payload
+                ) as response:
+                    text = await response.text()
+                    ctx.log(text[:1024] + (len(text) > 1024 and "..." or ""))
+                    if response.status < 300:
+                        return ctx.log_json(await self.to_response(json.loads(text), chat, started_at))
+                    else:
+                        raise Exception(f"Failed to generate image {response.status}")
+
+    ctx.add_provider(OpenAiProvider)
+    ctx.add_provider(OpenAiGenerator)
+
+
+__install__ = install
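Note: aspect_ratio_to_size above quantizes any "W:H" ratio down to the fixed sizes OpenAI's image endpoints accept. A standalone restatement with worked examples (illustrative only; it mirrors the branching in the diff above):

def aspect_ratio_to_size(aspect_ratio: str, model: str) -> str:
    w, h = (int(x) for x in aspect_ratio.split(":"))
    if model == "dall-e-2":
        return "1024x1024"   # dall-e-2 is always mapped to square output
    if model == "dall-e-3":
        if w > h:
            return "1792x1024"
        if h > w:
            return "1024x1792"
    if w > h:
        return "1536x1024"   # landscape for the gpt-image-* models
    if h > w:
        return "1024x1536"   # portrait for the gpt-image-* models
    return "1024x1024"       # square fallback

assert aspect_ratio_to_size("16:9", "dall-e-3") == "1792x1024"
assert aspect_ratio_to_size("1:1", "gpt-image-1") == "1024x1024"
assert aspect_ratio_to_size("2:3", "gpt-image-1-mini") == "1024x1536"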
llms/providers/openrouter.py
ADDED
@@ -0,0 +1,70 @@
+import json
+import time
+
+import aiohttp
+
+
+def install(ctx):
+    from llms.main import GeneratorBase
+
+    # https://openrouter.ai/docs/guides/overview/multimodal/image-generation
+    class OpenRouterGenerator(GeneratorBase):
+        sdk = "openrouter/image"
+
+        def __init__(self, **kwargs):
+            super().__init__(**kwargs)
+
+        def to_response(self, response, chat, started_at):
+            # go through all image responses and save them to cache
+            for choice in response["choices"]:
+                if "message" in choice and "images" in choice["message"]:
+                    for image in choice["message"]["images"]:
+                        if choice["message"]["content"] == "":
+                            choice["message"]["content"] = self.default_content
+                        if "image_url" in image:
+                            data_uri = image["image_url"]["url"]
+                            if data_uri.startswith("data:"):
+                                parts = data_uri.split(",", 1)
+                                ext = parts[0].split(";")[0].split("/")[1]
+                                base64_data = parts[1]
+                                model = chat["model"].split("/")[-1]
+                                filename = f"{model}-{choice['index']}.{ext}"
+                                info = {
+                                    "model": model,
+                                    "prompt": ctx.last_user_prompt(chat),
+                                }
+                                relative_url, info = ctx.save_image_to_cache(base64_data, filename, info)
+                                image["image_url"]["url"] = relative_url
+
+            return response
+
+        async def chat(self, chat, provider=None):
+            headers = self.get_headers(provider, chat)
+            if provider is not None:
+                chat["model"] = provider.provider_model(chat["model"]) or chat["model"]
+
+            started_at = time.time()
+            if ctx.MOCK:
+                print("Mocking OpenRouterGenerator")
+                text = ctx.text_from_file(f"{ctx.MOCK_DIR}/openrouter-image.json")
+                return ctx.log_json(self.to_response(json.loads(text), chat, started_at))
+            else:
+                chat_url = provider.chat_url
+                chat = await self.process_chat(chat, provider_id=self.id)
+                ctx.log(f"POST {chat_url}")
+                ctx.log(provider.chat_summary(chat))
+                # remove metadata if any (conflicts with some providers, e.g. Z.ai)
+                chat.pop("metadata", None)
+
+                async with aiohttp.ClientSession() as session, session.post(
+                    chat_url,
+                    headers=headers,
+                    data=json.dumps(chat),
+                    timeout=aiohttp.ClientTimeout(total=300),
+                ) as response:
+                    return ctx.log_json(self.to_response(await self.response_json(response), chat, started_at))
+
+    ctx.add_provider(OpenRouterGenerator)
+
+
+__install__ = install
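Note: OpenRouter returns generated images inline as data: URIs, which to_response above rewrites into cached relative URLs. How the split it relies on behaves on a typical payload (values illustrative):

data_uri = "data:image/png;base64,iVBORw0KGgo..."
parts = data_uri.split(",", 1)
ext = parts[0].split(";")[0].split("/")[1]   # "png", from the "data:image/png" media type
base64_data = parts[1]                       # the raw base64 payload after the comma
assert ext == "png"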
llms/providers-extra.json
ADDED
@@ -0,0 +1,356 @@
+{
+  "openrouter": {
+    "models": {
+      "google/gemini-2.5-flash-image": {
+        "name": "Gemini 2.5 Flash Image (Nano Banana)",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 0.30,
+          "output": 2.50
+        }
+      },
+      "google/gemini-2.5-flash-image-preview": {
+        "name": "Gemini 2.5 Flash Image Preview (Nano Banana)",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 0.30,
+          "output": 2.50
+        }
+      },
+      "google/gemini-3-pro-image-preview": {
+        "name": "Google: Nano Banana Pro Preview (Gemini 3 Pro)",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 2.0,
+          "output": 12.0
+        }
+      },
+      "sourceful/riverflow-v2-fast-preview": {
+        "name": "Sourceful: Riverflow V2 Fast Preview",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 0,
+          "output": 7.19
+        }
+      },
+      "sourceful/riverflow-v2-standard-preview": {
+        "name": "Sourceful: Riverflow V2 Standard Preview",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 0,
+          "output": 8.38
+        }
+      },
+      "sourceful/riverflow-v2-max-preview": {
+        "name": "Sourceful: Riverflow V2 Max Preview",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 0,
+          "output": 17.96
+        }
+      },
+      "openai/gpt-5-image-mini": {
+        "name": "OpenAI: GPT-5 Image Mini",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 2.50,
+          "output": 2
+        }
+      },
+      "openai/gpt-5-image": {
+        "name": "OpenAI: GPT-5 Image",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 10.0,
+          "output": 10.0
+        }
+      },
+      "black-forest-labs/flux.2-pro": {
+        "name": "Black Forest Labs: FLUX.2 Pro",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 3.66,
+          "output": 3.66
+        }
+      },
+      "black-forest-labs/flux.2-max": {
+        "name": "Black Forest Labs: FLUX.2 Max",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 7.32,
+          "output": 7.32
+        }
+      },
+      "black-forest-labs/flux.2-flex": {
+        "name": "Black Forest Labs: FLUX.2 Flex",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 14.64,
+          "output": 14.64
+        }
+      }
+    }
+  },
+  "openai": {
+    "models": {
+      "chatgpt-image-latest": {
+        "name": "ChatGPT Image Latest",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 5.0,
+          "output": 32.0
+        }
+      },
+      "gpt-image-1.5": {
+        "name": "GPT Image 1.5",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 5.0,
+          "output": 32.0
+        }
+      },
+      "gpt-image-1": {
+        "name": "GPT Image 1",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 5.0,
+          "output": 40.0
+        }
+      },
+      "gpt-image-1-mini": {
+        "name": "GPT Image 1 Mini",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        },
+        "cost": {
+          "input": 5.0,
+          "output": 8.0
+        }
+      }
+    }
+  },
+  "chutes": {
+    "models": {
+      "chutes-z-image-turbo": {
+        "name": "Z Image Turbo",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        }
+      },
+      "qwen-image": {
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        }
+      },
+      "chutes-qwen-image-edit-2509": {
+        "name": "Qwen Image Edit 2509",
+        "modalities": {
+          "input": [
+            "text",
+            "image"
+          ],
+          "output": [
+            "image"
+          ]
+        }
+      },
+      "chutes-hidream": {
+        "name": "Hidream",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        }
+      },
+      "chutes-hunyuan-image-3": {
+        "name": "Hunyuan Image 3",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        }
+      },
+      "FLUX.1-schnell": {
+        "name": "FLUX.1 Schnell",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        }
+      },
+      "chroma": {
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        }
+      },
+      "JuggernautXL-Ragnarok": {
+        "name": "JuggernautXL Ragnarok",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        }
+      },
+      "Animij": {
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        }
+      },
+      "Illustrij": {
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        }
+      },
+      "iLustMix": {
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "image"
+          ]
+        }
+      }
+    }
+  }
+}
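Note: providers-extra.json layers these image models onto the provider registry, keyed by provider id, with optional "name" and "cost" fields alongside the "modalities" declaration. A short sketch of consuming it to enumerate image-capable models (field names taken from the file above; the path is relative to the package root):

import json

with open("llms/providers-extra.json") as f:
    extra = json.load(f)

# Walk provider -> models and keep entries that declare "image" output.
for provider, cfg in extra.items():
    for model_id, model in cfg.get("models", {}).items():
        if "image" in model.get("modalities", {}).get("output", []):
            print(provider, model_id, model.get("name", model_id))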