MeUtils 2025.8.19.22.3.25__py3-none-any.whl → 2025.8.20.18.38.45__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -14,39 +14,41 @@ from openai import OpenAI
  from openai import OpenAI, APIStatusError
 
  client = OpenAI(
- base_url=os.getenv("FFIRE_BASE_URL"),
- api_key=os.getenv("FFIRE_API_KEY") #+"-29463"
- )
- #
- # for i in range(1):
- # try:
- # completion = client.chat.completions.create(
- # # model="kimi-k2-0711-preview",
- # # model="deepseek-reasoner",
- # # model="qwen3-235b-a22b-thinking-2507",
- # model="qwen3-235b-a22b-instruct-2507",
- #
- # messages=[
- # {"role": "user", "content": '你是谁'}
- # ],
- # # top_p=0.7,
- # top_p=None,
- # temperature=None,
- # # stream=True,
- # max_tokens=1000,
- # extra_body={"xx": "xxxxxxxx"}
- # )
- # print(completion)
- # except Exception as e:
- # print(e)
+ # base_url=os.getenv("FFIRE_BASE_URL"),
+ # api_key=os.getenv("FFIRE_API_KEY") #+"-29463"
 
- model = "doubao-embedding-text-240715"
+ base_url="http://127.0.0.1:8000/v1",
 
-
- r = client.embeddings.create(
- input='hi',
- model=model
+ api_key="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTgxNTc1Njh9.eihH3NVrzJCg9bdWb9mim9rGKTLKn1a66kW2Cqc0uPM"
  )
- print(r)
+ #
+ for i in range(1):
+ try:
+ completion = client.chat.completions.create(
+ # model="kimi-k2-0711-preview",
+ # model="deepseek-reasoner",
+ # model="qwen3-235b-a22b-thinking-2507",
+ # model="qwen3-235b-a22b-instruct-2507",
+ model="qwen-image",
 
+ messages=[
+ {"role": "user", "content": 'a cat'}
+ ],
+ # top_p=0.7,
+ top_p=None,
+ temperature=None,
+ # stream=True,
+ max_tokens=1000,
+ extra_body={"xx": "xxxxxxxx"}
+ )
+ print(completion)
+ except Exception as e:
+ print(e)
 
+ # model = "doubao-embedding-text-240715"
+ #
+ # r = client.embeddings.create(
+ # input='hi',
+ # model=model
+ # )
+ # print(r)
@@ -152,7 +152,9 @@ with timer('image'):
  # base_url="https://oneapi.chatfire.cn/v1",
 
  base_url=os.getenv("MODELSCOPE_BASE_URL"),
- api_key=os.getenv("MODELSCOPE_API_KEY")
+ api_key=os.getenv("MODELSCOPE_API_KEY"),
+
+ # default_query=
 
  )
 
@@ -209,7 +211,7 @@ with timer('image'):
  response = client.images.generate(
  model=model,
  prompt=prompt,
- # response_format="b64_json",
+ response_format="url",
  # extra_body={
  # "Style": "True",
  # "controls": {}, ######### 这个看起来更通用些
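Together with the `X-ModelScope-Async-Mode: false` header added in the next hunk, this switches the example from the base64 flow to a synchronous call that returns image URLs. A minimal sketch of the resulting request, assuming `client`, `model`, and `prompt` are set up as in the surrounding example:

```python
# Minimal sketch (assumes `client`, `model`, `prompt` from the example above; extra_body omitted).
response = client.images.generate(
    model=model,
    prompt=prompt,
    response_format="url",                                # ask for URLs instead of b64_json
    extra_headers={"X-ModelScope-Async-Mode": "false"},   # disable ModelScope async mode, per the next hunk
)
print(response.data[0].url)
```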
@@ -222,6 +224,10 @@ with timer('image'):
 
  # size="1700x1275",
 
+ extra_headers={"X-ModelScope-Async-Mode": "false"},
+
+
+
  extra_body={
  "extra_fields": {
 
@@ -7,11 +7,13 @@
  # @WeChat : meutils
  # @Software : PyCharm
  # @Description :
+ import httpx
 
  from meutils.pipe import *
  from meutils.decorators.retry import retrying
  from meutils.notice.feishu import send_message as _send_message
  from meutils.config_utils.lark_utils import get_next_token_for_polling
+ from meutils.apis.proxy.kdlapi import get_one_proxy
 
  from meutils.schemas.video_types import VideoRequest
  from meutils.schemas.image_types import ImageRequest, ImagesResponse
@@ -33,7 +35,12 @@ FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?shee
  async def create_task(request: VideoRequest, api_key: Optional[str] = None):
  api_key = api_key or await get_next_token_for_polling(FEISHU_URL, from_redis=False)
 
- client = ZhipuAI(api_key=api_key) # 请填写您自己的APIKey
+ proxy = await get_one_proxy()
+ client = ZhipuAI(
+ api_key=api_key,
+ http_client=httpx.Client(proxy=proxy)
+
+ ) # 请填写您自己的APIKey
  response = client.videos.generations(
  model=request.model,
  prompt=request.prompt,
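The hunk above now fetches a proxy from the KDL pool and routes the ZhipuAI client through it via an `httpx.Client`. A self-contained sketch of the same pattern, assuming the standard `zhipuai` SDK import and the `get_one_proxy` helper added to the imports above:

```python
# Sketch only: mirrors the change above; assumes zhipuai and the kdlapi proxy helper are available.
import httpx
from zhipuai import ZhipuAI
from meutils.apis.proxy.kdlapi import get_one_proxy

async def make_zhipu_client(api_key: str) -> ZhipuAI:
    proxy = await get_one_proxy()              # proxy URL string, e.g. "http://user:pass@host:port" (assumption)
    return ZhipuAI(
        api_key=api_key,
        http_client=httpx.Client(proxy=proxy)  # all SDK traffic goes through the proxy
    )
```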
@@ -102,15 +109,15 @@ async def generate(request: ImageRequest, n: int = 30): # 兼容dalle3
 
  # VideoResult
  if __name__ == '__main__':
- api_key = "a7eb02770d1a41be95ba0b82f651e761.gHEXdCbRLadIwKtI"
+ api_key = "f0a6177640364388a6d5d82428ee93b7.2woWPO0QdF3aWbVZ"
 
- ""
+ # api_key = "c98aa404b0224690b211c5d1e420db2c.qGaByuJATne08QUx"
  # api_key = "7d10426c06afa81e8d7401d97781249c.DbqlSsicRtaUdKXI" # 新号
  # api_key = "e21bd630f681c4d90b390cd609720483.WSFVgA3Kk1wNCX0mN"
 
  request = VideoRequest(
  # model='cogvideox-flash',
- model='cogvideox'
+ model='cogvideox-3'
 
  )
  r = arun(create_task(request, api_key=api_key))
@@ -119,4 +126,3 @@ if __name__ == '__main__':
  # request = ImageRequest(prompt="https://oss.ffire.cc/files/kling_watermark.png 让这个女人笑起来")
  #
  # arun(generate(request, n=30))
-
@@ -18,6 +18,7 @@ from meutils.apis.fal.images import generate as fal_generate
 
  from meutils.apis.gitee.image_to_3d import generate as image_to_3d_generate
  from meutils.apis.gitee.openai_images import generate as gitee_images_generate
+ from meutils.apis.qwen.chat import Completions as QwenCompletions
 
 
  async def generate(
@@ -38,6 +39,11 @@ async def generate(
  if request.model in {"Qwen-Image", "FLUX_1-Krea-dev"}:
  return await gitee_images_generate(request, api_key)
 
+ if request.model.startswith("qwen-image"):
+ return await QwenCompletions(api_key=api_key).generate(request)
+
+
+
 
  # "flux.1-krea-dev"
 
meutils/apis/models.py CHANGED
@@ -38,10 +38,15 @@ def make_billing_model(model: str, request: dict):
 
  elif _model.startswith(("minimax")):
  # MiniMax-Hailuo-02 T2V-01-Director I2V-01-Director S2V-01 I2V-01-live I2V-01 T2V-01
+
  duration = request.get("duration", 6)
  resolution = request.get("resolution", "720P")
  model = request.get("model", "").lower()
 
+ if "01" in model:
+ duration = 6
+ resolution = "720P"
+
  if model.startswith("minimax"): # 02
  resolution = request.get("resolution", "768P")
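The new `if "01" in model` guard pins the legacy MiniMax 01-family models (T2V-01, I2V-01, S2V-01, ...) to a fixed 6s / 720P billing tier, while MiniMax-Hailuo-02 still reads `duration` and `resolution` from the request (defaulting to 768P). A sketch of that branch in isolation; the helper name and return shape are illustrative, since `make_billing_model`'s full body is not shown in this hunk:

```python
# Illustrative helper (not in the package): isolates the MiniMax branch shown above.
def minimax_billing_params(request: dict) -> tuple:
    duration = request.get("duration", 6)
    resolution = request.get("resolution", "720P")
    model = request.get("model", "").lower()

    if "01" in model:                    # 01-family: billed at a fixed tier
        duration = 6
        resolution = "720P"

    if model.startswith("minimax"):      # MiniMax-Hailuo-02
        resolution = request.get("resolution", "768P")

    return duration, resolution
```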
 
@@ -62,6 +62,6 @@ if __name__ == '__main__':
  loop.run_until_complete(asyncio.wait(tasks))
 
 
- run()
+ # run()
 
- # arun(get_one_proxy())
+ arun(get_one_proxy())
meutils/apis/qwen/chat.py CHANGED
@@ -13,15 +13,16 @@ from openai import AsyncOpenAI, OpenAI, AsyncStream
 
  from meutils.pipe import *
  from meutils.decorators.retry import retrying
- # from meutils.oss.ali_oss import qwenai_upload
- from meutils.io.files_utils import to_bytes, guess_mime_type
+ from meutils.oss.ali_oss import qwenai_upload
+ from meutils.io.files_utils import to_bytes, guess_mime_type, to_url
  from meutils.caches import rcache
 
  from meutils.llm.openai_utils import to_openai_params, create_chat_completion_chunk, token_encoder
 
  from meutils.config_utils.lark_utils import get_next_token_for_polling
  from meutils.schemas.openai_types import chat_completion, chat_completion_chunk, CompletionRequest, CompletionUsage, \
- ChatCompletion
+ ChatCompletion, Choice, ChatCompletionMessage
+ from meutils.schemas.image_types import ImageRequest, ImagesResponse
 
  FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=PP1PGr"
 
@@ -46,8 +47,43 @@ class Completions(object):
  def __init__(self, api_key: Optional[str] = None):
  self.api_key = api_key
 
- async def create(self, request: CompletionRequest, api_key: Optional[str] = None, cookie: Optional[str] = None):
- api_key = api_key or await get_next_token_for_polling(FEISHU_URL)
+ async def generate(self, request: ImageRequest, **kwargs):
+
+ if request.image and not request.image.startswith("http"):
+ request.image = await to_url(request.image, content_type="image/png")
+
+ _ = CompletionRequest(
+ model="qwen-image",
+ stream=True,
+ messages=[
+ {
+ "role": "user",
+ "content": [
+ {"type": "text", "text": request.prompt},
+
+ {
+ "type": "image_url",
+ "image_url": {
+ "url": request.image
+ }
+ }
+
+ ],
+
+ }
+ ],
+ size=request.aspect_ratio
+ )
+
+ async for chunk in await self.create(_):
+ logger.debug(chunk)
+ if chunk.choices and (url := chunk.choices[0].delta.content):
+ return ImagesResponse(data=[{"url": url}])
+
+ raise Exception("qwen-image 生成失败, 请重试")
+
+ async def create(self, request: CompletionRequest, cookie: Optional[str] = None):
+ api_key = self.api_key or await get_next_token_for_polling(FEISHU_URL)
 
  self.client = AsyncOpenAI(
  base_url=base_url,
@@ -60,33 +96,28 @@ class Completions(object):
 
 
  chat_id = await self.create_new_chat()
+ # request.last_user_content
+ # request.last_urls.get("image_url", [])
 
  payload = {
  "chat_id": chat_id,
+ "stream": request.stream,
  "incremental_output": True,
  "chat_mode": "normal",
  "model": "qwen3-235b-a22b",
  "messages": [
  {
  "role": "user",
- "content": "这只熊拿着五彩画板和画笔,站在画板前画画。",
+ "content": request.last_user_content,
 
- "user_action": "recommendation",
- "files": [
- {
- "type": "image",
- "name": "example.png",
- "file_type": "image",
- "showType": "image",
- "file_class": "vision",
- "url": "https://img.alicdn.com/imgextra/i2/O1CN0137EBmZ276dnmyY0kx_!!6000000007748-2-tps-1024-1024.png"
- }
- ],
+ "user_action": "chat",
+ "files": [],
  "models": [
  "qwen3-235b-a22b"
  ],
- # "chat_type": "t2t",
- "chat_type": "image_edit",
+ "chat_type": "t2t",
+ # "chat_type": "t2i",
+ # "chat_type": "image_edit",
 
  "feature_config": {
  "thinking_enabled": request.enable_thinking or False,
@@ -98,20 +129,45 @@ class Completions(object):
  }
  }
  }
- ]
+ ],
+ "size": request.size if hasattr(request, "size") else "1:1"
  }
 
- payload = {**request.model_dump(), **payload}
+ if request.model.startswith("qwen-image"):
+ payload["messages"][0]["chat_type"] = "t2i"
+ if image_urls := request.last_urls.get("image_url"):
+ payload["messages"][0]["chat_type"] = "image_edit"
+ payload["messages"][0]["files"] = [
+ {
+ "type": "image",
+ "name": "example.png",
+ "file_type": "image",
+ "showType": "image",
+ "file_class": "vision",
+ "url": image_urls[-1] # todo 阿里对象存储
+ }
+ ]
 
  data = to_openai_params(payload)
+ logger.debug(bjson(data))
  response = await self.client.chat.completions.create(**data, extra_query={"chat_id": chat_id})
  # response = self.do_response(response)
 
- if isinstance(response, AsyncStream):
- async for i in response:
- print(i)
+ if isinstance(response, AsyncStream): # image
+ # async def gen():
+ # async for chunk in response:
+ # if url := chunk.choices[0].delta.content:
+ # yield f"[]{url}"
+
+ return response
 
  else:
+ logger.debug(response)
+
+ if hasattr(response, "data") and (choices := response.data.get("choices")):
+ response = response.model_construct(choices=choices)
+ logger.debug(response)
+
  prompt_tokens = len(token_encoder.encode(str(request.messages)))
  completion_tokens = len(token_encoder.encode(str(response.choices[0].message.content)))
  usage = {
@@ -120,12 +176,9 @@ class Completions(object):
  "total_tokens": prompt_tokens + completion_tokens
  }
  response.usage = usage
- print(response)
-
- # return response
+ return response
 
  async def create_new_chat(self):
-
  payload = {
  "title": "新建对话",
  "models": [DEFAUL_MODEL],
@@ -141,20 +194,40 @@ class Completions(object):
  if __name__ == '__main__':
  token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTgxNTc1Njh9.eihH3NVrzJCg9bdWb9mim9rGKTLKn1a66kW2Cqc0uPM"
  request = CompletionRequest(
- model="qwen3-235b-a22b",
+ # model="qwen3-235b-a22b",
+ model="qwen-image",
+
  messages=[
 
  {
  "role": "user",
- # "content": [{"type": "text", "text": "周杰伦"}],
- "content": "这只熊拿着五彩画板和画笔,站在画板前画画。",
+ "content": [
+ {"type": "text", "text": "带个墨镜"},
+
+ {
+ "type": "image_url",
+ "image_url": {
+ "url": "https://oss.ffire.cc/files/kling_watermark.png"
+ }
+ }
+
+ ],
 
  }
  ],
  stream=True,
 
- enable_thinking=True
+ enable_thinking=True,
+
+ size="16:9"
 
  )
 
- arun(Completions().create(request, token))
+ # arun(Completions(api_key=token).create(request))
+
+ request = ImageRequest(
+ model="qwen-image",
+ prompt="带个墨镜",
+ image = "https://oss.ffire.cc/files/kling_watermark.png"
+ )
+ arun(Completions(api_key=token).generate(request))
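Worth noting from the hunks above: `Completions.generate` wraps the `ImageRequest` in a streaming `CompletionRequest`, flips `chat_type` to `t2i` (or `image_edit` when an input image is present), and then treats the first non-empty `delta.content` chunk as the generated image URL. A compact sketch of that extraction loop in isolation; the helper name is illustrative, the error string matches the source:

```python
# Illustrative helper: the same URL-extraction loop Completions.generate runs on the stream.
async def first_image_url(stream) -> str:
    async for chunk in stream:
        # the Qwen backend streams the image URL back as ordinary delta content
        if chunk.choices and (url := chunk.choices[0].delta.content):
            return url
    raise Exception("qwen-image 生成失败, 请重试")
```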
meutils/data/VERSION CHANGED
@@ -1 +1 @@
- 2025.08.19.22.03.25
+ 2025.08.20.18.38.45
@@ -286,7 +286,7 @@ async def check_token_for_zhipu(api_key, threshold: float = 1, resource_package_
  try:
  client = AsyncOpenAI(base_url="https://bigmodel.cn/api/biz/tokenAccounts/list", api_key=api_key)
  data = await client.get("", cast_to=object)
- logger.debug(bjson(data))
+ # logger.debug(bjson(data))
 
  if resource_package_name:
  # print(str(data))
@@ -298,8 +298,8 @@ async def check_token_for_zhipu(api_key, threshold: float = 1, resource_package_
  # "resourcePackageName": "【新用户专享】200万GLM-Z1-Air推理资源包",
  # "resourcePackageName": "【新用户专享】30次Vidu系列视频生成次包",
  for d in data["rows"]:
- if resource_package_name.lower() in d["resourcePackageName"].lower():
- # logger.debug(bjson(d))
+ if resource_package_name.lower() in d["resourcePackageName"].lower() and d['status'] != "EXPIRED":
+ logger.debug(bjson(d))
  return d["tokenBalance"] >= threshold
  else:
  logger.debug(bjson(data))
@@ -7,12 +7,13 @@
  # @WeChat : meutils
  # @Software : PyCharm
  # @Description :
- import json
+ from aiostream import stream
 
  from meutils.pipe import *
  from meutils.io.files_utils import to_url, to_url_fal
  from meutils.str_utils.json_utils import json_path
- from meutils.schemas.openai_types import CompletionRequest
+ from meutils.llm.openai_utils import create_chat_completion
+ from meutils.schemas.openai_types import CompletionRequest, ChatCompletion
  from meutils.schemas.image_types import ImageRequest
  from meutils.llm.openai_utils import chat_completion, chat_completion_chunk, create_chat_completion_chunk
 
@@ -39,14 +40,31 @@ async def chat_for_image(
  chat_completion.choices[0].message.content = "请设置`stream=True`"
  return chat_completion
 
+ # request.stream = True # 流转非流
+ # response = await chat_for_image(generate, request, api_key)
+ # chunks = await stream.list(response)
+ #
+ # logger.debug(chunks)
+ #
+ # if chunks and isinstance(chunks[0], ChatCompletion):
+ # response = chunks[0]
+ # else:
+ # response = create_chat_completion(chunks)
+ # return response
+ image = None
  prompt = request.last_user_content
  if request.last_urls: # image_url
- urls = await to_url_fal(request.last_urls["image_url"], content_type="image/png")
- prompt = "\n".join(urls + [prompt])
+ if request.model.startswith('fal'):
+ urls = await to_url_fal(request.last_urls["image_url"], content_type="image/png") # 国外友好
+ prompt = "\n".join(urls + [prompt])
+ else:
+ urls = await to_url(request.last_urls["image_url"], content_type="image/png")
+ image = urls[-1]
 
  request = ImageRequest(
  model=request.model,
  prompt=prompt,
+ image=image
  )
 
  future_task = asyncio.create_task(generate(request)) # 异步执行
@@ -65,7 +83,6 @@ async def chat_for_image(
  return chunks
 
 
-
  async def chat_for_video(
  get_task: Callable, # response
  taskid: str,
@@ -100,7 +117,7 @@ async def chat_for_video(
  else:
  yield "🚀"
 
- chunks = create_chat_completion_chunk(gen())
+ chunks = create_chat_completion_chunk(gen(), chat_id=taskid)
  return chunks
 
 
@@ -36,6 +36,8 @@ def calculate_min_resolution(w, h):
 
 
  def size2aspect_ratio(size):
+ if not size: return "1:1"
+
  if 'x' in size:
  w, h = size.split('x')
  w, h = calculate_min_resolution(w, h)
@@ -49,3 +51,4 @@ if __name__ == '__main__':
  print(size2aspect_ratio("1920x1080"))
  print(size2aspect_ratio("1920:1080"))
  print(size2aspect_ratio("1024x1024"))
+ print(size2aspect_ratio("16:9"))
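The added `if not size: return "1:1"` guard means a missing or empty size now falls back to a square aspect ratio instead of failing on the `'x' in size` check, which matters when `ImageRequest.size` is unset on the new qwen-image path. A quick sanity check, assuming the fallback behaves as written:

```python
# Assumed behavior of the new guard (not taken from this diff's test prints).
print(size2aspect_ratio(None))  # -> "1:1"
print(size2aspect_ratio(""))    # -> "1:1"
```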
meutils/oss/ali_oss.py CHANGED
@@ -11,6 +11,7 @@
  import oss2
 
  from meutils.pipe import *
+ from meutils.caches import rcache
  from meutils.io.files_utils import guess_mime_type
  from meutils.config_utils.lark_utils import get_next_token_for_polling
 
@@ -32,11 +33,10 @@ url = "https://chat.qwen.ai/api/v1/files/getstsToken"
 
 
  async def get_sts_token(filename, filetype: Optional[str] = None, token: Optional[str] = None):
- token = token or await get_next_token_for_polling(feishu_url=FEISHU_URL, from_redis=True)
+ token = token or await get_next_token_for_polling(feishu_url=FEISHU_URL)
 
  filetype = filetype or guess_mime_type(filename).split('/')[0]
 
-
  payload = {
  "filename": filename,
  "filetype": filetype, # file video audio
@@ -52,8 +52,8 @@ async def get_sts_token(filename, filetype: Optional[str] = None, token: Optiona
  return response.json()
 
 
- async def qwenai_upload(file, filetype: Optional[str] = None): # todo: 自动猜测类型
- params = await get_sts_token(file_name, filetype)
+ async def qwenai_upload(file, filetype: Optional[str] = None, token: Optional[str] = None): # todo: 自动猜测类型
+ params = await get_sts_token(file_name, filetype, token)
 
  access_key_id = params['access_key_id']
  access_key_secret = params['access_key_secret']
@@ -87,4 +87,3 @@ if __name__ == '__main__':
 
  # arun(get_sts_token(file_name))
  file_url = arun(qwenai_upload(file_name))
-
@@ -108,7 +108,7 @@ class ImageRequest(BaseModel): # openai
  guidance: Optional[float] = None
  steps: Optional[int] = None
 
- controls: Optional[dict] = None # 额外参数
+ controls: Optional[dict] = None # 额外参数
 
  safety_tolerance: Optional[int] = None
 
@@ -116,6 +116,9 @@ class ImageRequest(BaseModel): # openai
 
  user: Optional[str] = None # to_url_fal
 
+ image: Optional[str] = None # url b64
+ watermark: Optional[bool] = None
+
  def __init__(self, /, **data: Any):
  super().__init__(**data)
 
@@ -618,4 +621,4 @@ if __name__ == '__main__':
 
 
  r = ImageRequest(**data)
- print(ImagesResponse())
+ # print(ImagesResponse())
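Finally, `ImageRequest` gains optional `image` and `watermark` fields; this is what lets `chat_for_image` and the Qwen image path above carry an input image on the request object itself. A minimal construction sketch (values are illustrative, taken from the examples in this diff):

```python
# Minimal sketch using the new fields; watermark value is an assumption for illustration.
from meutils.schemas.image_types import ImageRequest

request = ImageRequest(
    model="qwen-image",
    prompt="带个墨镜",
    image="https://oss.ffire.cc/files/kling_watermark.png",  # url or b64, per the new field comment
    watermark=False,
)
```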