MeUtils-2025.9.9.19.39.12-py3-none-any.whl → MeUtils-2025.9.11.19.16.26-py3-none-any.whl

This diff shows the changes between two publicly released versions of this package as they appear in their respective public registries; it is provided for informational purposes only.
@@ -50,7 +50,6 @@ data = {
  # 'model': 'step-1x-medium',
  'model': 'seededit',

-
  # 'n': 2, 'quality': 'hd', 'response_format': 'url', 'size': '1024x1024', 'style': 'vivid',
  # 'extra_body': {'guidance_scale': 4.5, 'num_inference_steps': 25, 'seed': None, 'negative_prompt': None}
  }
@@ -132,14 +131,20 @@ with timer('image'):

  client = OpenAI(

- api_key=os.getenv("FFIRE_API_KEY"),
- base_url=os.getenv("FFIRE_BASE_URL"),
+ # api_key=os.getenv("FFIRE_API_KEY"),
+ # base_url=os.getenv("FFIRE_BASE_URL"),

  # api_key="6b419ce2-096c-44ce-b2f5-0914ee8f3cf8",
  # base_url=os.getenv("VOLC_BASE_URL")

  # api_key=os.getenv("OPENAI_API_KEY") +,
  # api_key=os.getenv("OPENAI_API_KEY") + "-359", # 3083
+ # api_key=os.getenv("OPENAI_API_KEY") + "-21227", # 3083
+
+ base_url="https://api.pisces.ink/v1",
+ # base_url="https://all.chatfire.cn/pisces/v1",
+ api_key="pisces-76e2d5da837d4575847abc06bda84d200"
+
  # api_key=os.getenv("OPENAI_API_KEY_OPENAI") + "-3083",

  # api_key=os.getenv("SILICONFLOW_API_KEY"),
@@ -206,19 +211,21 @@ with timer('image'):
  # model = "recraft-v3"
  # model = "fal-ai/recraft-v3"
  # model = "flux-pro-1.1-ultra"
- prompt = '一只可爱的边牧在坐公交车,卡通贴纸。动漫3D风格,超写实油画,超高分辨率,最好的质量,8k'
- # prompt = "一个小女孩举着横幅,上面写着“新年快乐”"
+ # prompt = '一只可爱的边牧在坐公交车,卡通贴纸。动漫3D风格,超写实油画,超高分辨率,最好的质量,8k'
+ prompt = "裸体女孩"

  model = "doubao-seedream-3-0-t2i-250415"
  model = "black-forest-labs/FLUX.1-Krea-dev"
  model = "Qwen/Qwen-Image"
  model = "MusePublic/FLUX.1-Kontext-Dev"
- model = "doubao-seedream-4-0-250828"
+ model = "doubao-seedream-4-0-250828" # https://ark.cn-beijing.volces.com/api/v3/chat/completions
  # model = "DiffSynth-Studio/FLUX.1-Kontext-dev-lora-SuperOutpainting"
  # model = "DiffSynth-Studio/FLUX.1-Kontext-dev-lora-highresfix"
  # # model = "black-forest-labs/FLUX.1-Kontext-dev"
  # model="DiffSynth-Studio/FLUX.1-Kontext-dev-lora-ArtAug"

+ model = "gemini-2.5-flash-image-preview"
+
  # flux-kontext-dev

  response = client.images.generate(
@@ -259,3 +266,6 @@ with timer('image'):
  # openai.APIStatusError: Error code: 402 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details', 'type': 'quota_exceeded'}}


+
+ client.images.edit()
+ client.images.generate()
@@ -17,8 +17,8 @@ from meutils.str_utils import parse_base64
  # gets API Key from environment variable OPENAI_API_KEY
  client = OpenAI(
  # base_url="https://openrouter.ai/api/v1",
- base_url="https://all.chatfire.cn/openrouter/v1",
- api_key=os.getenv("OPENROUTER_API_KEY"),
+ # base_url="https://all.chatfire.cn/openrouter/v1",
+ # api_key=os.getenv("OPENROUTER_API_KEY"),
  #
  # base_url="http://38.46.219.252:9001/v1",
  #
@@ -28,6 +28,9 @@ client = OpenAI(
  # api_key = "sk-qOpbMHesasoVgX75ZoeEeBEf1R9dmsUZVAPcu5KkvLFhElrn"
  # api_key="sk-MAZ6SELJVtGNX6jgIcZBKuttsRibaDlAskFAnR7WD6PBSN6M",
  # base_url="https://new.yunai.link/v1"
+
+ base_url="https://api.pisces.ink/v1",
+ api_key="pisces-03dedafafcbf4c1b9c87530858510932"
  )

  # (content=' \n'
@@ -40,17 +43,18 @@ completion = client.chat.completions.create(
  # model="openai/o1",
  # model="deepseek/deepseek-r1-0528-qwen3-8b:free",
  # model="google/gemini-2.5-flash-image-preview:free",
- model="deepseek/deepseek-chat-v3.1:free",
+ # model="deepseek/deepseek-chat-v3.1:free",
  # model="gemini-2.0-flash-exp-image-generation",
+ model="gemini-2.5-flash-image-preview",
  max_tokens=10,
- extra_body={"reasoning_stream": True},
+ # extra_body={"reasoning_stream": True},
  messages=[
  {
  "role": "user",
  "content": [
  {
  "type": "text",
- "text": "hi"
+ "text": "画条狗"
  },
  # {
  # "type": "image_url",
@@ -66,12 +70,10 @@ print(completion.choices[0].message.content)
  # arun(to_url(completion.choices[0].message.images[0]['image_url']['url'], content_type="image/png"))


-
  b64_list = parse_base64(completion.choices[0].message.content)

  arun(to_url(b64_list, content_type="image/png"))

-
  # '好的,旁边加一只戴墨镜的狗。\n\n![image](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAADwf7zUAAAgAElEQ'
  # arun(to_url(completion.choices[0].message.images[0]['image_url']['url'], content_type="image/png"))

@@ -38,7 +38,7 @@ async def create_task(request: VideoRequest, api_key: Optional[str] = None):
  proxy = await get_one_proxy()
  client = ZhipuAI(
  api_key=api_key,
- http_client=httpx.Client(proxy=proxy)
+ # http_client=httpx.Client(proxy=proxy)

  ) # 请填写您自己的APIKey
  response = client.videos.generations(
@@ -109,7 +109,7 @@ async def generate(request: ImageRequest, n: int = 30): # 兼容dalle3

  # VideoResult
  if __name__ == '__main__':
- api_key = "03bdb799cbcb4a8cac609f9d5ebe02e7.snp1yga8VEEzO2bk"
+ api_key = "47e81002bdb748f7a6e59c1e3ab2bf5d.UQ0GcNPerKv0H5zY"

  # api_key = "c98aa404b0224690b211c5d1e420db2c.qGaByuJATne08QUx"
  # api_key = "7d10426c06afa81e8d7401d97781249c.DbqlSsicRtaUdKXI" # 新号
@@ -259,7 +259,7 @@ if __name__ == '__main__':

  #
  prompt = "https://v3.fal.media/files/penguin/XoW0qavfF-ahg-jX4BMyL_image.webp https://v3.fal.media/files/tiger/bml6YA7DWJXOigadvxk75_image.webp Put the little duckling on top of the woman's t-shirt."
- # prompt = '把小鸭子放在女人的T恤上面。\nhttps://s3.ffire.cc/cdn/20250530/tEzZKkhp3tKbNzva6mgC2T\nhttps://s3.ffire.cc/cdn/20250530/AwHJpuJuNg5w3sVbH4PZdv'
+ # prompt = '把小鸭子放在女人的T恤上面。\nhttps://s3.ffire.cc/cdn/20250530/tEzZKkhp3tKbNzva6mgC2T\nhttps://s3.ffire.cc/cdn/20250530/AwHJpuJuNg5w3sVbH4PZdv'
  request = ImageRequest(prompt=prompt, model=model)

  data = {
@@ -7,10 +7,12 @@
  # @WeChat : meutils
  # @Software : PyCharm
  # @Description : 统一收口
+ import os

  from meutils.pipe import *
  from meutils.llm.clients import AsyncClient
  from meutils.llm.openai_utils import to_openai_params
+ from meutils.io.files_utils import to_png, to_url_fal, to_url

  from meutils.schemas.image_types import ImageRequest, RecraftImageRequest

@@ -27,15 +29,19 @@ from meutils.apis.qwen.chat import Completions as QwenCompletions
  from meutils.apis.google.chat import Completions as GoogleCompletions
  from meutils.apis.google.images import openai_generate

+ from meutils.apis.volcengine_apis.videos import get_valid_token as get_valid_token_for_volc
+

  async def generate(
  request: ImageRequest,
  api_key: Optional[str] = None,
  base_url: Optional[str] = None,
  ):
- logger.debug(request)
+ if len(str(request)) < 1024:
+ logger.debug(request)

- if request.model.startswith("fal-ai"): # 主要 request.image
+ if request.model.startswith("fal-ai"): # 国外fal
+ request.image = await to_url_fal(request.image, content_type="image/png")
  return await fal_generate(request, api_key)

  if request.model.startswith(("recraft",)):
@@ -73,11 +79,29 @@ async def generate(
  }
  request = ImageRequest(**data)
  if request.model.startswith("doubao"):
+ base_url = base_url or os.getenv("VOLC_BASE_URL")
+ api_key = api_key or await get_valid_token_for_volc()
+ api_key = np.random.choice(api_key.split())
+
  request.watermark = False
- if request.image and not request.model.startswith("doubao-seedream-4") and isinstance(request.image, list):
+ if request.model.startswith("doubao-seedream-4"):
+ if request.image and not any(i in str(request.image) for i in {".png", ".jpeg", "image/png", "image/jpeg"}):
+ logger.debug(f"{request.model}: image 不是 png 或 jpeg 格式,转换为 png 格式")
+ request.image = await to_png(request.image, response_format='b64')
+
+ if request.n > 1:
+ request.sequential_image_generation = "auto"
+ request.sequential_image_generation_options = {
+ "max_images": request.n
+ }
+ elif request.image and isinstance(request.image, list):
  request.image = request.image[0]

  data = to_openai_params(request)
+
+ if len(str(data)) < 1024:
+ logger.debug(bjson(data))
+
  client = AsyncClient(api_key=api_key, base_url=base_url)
  return await client.images.generate(**data)

@@ -89,4 +113,31 @@ if __name__ == '__main__':
  # arun(generate(ImageRequest(model="FLUX_1-Krea-dev", prompt="笑起来")))

  token = f"""{os.getenv("VOLC_ACCESSKEY")}|{os.getenv("VOLC_SECRETKEY")}"""
- arun(generate(ImageRequest(model="seed", prompt="笑起来"), api_key=token))
+ # arun(generate(ImageRequest(model="seed", prompt="笑起来"), api_key=token))
+
+ request = ImageRequest(model="doubao-seedream-4-0-250828", prompt="a dog")
+
+ # request = ImageRequest(
+ # model="doubao-seedream-4-0-250828",
+ # prompt="将小鸭子放在t恤上",
+ # image=[
+ # "https://v3.fal.media/files/penguin/XoW0qavfF-ahg-jX4BMyL_image.webp",
+ # "https://v3.fal.media/files/tiger/bml6YA7DWJXOigadvxk75_image.webp"
+ # ]
+ # )
+
+ # todo: tokens 4096 1张
+
+ # 组图
+ # request = ImageRequest(
+ # model="doubao-seedream-4-0-250828",
+ # prompt="参考这个LOGO,做一套户外运动品牌视觉设计,品牌名称为GREEN,包括包装袋、帽子、纸盒、手环、挂绳等。绿色视觉主色调,趣味、简约现代风格",
+ # image="https://ark-project.tos-cn-beijing.volces.com/doc_image/seedream4_imageToimages.png",
+ # n=3
+ # )
+
+ # arun(generate(request, api_key=os.getenv("FFIRE_API_KEY"), base_url=os.getenv("FFIRE_BASE_URL"))) # +"-29494"
+
+ # print(not any(i in str(request.image) for i in {".png", ".jpeg", "image/png", "image/jpeg"}))
+
+ arun(generate(request)) # +"-29494"
@@ -28,16 +28,13 @@ FEISHU_URL = "https://xchatllm.feishu.cn/sheets/GYCHsvI4qhnDPNtI4VPcdw2knEd?shee

  async def get_valid_token(tokens: Optional[list] = None, batch_size: Optional[int] = None, seed: int = 0):
  """返回 tokens
- 1. 从redis获取token
- 2. 校验token
- 3. 校验失败则从飞书获取token
- 4. 校验token
- 5. 校验通过则返回token
+ api_key = np.random.choice(api_key.split())
+
  """
  tokens = tokens or await get_series(FEISHU_URL, duplicated=True)
- batch_size = batch_size or 1
+ batch_size = batch_size or 1

- if seed == 0 and (volc_tokens := await redis_aclient.get(f"volc_tokens")):
+ if seed == 0 and (volc_tokens := await redis_aclient.get(f"volc_tokens")): # 刷新
  return volc_tokens.decode()

  valid_tokens = []
@@ -48,6 +45,7 @@ async def get_valid_token(tokens: Optional[list] = None, batch_size: Optional[in
  if len(valid_tokens) == batch_size:
  _ = '\n'.join(valid_tokens)
  await redis_aclient.set(f"volc_tokens", _, ex=2 * 3600)
+
  return _


@@ -266,7 +264,7 @@ c4356b58-4aa3-4a52-b907-b40c4dd2e502
  # arun(get_task_from_feishu(ids, ))

  # arun(get_valid_token(['a93ea9a5-3831-47b8-863a-57e10233f922']))
- arun(get_valid_token(batch_size=2, seed=3))
+ arun(get_valid_token(random_choice=True))

  """
  {'id': 'cgt-20250613160030-2dvd7',
meutils/data/VERSION CHANGED
@@ -1 +1 @@
- 2025.09.09.19.39.12
+ 2025.09.11.19.16.26
meutils/io/files_utils.py CHANGED
@@ -21,6 +21,7 @@ from meutils.oss.minio_oss import Minio
  from starlette.datastructures import UploadFile
  from contextlib import asynccontextmanager
  from httpx import AsyncClient
+ from PIL import Image


  def guess_mime_type(file):
@@ -154,7 +155,7 @@ async def to_url(
  content_type: Optional[str] = None,
  mime_type: Optional[str] = None,

- ):
+ ): # 传入 url 是否 转存
  if not file: return

  content_type = content_type or mime_type
@@ -260,6 +261,27 @@ async def get_file_duration(filename: str = ".mp4", url: Optional[str] = None, c
  return int(np.ceil(tag.duration or 10))


+ async def to_png(image: Union[bytes, List[bytes], str, List[str]], response_format: str = 'bytes') -> bytes:
+ """
+ 将 WebP 二进制数据无损转换为 PNG 二进制数据
+ """
+ if isinstance(image, list):
+ tasks = [to_png(_, response_format) for _ in image]
+ return await asyncio.gather(*tasks)
+
+ if isinstance(image, str):
+ image = await to_bytes(image)
+
+ with Image.open(io.BytesIO(image)) as im:
+ out = io.BytesIO()
+ im.save(out, format="PNG") # 无损
+
+ if response_format != "bytes":
+ return await to_base64(out.getvalue(), content_type="image/png")
+
+ return out.getvalue()
+
+
  if __name__ == '__main__':
  # import tempfile
  #
@@ -342,12 +364,16 @@ if __name__ == '__main__':
  # arun(get_file_duration(url=url))

  # r = arun(to_url([]))
- text = "这是一个示例文本,包含一个图片:![image](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAADwf7zUAAAgAElEQ) 这张图片很棒。"
+ # text = "这是一个示例文本,包含一个图片:![image](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAADwf7zUAAAgAElEQ) 这张图片很棒。"
+ #
+ # arun(markdown_base64_to_url(
+ # text=text,
+ # # pattern=r'!\[.*?\]\((data:image/.*?)\)'
+ # # pattern=r'!\[.*?\]\((.*?)\)'
+ #
+ # )
+ # )

- arun(markdown_base64_to_url(
- text=text,
- # pattern=r'!\[.*?\]\((data:image/.*?)\)'
- # pattern=r'!\[.*?\]\((.*?)\)'
+ webp_bytes = Path("/Users/betterme/PycharmProjects/AI/MeUtils/meutils/apis/images/image1.webp").read_bytes()

- )
- )
+ arun(to_png(webp_bytes, response_format='b64'))
@@ -214,7 +214,7 @@ async def check_token_for_sophnet(api_key, threshold: float = 1):

  #
  @retrying()
- # @rcache(ttl=7 * 24 * 3600, skip_cache_func=skip_cache_func)
+ @rcache(ttl=7 * 24 * 3600, skip_cache_func=skip_cache_func)
  async def check_token_for_volc(api_key, threshold: float = 1, purpose: Optional[str] = None):
  if not isinstance(api_key, str):
  return await check_tokens(
@@ -262,7 +262,6 @@ async def check_token_for_volc(api_key, threshold: float = 1, purpose: Optional[
  response_format="url"
  )
  logger.debug(response.json())
- response.raise_for_status()

  else:

@@ -45,7 +45,7 @@ thinking_budget_mapping = {
  }

  COOKIE = """
- cna=KP9DIEqqyjUCATrw/+LjJV8F; _bl_uid=LXmp28z7dwezpmyejeXL9wh6U1Rb; cnaui=310cbdaf-3754-461c-a3ff-9ec8005329c9; aui=310cbdaf-3754-461c-a3ff-9ec8005329c9; sca=43897cb0; _gcl_au=1.1.106229673.1748312382.56762171.1748482542.1748482541; xlly_s=1; x-ap=ap-southeast-1; acw_tc=0a03e53917509898782217414e520e5edfcdef667dcbd83b767c0ce464fad4; token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTM1ODE4ODV9.Npy24ubI717JmdSWMrodWSvVRHENgbJ7Knd-Yf158YE; atpsida=705b922fe336ee0d63fcc329_1750989888_2; SERVERID=e8c2af088c314df080fffe7d0976a96b|1750989892|1750910540; tfstk=gGtsWsqG4IKUeosYhNDUAMIBJRIbcvoz6-6vEKEaHGIOG-O2eZBabAYXRIR16hSOMpQpNtDMbtpTlWd2wNEAWA4XAOWy0FJtS6Ef3IDMbiQvps65XZYNg15fcKASLbor4dvGmGlra0WjM37NqSBAMS5d9TSfBJ35KivGmihEsEHyxdAMR0lwBiHCvt6uMiBYDMHC3TXOD1QY9yBR9iIAktIdpOX0DlCYWv9dtOsAMIQtdMChHfD7Ftg1sdMwtHJ00Jm2p6ZYDH6Ki1p6F9XBAwQOwwCQD9-CCN1JBhJB9QBXy3_MwXzN6UTkNTRZvlOWBCTRyhFKOivePI6WXYU5GCvpbwKt3zXhmFLRXnG76ppJBeLJBXzCdepwAw--No_MJCYllnlEqG8yUnbJXcNlTaXXNGLI9lOR4urPNGl0lJ_uc91rdva0oJN5AmdFjVAhW9X18vMQ6EbOK96ndva0oNBhCOMId5Lc.; isg=BNfX7gH7c3OJX_gfCBykQ2rtZk0hHKt-YCofVCkEq6YJWPSaPe8Dz9o-uvjGsIP2; ssxmod_itna=iqGxRDuQqWqxgDUxeKYI5q=xBDeMDWK07DzxC5750CDmxjKidKDUGQq7qdOamuu9XYkRGGm01DBL4qbDnqD80DQeDvYxk0K4MUPhDwpaW8YRw3Mz7GGb48aIzZGzY=0DgSdfOLpmxbD884rDYoDCqDSDxD99OdD4+3Dt4DIDAYDDxDWCeDBBWriDGpdhmbQVqmqvi2dxi3i3mPiDit8xi5bZendVL4zvDDlKPGf3WPt5xGnD0jmxhpdx038aoODzLiDbxEY698DtkHqPOK=MlTiRUXxAkDb9RG=Y2U3iA4G3DhkCXU3QBhxCqM2eeQmkeNzCwkjw/006DDAY2DlqTWweL04MKBeHhY5om5NUwYHuFiieQ0=/R=9iO9xTBhND4KF4dvyqz0/toqlqlzGDD; ssxmod_itna2=iqGxRDuQqWqxgDUxeKYI5q=xBDeMDWK07DzxC5750CDmxjKidKDUGQq7qdOamuu9XYkRGGmibDG85+YNY=exGa3Y64u5DBwiW7r++DxFqCdl=l77NQwckyAaCG64hkCOjO1pkcMRBdqj70N7nk=e94KEQYUxlf+2Dw=ViA+XKDde0uGS+eXgFkQqzYWe0Dd4oGbUj8L4QY4og345X2DjKDNOfQRgfeIKVRFQjqR098dBUrQsXBNQZcG1oBFAp4xkLYHl+W3OQW9ybPF4sML3t1tPX2T4DmCqKL+jN1XX94xpyA6k9+sgyBFY4zXOq7dHOuO3Gd3lidwdrk=8dNrOdrYQo33fobVS=MRF7nNQBC5d3kBbYdwtoxNBKmBiXoTfOTzOp3MT=ODXhxfO16Tta4vSW=ubtkEGgeQ/gKOwsVjmKDEY0NZ+ee7xlitvWmBbtk7ma7x1PinxtbitdadtYQOqG5AFEZbFxiSE6rDky7jiatQ0Fe7z6uDmYx4z5MGxMA5iDY7DtSLfNUYxU44D
+ _gcl_au=1.1.1093269050.1756349377;xlly_s=1;_bl_uid=LXmp28z7dwezpmyejeXL9wh6U1Rb;cnaui=310cbdaf-3754-461c-a3ff-9ec8005329c9;isg=BCQknTSrEFaWtGtm_x0nSvW89SQWvUgnEt_awT5Fz--w6cezYs52t7gDqUFxMYB_;ssxmod_itna2=1-iqGhD50IThkG8Dhx_xmuxWKUt_EoG7DzxC5KY0CDmxjKidDRDB40QRTnf_Ti=qaeGrMwxrDPxD3r5iY80q7DFg0WeDBk4uAn8mY3vKFTl7S9o7EaoSeXnAOPBSok57ccC4rhgutjg2_8D7jk_lChKSSdfMbm2lKAUlRwIqjSxeNld4tMGxFev6zkUel_6LR_foIGQa5L4PuCGa6dUqvwISCODQh2TC6wQ1Hu=Ll=W4=W6s1E_V8Dr_1gDECghksL8zvQHiPI60ChnPodSvFnHjE2iXzGDdE_I5876eQ03cEzaFsA48KQLReNjiPp1I0EfN=5a=dziPIxXxcpup5zmGM2L48PYjAqiUxrwVQDY4vN=Ni_pu6pxOYEBNL7YA6RPcRy7Ak=Y5PPbZRAi242ulCDx3oZCGH2YE6p3lD4gPIOKMWX6AbmhYLrcMeXcYdjzGgaYEDq4DUtKP/1jf=vXt=MQXoZ23BWlE5h06cjceY_Bxw3AH3KeBaxT4pHEt19QSlaO20G9DfDq7Wf3BvV=5X/BYd54Y44nUHOfH_fV2mHKNz0W4lxjWDjY9H3m00I3cOIf3C6q7Y_CNx1sG1nwsiDUA34QDtsC8jw1YD;sca=aefac4ee;acw_tc=0a03e54a17574902359765651e1e9cf9780667ec2a2cefacce63a0b954bb63;atpsida=7e2e4dcfcd1c4a4530dd3395_1757490275_2;aui=310cbdaf-3754-461c-a3ff-9ec8005329c9;cna=KP9DIEqqyjUCATrw/+LjJV8F;ssxmod_itna=1-iqGhD50IThkG8Dhx_xmuxWKUt_EoG7DzxC5KY0CDmxjKidDRDB40QRTnf_Ti=qaeGrMwxrDyxGXoNexiNDAg40iDCbmLx5Yjdrq4NFtrojKaIjL4Q43rj9_8m0tnY/qmUTMU6Rljs7s66tqGI_DG2DYoDCqDS0DD99OdD4_3Dt4DIDAYDDxDWIeDB4ZrQDGPtLQut5eqKwBrtxi3QWWPQDiUCxivW56Wt5C_GwDDliPGfWepi20iDbqDuzKqaBeDLRPpvxB=PxYPmjmkUxBQGHniO/WWb2BkrGOGBKim6mTeM0O_qBGGhfyDGxNI0eYT44YxS4VQYQjytGBFDDWgL4_AzKl_TYx_7CIq13b1_BDCgtIQi_GK=DKoBddAQ2wmlBx/DYfjGeAzNA4aDhLlxtlzaiDD;tfstk=geyx-Dg4-gKxmZY2DPflIb1e9VjuX_qVPrrBIV0D1zU85rs2IlGD5dU-rirmhZz8VyUp5diXCbw8-cTbnog6NdUtyi8XG1y-XPUwhK1V7utSbmnbWlSqfVa_WVVcZ9q40Ak1-lBhKo8RmR1gkd9XVGitjCsoG1PJiYM1-wXoYFbWuAaGVPsqNugrXctjCPOWFD0ScEOb54GSADYjCAa12YiZcKi6GfZ5Fm0S5AM_5us-j4ijCAa_VgnZs7QtPngMBSZPyLNC_OpMI8nxMVpikdIljmcxRogRRwwJ3j3Qc4p18hgjf4iLFN8m78E7hD45EKHTVcUKh7QWHPEbTPoQlTLjDSZLxbeNRdnzNrloD7I6NqZIlrhY9Usx7-4YBjyC-Lubw8ytn7bvKVl_Z-lUna9xdWrmn5a5fdgTNcsPDJ2LcuTnJcAf2gdw_jirJjsq62sn5Go--iB9_ClP403h2wOw_jir22jA-CRZag5..;token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTgwOTUwMzd9.JdVvyPkln2HcGm6ib0FKaF1qQ87lG1nf70oezhYZ2Jg;x-ap=cn-hongkong
  """.strip()


@@ -396,7 +396,7 @@ if __name__ == '__main__':
  )
  token = None

- token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTc0ODczMDd9.7TQ9NicXYxghzI7EP3cPMFqa5j-09Sz1B9s3SnKZvkE"
+ token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTgwOTUwMzd9.JdVvyPkln2HcGm6ib0FKaF1qQ87lG1nf70oezhYZ2Jg"

  arun(create(request, token))

@@ -31,6 +31,9 @@ models_mapping = {
  "kimi-k2-250711": "moonshotai/Kimi-K2-Instruct",
  "kimi-k2-0711-preview": "moonshotai/Kimi-K2-Instruct",

+ "kimi-k2-250905": "moonshotai/Kimi-K2-Instruct-0905",
+ "kimi-k2-0905-preview": "moonshotai/Kimi-K2-Instruct-0905",
+
  "qwen2.5-vl-32b-instruct": "Qwen/Qwen2.5-VL-32B-Instruct",
  "qwen2.5-vl-72b-instruct": "Qwen/Qwen2.5-VL-72B-Instruct",
  "qwen2.5-vl-7b-instruct": "Qwen/Qwen2.5-VL-7B-Instruct",
@@ -63,6 +66,7 @@ def get_models_mapping():
  models = client.models.list().data
  models = {
  m.id.removeprefix("Pro/").split('/', maxsplit=1)[-1].lower(): m.id.removeprefix("Pro/") for m in models
+ if any(i not in m.id.lower() for i in {"stable-diffusion"})
  }
  return {**models, **models_mapping}

@@ -7,6 +7,7 @@
  # @WeChat : meutils
  # @Software : PyCharm
  # @Description :
+ import shortuuid
  from aiostream import stream

  from meutils.pipe import *
@@ -39,8 +40,12 @@ async def chat_for_image(
  if request.model.startswith('fal'):
  urls = await to_url_fal(image_urls, content_type="image/png") # 国外友好
  image = urls
+
+ elif request.model.startswith("doubao-seed"):
+ image = image_urls # b64
+
  else:
- urls = await to_url(image_urls, content_type="image/png") # 数组
+ urls = await to_url(image_urls, ".png", content_type="image/png") # 数组
  image = urls

  image_request = ImageRequest(
@@ -91,7 +96,11 @@ async def chat_for_image(
  future_task = asyncio.create_task(generate(image_request)) # 异步执行

  async def gen():
- text = image_request.model_dump_json(exclude_none=True).replace("free", "")
+ exclude = None
+ if len(str(image_request.image)) > 1000:
+ exclude = {"image"}
+
+ text = image_request.model_dump_json(exclude_none=True, exclude=exclude).replace("free", "")
  for i in f"""> 🖌️正在绘画\n\n```json\n{text}\n```\n\n""":
  await asyncio.sleep(0.05)
  yield i
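A reduced sketch of the trick used in `gen()` above: when the `image` field holds a large base64 payload, it is dropped from the streamed JSON preview via Pydantic's `exclude`. The model below is a stand-in, not the real `ImageRequest`:

```python
from typing import List, Optional, Union

from pydantic import BaseModel


class Req(BaseModel):  # stand-in for ImageRequest
    model: str = ""
    prompt: str = " "
    image: Optional[Union[str, List[str]]] = None


req = Req(model="doubao-seedream-4-0-250828", prompt="a dog",
          image="data:image/png;base64," + "A" * 5000)

exclude = {"image"} if len(str(req.image)) > 1000 else None
print(req.model_dump_json(exclude_none=True, exclude=exclude))
# {"model":"doubao-seedream-4-0-250828","prompt":"a dog"}
```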
@@ -155,6 +164,7 @@ async def chat_for_video(

  if __name__ == '__main__':
  from meutils.apis.images.generations import generate
+
  request = CompletionRequest(
  model="deepseek-r1-Distill-Qwen-1.5B",
  messages=[
@@ -93,9 +93,10 @@ def extract_images_from_pdf(file, output: Optional[str] = None):

  if __name__ == '__main__':
  with timer():
- # r = extract_text('x.pdf')
+ # r = extract_text('上海证券交易所证券交易业务指南第8号——科创板股票做市(上证函〔2022〕1155号 20220715)-1757338961901 (1).pdf')
+ r = extract_text('非上市公司股权估值指引(2025年修订 中证协发〔2025〕86号 20250425 20250601)-1757078360106.pdf')

- r = extract_images_from_pdf('《锋利的jQuery》(高清扫描版-有书签)_副本_加水印.pdf', 'images')
+ # r = extract_images_from_pdf('《锋利的jQuery》(高清扫描版-有书签)_副本_加水印.pdf', 'images')

  # import tiktoken
  # print(tiktoken.encoding_for_model('gpt-3.5-turbo'))
@@ -84,22 +84,34 @@ class ImagesResponse(_ImagesResponse):
  class ImageRequest(BaseModel): # openai
  """
  图生图 两种方式: prompt + controls
+
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+ model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+
  """
  model: str = ''

  prompt: constr(min_length=1, max_length=10240) = " "

- n: Optional[int] = 1
+ moderation: Optional[Literal["low", "auto"]] = None

- quality: Optional[Literal["standard", "hd"]] = None
- style: Union[str, Literal["vivid", "natural"]] = None
+ style: Optional[Union[str, Literal["vivid", "natural"]]] = None
+ background: Optional[Union[str, Literal["transparent", "opaque", "auto"]]] = None
+ input_fidelity: Optional[Union[str, Literal["high", "low"]]] = None
+ quality: Optional[Union[str, Literal["standard", "low", "medium", "high", "auto"]]] = None

  # 测试默认值 Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]]
+ n: Optional[int] = 1
  size: Optional[str] = '1024x1024' # null auto

+ output_format: Optional[Literal["png", "jpeg", "webp"]] = None
  response_format: Optional[Literal["url", "b64_json", "oss_url", "glb", "stl"]] = "url"

  seed: Optional[int] = None
+ stream: Optional[bool] = None

  # oneapi https://github.com/QuantumNous/new-api/blob/main/dto/dalle.go
  extra_fields: Optional[Any] = None ###
@@ -114,13 +126,18 @@ class ImageRequest(BaseModel): # openai

  aspect_ratio: Optional[str] = None

- user: Optional[str] = None # to_url_fal
+ user: Optional[str] = None

- image: Optional[Union[str, List[str]]] = None # url b64
+ image: Optional[Union[str, List[str], bytes, List[bytes]]] = None # url b64 file_bytes
  watermark: Optional[bool] = None

  def __init__(self, /, **data: Any):
  super().__init__(**data)
+ # 规避空字符
+ self.style = self.style or None
+ self.quality = self.quality or None
+ self.background = self.background or None
+ self.input_fidelity = self.input_fidelity or None

  if self.aspect_ratio: # 适配比例
  self.size = ASPECT_RATIOS.get(self.aspect_ratio, '1024x1024')
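A quick illustration of the 规避空字符 (empty-string guard) added to `__init__`: empty strings coming from permissive callers are coerced to `None`, so they disappear under `exclude_none` serialization. The class below is a reduced stand-in for `ImageRequest`:

```python
from typing import Any, Optional

from pydantic import BaseModel


class MiniImageRequest(BaseModel):  # reduced stand-in for ImageRequest
    quality: Optional[str] = None
    style: Optional[str] = None
    background: Optional[str] = None
    input_fidelity: Optional[str] = None

    def __init__(self, /, **data: Any):
        super().__init__(**data)
        # 规避空字符: '' becomes None, matching the guard in the hunk above
        self.quality = self.quality or None
        self.style = self.style or None
        self.background = self.background or None
        self.input_fidelity = self.input_fidelity or None


r = MiniImageRequest(quality="", style="vivid")
print(r.model_dump(exclude_none=True))  # {'style': 'vivid'}
```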
@@ -187,24 +204,41 @@ class ImageRequest(BaseModel): # openai
  class ImageEditRequest(BaseModel):
  model: Union[str, Literal["dall-e-2", "dall-e-3", "gpt-image-1"]]

+ # image: Union[FileTypes, SequenceNotStr[FileTypes]],
+ # prompt: str,
+ # mask: FileTypes | NotGiven = NOT_GIVEN,
+
  prompt: str
  image: Any # 图片

  mask: Optional[Any] = None # 图片
- background: Optional[Literal["transparent", "opaque", "auto"]] = None
+
+ background: Optional[Union[str, Literal["transparent", "opaque", "auto"]]] = None
+ input_fidelity: Optional[Union[str, Literal["high", "low"]]] = None
+ quality: Optional[Union[str, Literal["standard", "low", "medium", "high", "auto"]]] = None

  n: Optional[int] = 1
- quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] = None
  size: Optional[
  Union[str, Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]] = "1024x1024"
- response_format: Optional[Literal["url", "b64_json"]] = None

  aspect_ratio: Optional[str] = None

  user: Optional[str] = None
+ stream: Optional[bool] = None
+
+ response_format: Optional[Literal["url", "b64_json"]] = None
+ output_format: Optional[Literal["png", "jpeg", "webp"]] = None
+
+ # output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ # partial_images: Optional[int] | NotGiven = NOT_GIVEN,

  def __init__(self, /, **data: Any):
  super().__init__(**data)
+ # 规避空字符
+ self.quality = self.quality or None
+ self.background = self.background or None
+ self.input_fidelity = self.input_fidelity or None
+
  if not isinstance(self.image, list):
  self.image = [self.image]

@@ -622,6 +656,17 @@ if __name__ == '__main__':

  aspect_ratio="16:9"
  )
+
+ # "sequential_image_generation": "auto",
+ # "sequential_image_generation_options": {
+ # "max_images": 4
+ # },
+
+ request.sequential_image_generation = "auto"
+
+ request.sequential_image_generation_options = {
+ "max_images": 4
+ }
  #
  # print(request.prompt)
  #