MeUtils 2025.9.3.23.13.47__py3-none-any.whl → 2025.9.7.0.7.25__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the respective public registry.
@@ -12,23 +12,25 @@ from meutils.pipe import *
12
12
  from openai import OpenAI
13
13
  from os import getenv
14
14
  from meutils.io.files_utils import to_url
15
+ from meutils.str_utils import parse_base64
15
16
 
16
17
  # gets API Key from environment variable OPENAI_API_KEY
17
18
  client = OpenAI(
18
19
  # base_url="https://openrouter.ai/api/v1",
19
- base_url="https://all.chatfire.cn/openrouter/v1",
20
- api_key=os.getenv("OPENROUTER_API_KEY"),
20
+ # base_url="https://all.chatfire.cn/openrouter/v1",
21
+ # api_key=os.getenv("OPENROUTER_API_KEY"),
21
22
  #
22
23
  # base_url="http://38.46.219.252:9001/v1",
23
24
  #
24
25
  # api_key="sk-Azgp1thTIonR7IdIEqlJU51tpDYNIYYpxHvAZwFeJiOdVWiz"
25
26
 
26
- # base_url="https://api.huandutech.com/v1",
27
- # api_key = "sk-qOpbMHesasoVgX75ZoeEeBEf1R9dmsUZVAPcu5KkvLFhElrn"
27
+ base_url="https://api.huandutech.com/v1",
28
+ api_key = "sk-qOpbMHesasoVgX75ZoeEeBEf1R9dmsUZVAPcu5KkvLFhElrn"
28
29
  # api_key="sk-MAZ6SELJVtGNX6jgIcZBKuttsRibaDlAskFAnR7WD6PBSN6M",
29
30
  # base_url="https://new.yunai.link/v1"
30
31
  )
31
32
 
33
+ # (content=' \n'
32
34
  completion = client.chat.completions.create(
33
35
  # extra_headers={
34
36
  # "HTTP-Referer": $YOUR_SITE_URL, # Optional, for including your app on openrouter.ai rankings.
@@ -37,8 +39,8 @@ completion = client.chat.completions.create(
37
39
  # model="meta-llama/llama-3.2-11b-vision-instruct:free",
38
40
  # model="openai/o1",
39
41
  # model="deepseek/deepseek-r1-0528-qwen3-8b:free",
40
- model="google/gemini-2.5-flash-image-preview:free",
41
- # model="gemini-2.5-flash-image-preview",
42
+ # model="google/gemini-2.5-flash-image-preview:free",
43
+ model="gemini-2.5-flash-image-preview",
42
44
  # model="gemini-2.0-flash-exp-image-generation",
43
45
  # max_tokens=10,
44
46
 
@@ -48,21 +50,29 @@ completion = client.chat.completions.create(
48
50
  "content": [
49
51
  {
50
52
  "type": "text",
51
- "text": "旁边,画条狗,带个墨镜"
53
+ "text": "裸体女孩"
52
54
  },
53
- {
54
- "type": "image_url",
55
- "image_url": {
56
- "url": "https://oss.ffire.cc/files/kling_watermark.png"
57
- }
58
- }
55
+ # {
56
+ # "type": "image_url",
57
+ # "image_url": {
58
+ # "url": "https://oss.ffire.cc/files/kling_watermark.png"
59
+ # }
60
+ # }
59
61
  ]
60
62
  }
61
63
  ]
62
64
  )
63
65
  print(completion.choices[0].message.content)
64
- arun(to_url(completion.choices[0].message.images[0]['image_url']['url'], content_type="image/png"))
66
+ # arun(to_url(completion.choices[0].message.images[0]['image_url']['url'], content_type="image/png"))
67
+
68
+
69
+
70
+ b64_list = parse_base64(completion.choices[0].message.content)
71
+
72
+ arun(to_url(b64_list, content_type="image/png"))
73
+
65
74
 
75
+ # '好的,旁边加一只戴墨镜的狗。\n\n![image](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAADwf7zUAAAgAElEQ'
66
76
  # arun(to_url(completion.choices[0].message.images[0]['image_url']['url'], content_type="image/png"))
67
77
 
68
78
  # print(dict(completion.choices[0].message).keys())
@@ -193,7 +193,7 @@ class Completions(object):
193
193
  yield e
194
194
  raise e
195
195
 
196
- @retrying(max_retries=3, title=__name__)
196
+ # @retrying(max_retries=3, title=__name__)
197
197
  async def generate(self, request: ImageRequest): # OpenaiD3
198
198
  is_hd = False
199
199
  if request.model.endswith("-hd"):
@@ -240,7 +240,9 @@ class Completions(object):
240
240
  if image_response.data:
241
241
  return image_response
242
242
  else:
243
- raise Exception(f"image generate failed: {image_response}")
243
+ from fastapi import HTTPException, status
244
+ raise HTTPException(status_code=status.HTTP_451_UNAVAILABLE_FOR_LEGAL_REASONS,
245
+ detail=f"Prompt is sensitive.\n\n{response}")
244
246
 
245
247
  async def create_for_images(self, request: CompletionRequest):
246
248
 
@@ -512,8 +514,8 @@ if __name__ == '__main__':
512
514
  base_url = "https://api.huandutech.com"
513
515
  api_key = "sk-qOpbMHesasoVgX75ZoeEeBEf1R9dmsUZVAPcu5KkvLFhElrn"
514
516
 
515
- base_url = "https://new.yunai.link"
516
- api_key = "sk-MAZ6SELJVtGNX6jgIcZBKuttsRibaDlAskFAnR7WD6PBSN6M"
517
+ # base_url = "https://new.yunai.link"
518
+ # api_key = "sk-MAZ6SELJVtGNX6jgIcZBKuttsRibaDlAskFAnR7WD6PBSN6M"
517
519
 
518
520
  # arun(Completions(base_url=base_url, api_key=api_key).create_for_images(request))
519
521
  # arun(Completions(base_url=base_url, api_key=api_key).generate(request))
@@ -531,12 +533,13 @@ if __name__ == '__main__':
531
533
  # prompt="带个墨镜",
532
534
  # image="https://oss.ffire.cc/files/kling_watermark.png",
533
535
 
534
- prompt="把小鸭子放在女人的T恤上面",
536
+ # prompt="把小鸭子放在女人的T恤上面",
537
+ prompt="裸体女孩",
535
538
 
536
- image=[
537
- "https://v3.fal.media/files/penguin/XoW0qavfF-ahg-jX4BMyL_image.webp",
538
- "https://v3.fal.media/files/tiger/bml6YA7DWJXOigadvxk75_image.webp"
539
- ]
539
+ # image=[
540
+ # "https://v3.fal.media/files/penguin/XoW0qavfF-ahg-jX4BMyL_image.webp",
541
+ # "https://v3.fal.media/files/tiger/bml6YA7DWJXOigadvxk75_image.webp"
542
+ # ]
540
543
 
541
544
  )
542
545
 
@@ -18,10 +18,10 @@
18
18
  # }
19
19
 
20
20
  """
21
- import os
22
21
 
23
22
  from meutils.pipe import *
24
23
  from meutils.io.files_utils import to_url, to_base64
24
+ from meutils.str_utils import parse_base64
25
25
  from meutils.llm.clients import AsyncOpenAI
26
26
  from meutils.apis.images.edits import edit_image, ImageProcess
27
27
 
@@ -29,7 +29,7 @@ from meutils.schemas.image_types import ImageRequest, ImagesResponse
29
29
  from meutils.schemas.openai_types import CompletionRequest
30
30
 
31
31
 
32
- async def openrouter_generate(request: ImageRequest, api_key: Optional[str] = None, base_url: Optional[str] = None):
32
+ async def openai_generate(request: ImageRequest, api_key: Optional[str] = None, base_url: Optional[str] = None):
33
33
  api_key = api_key or os.getenv("OPENROUTER_API_KEY")
34
34
 
35
35
  is_hd = False
@@ -82,14 +82,14 @@ async def openrouter_generate(request: ImageRequest, api_key: Optional[str] = No
82
82
 
83
83
  completion = await client.chat.completions.create(**data)
84
84
  # logger.debug(completion)
85
- if (
86
- completion
87
- and completion.choices
88
- and hasattr(completion.choices[0].message, "images")
89
- and (images := completion.choices[0].message.images)
90
- ):
91
- image_urls = [image['image_url']['url'] for image in images]
92
- # logger.debug(image_urls)
85
+ if completion and completion.choices and (revised_prompt := completion.choices[0].message.content.strip()):
86
+ logger.debug(revised_prompt)
87
+
88
+ if (hasattr(completion.choices[0].message, "images") and (images := completion.choices[0].message.images)):
89
+
90
+ image_urls = [image['image_url']['url'] for image in images]
91
+ else:
92
+ image_urls = parse_base64(revised_prompt)
93
93
 
94
94
  if is_hd:
95
95
  # logger.debug(image_urls)
@@ -100,7 +100,7 @@ async def openrouter_generate(request: ImageRequest, api_key: Optional[str] = No
100
100
  response = ImagesResponse(image=image_urls)
101
101
 
102
102
  else:
103
- image_urls = await to_url(image_urls, content_type="image/png")
103
+ image_urls = await to_url(image_urls, filename=f'{shortuuid.random()}.png', content_type="image/png")
104
104
  response = ImagesResponse(image=image_urls)
105
105
 
106
106
  # logger.debug(response)
@@ -108,25 +108,39 @@ async def openrouter_generate(request: ImageRequest, api_key: Optional[str] = No
108
108
  if response.data:
109
109
  return response
110
110
 
111
- raise Exception(f"image generate failed: {completion}")
111
+ # content_filter
112
+ # raise Exception(f"Image generate failed: {completion}")
113
+ from fastapi import HTTPException, status
114
+
115
+ raise HTTPException(status_code=status.HTTP_451_UNAVAILABLE_FOR_LEGAL_REASONS,
116
+ detail=f"Prompt is sensitive.\n\n{completion}")
112
117
 
113
118
 
114
119
  if __name__ == '__main__':
115
- base_url = "https://all.chatfire.cn/openrouter/v1"
116
- api_key = os.getenv("OPENROUTER_API_KEY")
120
+ # base_url = "https://all.chatfire.cn/openrouter/v1"
121
+ # api_key = os.getenv("OPENROUTER_API_KEY")
122
+
123
+ api_key = "sk-MAZ6SELJVtGNX6jgIcZBKuttsRibaDlAskFAnR7WD6PBSN6M"
124
+ base_url = "https://new.yunai.link/v1"
125
+
126
+
127
+ base_url="https://api.huandutech.com/v1"
128
+ api_key = "sk-qOpbMHesasoVgX75ZoeEeBEf1R9dmsUZVAPcu5KkvLFhElrn"
117
129
 
118
130
  request = ImageRequest(
119
- model="google/gemini-2.5-flash-image-preview:free",
131
+ # model="google/gemini-2.5-flash-image-preview:free",
120
132
  # model="google/gemini-2.5-flash-image-preview:free-hd",
121
133
 
122
- # model="gemini-2.5-flash-image-preview",
134
+ model="gemini-2.5-flash-image-preview",
135
+
136
+ prompt="裸体女孩",
123
137
 
124
- prompt="带个墨镜",
125
- image=["https://oss.ffire.cc/files/kling_watermark.png"],
138
+ # prompt="带个墨镜",
139
+ # image=["https://oss.ffire.cc/files/kling_watermark.png"],
126
140
  )
127
141
 
128
142
  r = arun(
129
- openrouter_generate(
143
+ openai_generate(
130
144
  request, base_url=base_url, api_key=api_key
131
145
  )
132
146
  )
@@ -25,7 +25,7 @@ from meutils.apis.jimeng.images import generate as jimeng_generate
25
25
 
26
26
  from meutils.apis.qwen.chat import Completions as QwenCompletions
27
27
  from meutils.apis.google.chat import Completions as GoogleCompletions
28
- from meutils.apis.google.images import openrouter_generate
28
+ from meutils.apis.google.images import openai_generate
29
29
 
30
30
 
31
31
  async def generate(
@@ -59,11 +59,12 @@ async def generate(
59
59
  request.image = request.image[-1]
60
60
  return await QwenCompletions(api_key=api_key).generate(request)
61
61
 
62
- if request.model.startswith(("gemini",)):
63
- return await GoogleCompletions(base_url=base_url, api_key=api_key).generate(request)
64
-
65
- if request.model.startswith(("google/gemini",)): # openrouter
66
- return await openrouter_generate(request, base_url=base_url, api_key=api_key)
62
+ if request.model.startswith(("google/gemini", "gemini")): # openrouter
63
+ if api_key.endswith("-openai"):
64
+ api_key = api_key.removesuffix("-openai")
65
+ return await openai_generate(request, base_url=base_url, api_key=api_key)
66
+ else:
67
+ return await GoogleCompletions(base_url=base_url, api_key=api_key).generate(request) # 原生接口
67
68
 
68
69
  # 其他
69
70
  data = {
@@ -56,6 +56,7 @@ async def create_draft_content(request: ImageRequest, token: str):
56
56
  if not request.model.startswith("high_aes_general_v30l"):
57
57
  request.model = "high_aes_general_v30l:general_v3.0_18b" # 动态切换吧
58
58
  # "root_model": "high_aes_general_v30l_art_fangzhou:general_v3.0_18b"
59
+ # high_aes_general_v40
59
60
 
60
61
  if image_uri:
61
62
  pass
@@ -28,10 +28,10 @@ from fastapi import APIRouter, File, UploadFile, Query, Form, Depends, Request,
28
28
  FEISHU_URL = "https://xchatllm.feishu.cn/sheets/GYCHsvI4qhnDPNtI4VPcdw2knEd?sheet=8W6kk8" # 超刷
29
29
 
30
30
 
31
- async def get_valid_token(tokens: Optional[list] = None, force_update: bool = True):
31
+ async def get_valid_token(tokens: Optional[list] = None, force_update: bool = True, batch_size: int = 1):
32
32
  tokens = tokens or await get_series(FEISHU_URL, duplicated=True)
33
33
 
34
- if token := await redis_aclient.get("volc-token"):
34
+ if token := await redis_aclient.get("volc-token"): # list
35
35
  token = token.decode()
36
36
 
37
37
  if force_update: # 强制检测更新
@@ -41,12 +41,13 @@ async def get_valid_token(tokens: Optional[list] = None, force_update: bool = Tr
41
41
  await redis_aclient.delete("volc-token") # 删除无效
42
42
  else:
43
43
  return token
44
-
45
- for token in tokens:
44
+ for i, token in enumerate(tokens, 1):
46
45
  if await check(token):
47
46
  await redis_aclient.set("volc-token", token, ex=2 * 3600 - 10 * 60)
48
47
 
49
48
  return token
49
+ if batch_size == i:
50
+ break
50
51
 
51
52
  logger.debug(f"无效 {token}")
52
53
  _ = f"{time.ctime()}\n\n{FEISHU_URL}\n\n所有token无效\n\n{token}"
meutils/data/VERSION CHANGED
@@ -1 +1 @@
1
- 2025.09.03.23.13.47
1
+ 2025.09.07.00.07.25
meutils/io/files_utils.py CHANGED
@@ -211,7 +211,13 @@ async def to_file(file: Union[UploadFile, str, bytes], filename: Optional[str] =
211
211
  return Path(filename).resolve()
212
212
 
213
213
 
214
- async def markdown_base64_to_url(text, pattern=r'!\[Image_\d+\]\((.+?)\)'):
214
+ async def markdown_base64_to_url(text, pattern=r'!\[.*?\]\((.*?)\)'):
215
+ """
216
+ :param text:
217
+ :param pattern:
218
+ pattern=r'!\[.*?\]\((data:image/.*?)\)'
219
+ :return:
220
+ """
215
221
  base64_strings = re.findall(pattern, text)
216
222
 
217
223
  # logger.debug(text)
@@ -335,4 +341,13 @@ if __name__ == '__main__':
335
341
  # # arun(get_file_duration(content=content))
336
342
  # arun(get_file_duration(url=url))
337
343
 
338
- r = arun(to_url([]))
344
+ # r = arun(to_url([]))
345
+ text = "这是一个示例文本,包含一个图片:![image](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAADwf7zUAAAgAElEQ) 这张图片很棒。"
346
+
347
+ arun(markdown_base64_to_url(
348
+ text=text,
349
+ # pattern=r'!\[.*?\]\((data:image/.*?)\)'
350
+ # pattern=r'!\[.*?\]\((.*?)\)'
351
+
352
+ )
353
+ )
@@ -363,20 +363,16 @@ async def check_token_for_gitee(api_key, threshold: float = 1):
363
363
  # @rcache(ttl=1 * 24 * 3600, skip_cache_func=skip_cache_func)
364
364
  async def check_token_for_openrouter(api_key, threshold: float = 1):
365
365
  if not isinstance(api_key, str):
366
- return await check_tokens(api_key, check_token_for_volc)
366
+ return await check_tokens(api_key, check_token_for_openrouter)
367
367
 
368
368
  try:
369
369
  # {"data": {"total_credits": 215.00205323, "total_usage": 215.20147321775545}} %
370
370
  client = AsyncOpenAI(base_url=os.getenv("OPENROUTER_BASE_URL"), api_key=api_key)
371
371
  data = await client.get("/credits", cast_to=object)
372
- logger.debug(data)
372
+ # logger.debug(data)
373
373
  # data = await client.get("/models/user", cast_to=object)
374
374
  # logger.debug(bjson(data))
375
375
 
376
- data = await client.get("/generation",
377
- options={"params": {"id": 1}},
378
- cast_to=object
379
- )
380
376
  #
381
377
  # logger.debug(bjson(data))
382
378
 
@@ -431,7 +427,7 @@ if __name__ == '__main__':
431
427
 
432
428
  # arun(check_token_for_ppinfra("sk_F0kgPyCMTzmOH_-VCEJucOK8HIrbnLGYm_IWxBToHZQ"))
433
429
 
434
- arun(check_token_for_volc("827b41d2-7e8c-46e2-9854-0720ca1bd2e4"))
430
+ # arun(check_token_for_volc("827b41d2-7e8c-46e2-9854-0720ca1bd2e4"))
435
431
  # arun(check_token_for_volc("279749bd-ba5e-4962-9c65-eb6604b65594"))
436
432
  # arun(check_token_for_volc("8a5af7cb-42a3-4391-ac40-9d0f4502acde", purpose='seedance'))
437
433
 
@@ -447,6 +443,6 @@ if __name__ == '__main__':
447
443
 
448
444
  # arun(get_valid_token_for_fal())
449
445
 
450
- # api_key = "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"
446
+ api_key = "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"
451
447
  # api_key = None
452
- # arun(check_token_for_openrouter(api_key=api_key or os.getenv("OPENROUTER_API_KEY")))
448
+ arun(check_token_for_openrouter(api_key=api_key or os.getenv("OPENROUTER_API_KEY")))
@@ -49,28 +49,6 @@ cna=KP9DIEqqyjUCATrw/+LjJV8F; _bl_uid=LXmp28z7dwezpmyejeXL9wh6U1Rb; cnaui=310cbd
49
49
  """.strip()
50
50
 
51
51
 
52
- @retrying()
53
- async def create_new_chat(api_key, cookie: Optional[str] = None):
54
- qwen_client = AsyncOpenAI(
55
- base_url=base_url,
56
- api_key=api_key,
57
- default_headers={
58
- 'User-Agent': ua.random,
59
- 'Cookie': cookie or COOKIE
60
- }
61
- )
62
- payload = {
63
- "title": "新建对话",
64
- "models": [DEFAUL_MODEL],
65
- "chat_mode": "normal",
66
- "chat_type": "t2i",
67
- "timestamp": time.time() * 1000 // 1
68
- }
69
- resp = await qwen_client.post('/v2/chats/new', body=payload, cast_to=object)
70
- logger.debug(resp)
71
- return resp['data']['id']
72
-
73
-
74
52
  @retrying()
75
53
  async def to_file(file, api_key, cookie: Optional[str] = None):
76
54
  qwen_client = AsyncOpenAI(
@@ -101,9 +79,6 @@ async def create(request: CompletionRequest, token: Optional[str] = None, cookie
101
79
  logger.debug(token)
102
80
 
103
81
  default_query = None
104
- if 'image' in request.model:
105
- chat_id = await create_new_chat(token, cookie)
106
- default_query = {'chat_id': chat_id}
107
82
 
108
83
  client = AsyncOpenAI(
109
84
  base_url=base_url,
@@ -121,20 +96,9 @@ async def create(request: CompletionRequest, token: Optional[str] = None, cookie
121
96
  request.model = DEFAUL_MODEL
122
97
  request.messages[-1]['chat_type'] = "deep_research"
123
98
 
124
- elif any(i in model for i in ("image",)):
125
- request.model = DEFAUL_MODEL
126
- request.chat_id = default_query['chat_id']
127
- request.size = "1:1"
128
-
129
- request.messages[-1]['chat_type'] = "t2i"
130
- request.messages[-1]['feature_config'] = {
131
- "thinking_enabled": False,
132
- "output_schema": "phase"
133
- }
134
-
135
99
 
136
100
  elif any(i in model for i in ("search",)):
137
- request.model = "qwen-max-latest"
101
+ request.model = DEFAUL_MODEL
138
102
  request.messages[-1]['chat_type'] = "search"
139
103
 
140
104
  # 混合推理
@@ -201,7 +165,7 @@ async def create(request: CompletionRequest, token: Optional[str] = None, cookie
201
165
 
202
166
  request.messages[-1]['content'] = user_content
203
167
 
204
- # logger.debug(request)
168
+ logger.debug(request)
205
169
 
206
170
  request.incremental_output = True # 增量输出
207
171
  data = to_openai_params(request)
@@ -322,7 +286,8 @@ if __name__ == '__main__':
322
286
 
323
287
  request = CompletionRequest(
324
288
  # model="qwen3-235b-a22b",
325
- model="qwen3-235b-a22b-thinking-2507",
289
+ # model="qwen3-235b-a22b-thinking-2507",
290
+ model="qwen3-max-preview",
326
291
  # model="qwen-turbo-2024-11-01",
327
292
  # model="qwen-max-latest",
328
293
  # model="qvq-max-2025-03-25",
@@ -349,6 +314,14 @@ if __name__ == '__main__':
349
314
  # max_tokens=100,
350
315
 
351
316
  messages=[
317
+ {
318
+ 'role': 'user',
319
+ 'content': '1+1',
320
+ },
321
+ {
322
+ 'role': 'assistant',
323
+ 'content': '3',
324
+ },
352
325
  {
353
326
  'role': 'user',
354
327
  # 'content': '今天南京天气',
@@ -359,7 +332,7 @@ if __name__ == '__main__':
359
332
  # "chat_type": "search", deep_research
360
333
 
361
334
  # 'content': user_content,
362
- 'content': "1+1",
335
+ 'content': "错了",
363
336
 
364
337
  # "content": [
365
338
  # {
@@ -9,6 +9,7 @@
9
9
  # @Description :
10
10
 
11
11
  from meutils.pipe import *
12
+ from openai import OpenAI
12
13
 
13
14
  models_mapping = {
14
15
  "flux-kontext-dev": "MusePublic/FLUX.1-Kontext-Dev",
@@ -16,8 +17,11 @@ models_mapping = {
16
17
 
17
18
  "flux.1-krea-dev": "black-forest-labs/FLUX.1-Krea-dev",
18
19
 
19
- "moonshotai/kimi-k2-instruct": "moonshotai/Kimi-K2-Instruct",
20
20
  "kimi-k2-0711-preview": "moonshotai/Kimi-K2-Instruct",
21
+ "kimi-k2-250711": "moonshotai/Kimi-K2-Instruct",
22
+ "kimi-k2-0905-preview": "moonshotai/Kimi-K2-Instruct-0905",
23
+ "kimi-k2-250905": "moonshotai/Kimi-K2-Instruct-0905",
24
+
21
25
  "majicflus_v1": "MAILAND/majicflus_v1",
22
26
  "deepseek-reasoner": "deepseek-ai/DeepSeek-R1-0528",
23
27
 
@@ -62,5 +66,20 @@ models_mapping = {
62
66
 
63
67
  }
64
68
 
69
+ def get_models_mapping():
70
+ client = OpenAI(
71
+ api_key=os.getenv("MODELSCOPE_API_KEY"),
72
+ base_url=os.getenv("MODELSCOPE_BASE_URL"),
73
+ )
74
+
75
+ models = client.models.list().data
76
+ models = {
77
+ m.id.split('/', maxsplit=1)[-1].lower(): m.id for m in models
78
+ }
79
+ # logger.debug(models)
80
+ return {**models, **models_mapping}
81
+
65
82
  if __name__ == '__main__':
83
+ models = get_models_mapping()
84
+ print(bjson(models))
66
85
  print(','.join(models))
@@ -13,36 +13,9 @@ from meutils.pipe import *
13
13
  from meutils.llm.clients import OpenAI
14
14
 
15
15
  models_mapping = {
16
- "glm-4.5": "zai-org/GLM-4.5",
17
- "glm-4.5v": "zai-org/GLM-4.5V",
18
-
19
- "deepseek-v3": "deepseek-ai/DeepSeek-V3",
20
- "deepseek-v3-0324": "deepseek-ai/DeepSeek-V3",
21
- "deepseek-v3-250324": "deepseek-ai/DeepSeek-V3",
22
- "deepseek-chat": "deepseek-ai/DeepSeek-V3",
23
- "deepseek-v3.1": "deepseek-ai/DeepSeek-V3.1",
24
- "deepseek-v3-1-250821": "deepseek-ai/DeepSeek-V3.1",
25
-
26
- "qwen3-32b": "Qwen/Qwen3-32B",
27
- "deepseek-r1": "deepseek-ai/DeepSeek-R1",
28
- "deepseek-r1-250528": "deepseek-ai/DeepSeek-R1",
29
- "deepseek-reasoner": "deepseek-ai/DeepSeek-R1",
30
- "qwen2.5-72b-instruct": "Qwen/Qwen2.5-72B-Instruct-128K",
31
- "kimi-k2-0711-preview": "moonshotai/Kimi-K2-Instruct",
32
- "moonshotai/kimi-k2-instruct": "moonshotai/Kimi-K2-Instruct",
33
- "qwen2.5-vl-32b-instruct": "Qwen/Qwen2.5-VL-32B-Instruct",
34
- "qwen2.5-vl-72b-instruct": "Qwen/Qwen2.5-VL-72B-Instruct",
35
- "qwen2.5-vl-7b-instruct": "Qwen/Qwen2.5-VL-7B-Instruct",
36
- "minimax-m1-80k": "MiniMaxAI/MiniMax-M1-80k",
37
- "qvq-72b-preview": "Qwen/QVQ-72B-Preview",
38
- "qwen2.5-7b-instruct": "Qwen/Qwen2.5-7B-Instruct",
39
- "deepseek-r1:1.5b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
40
- "deepseek-r1-distill-qwen-1.5b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
41
- "deepseek-r1:7b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
42
- "deepseek-r1-distill-qwen-7b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
43
- "deepseek-r1:8b": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
44
- "deepseek-r1-distill-llama-8b": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
45
- "qwen2.5-32b-instruct": "Qwen/Qwen2.5-32B-Instruct"
16
+ "deepseek-v3.1": "deepseek/deepseek-chat-v3.1:free",
17
+ "kimi-k2-250711": "moonshotai/kimi-k2:free",
18
+ "deepseek-r1-250528": "deepseek/deepseek-r1-0528:free",
46
19
 
47
20
  }
48
21
 
@@ -22,14 +22,15 @@ models_mapping = {
22
22
  "deepseek-v3.1": "deepseek-ai/DeepSeek-V3.1",
23
23
  "deepseek-v3-1-250821": "deepseek-ai/DeepSeek-V3.1",
24
24
 
25
-
26
25
  "qwen3-32b": "Qwen/Qwen3-32B",
27
26
  "deepseek-r1": "deepseek-ai/DeepSeek-R1",
28
27
  "deepseek-r1-250528": "deepseek-ai/DeepSeek-R1",
29
28
  "deepseek-reasoner": "deepseek-ai/DeepSeek-R1",
30
29
  "qwen2.5-72b-instruct": "Qwen/Qwen2.5-72B-Instruct-128K",
30
+
31
+ "kimi-k2-250711": "moonshotai/Kimi-K2-Instruct",
31
32
  "kimi-k2-0711-preview": "moonshotai/Kimi-K2-Instruct",
32
- "moonshotai/kimi-k2-instruct": "moonshotai/Kimi-K2-Instruct",
33
+
33
34
  "qwen2.5-vl-32b-instruct": "Qwen/Qwen2.5-VL-32B-Instruct",
34
35
  "qwen2.5-vl-72b-instruct": "Qwen/Qwen2.5-VL-72B-Instruct",
35
36
  "qwen2.5-vl-7b-instruct": "Qwen/Qwen2.5-VL-7B-Instruct",