MeUtils 2025.8.21.9.4.52-py3-none-any.whl → 2025.8.26.9.9.32-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. examples/_openaisdk/openai_chatfire.py +3 -1
  2. examples/_openaisdk/openai_ffire.py +8 -10
  3. examples/_openaisdk/openai_images.py +11 -52
  4. examples/ann/x.py +115 -0
  5. meutils/apis/chatglm/glm_video_api.py +3 -3
  6. meutils/apis/chatglm/zai.py +7 -5
  7. meutils/apis/gitee/openai_images.py +20 -8
  8. meutils/apis/images/generations.py +26 -3
  9. meutils/apis/oneapi/channel.py +14 -33
  10. meutils/apis/oneapi/common.py +17 -6
  11. meutils/apis/qwen/chat.py +2 -0
  12. meutils/{llm/mappers.py → apis/volcengine_apis/doubao_images.py} +7 -4
  13. meutils/apis/volcengine_apis/images.py +4 -3
  14. meutils/data/VERSION +1 -1
  15. meutils/llm/check_utils.py +1 -0
  16. meutils/llm/models/__init__.py +1 -3
  17. meutils/llm/models/ele.py +35 -0
  18. meutils/llm/models/modelscope.py +66 -0
  19. meutils/llm/models/ppio.py +33 -0
  20. meutils/llm/models/siliconflow.py +12 -1
  21. meutils/llm/openai_utils/adapters.py +19 -4
  22. meutils/llm/openai_utils/billing_utils.py +4 -1
  23. meutils/schemas/image_types.py +1 -1
  24. meutils/schemas/oneapi/common.py +41 -12
  25. meutils/str_utils/__init__.py +1 -5
  26. meutils/str_utils/regular_expression.py +74 -0
  27. {meutils-2025.8.21.9.4.52.dist-info → meutils-2025.8.26.9.9.32.dist-info}/METADATA +260 -260
  28. {meutils-2025.8.21.9.4.52.dist-info → meutils-2025.8.26.9.9.32.dist-info}/RECORD +33 -29
  29. /meutils/llm/{models_mapping.py → models/models_mapping.py} +0 -0
  30. {meutils-2025.8.21.9.4.52.dist-info → meutils-2025.8.26.9.9.32.dist-info}/WHEEL +0 -0
  31. {meutils-2025.8.21.9.4.52.dist-info → meutils-2025.8.26.9.9.32.dist-info}/entry_points.txt +0 -0
  32. {meutils-2025.8.21.9.4.52.dist-info → meutils-2025.8.26.9.9.32.dist-info}/licenses/LICENSE +0 -0
  33. {meutils-2025.8.21.9.4.52.dist-info → meutils-2025.8.26.9.9.32.dist-info}/top_level.txt +0 -0
examples/_openaisdk/openai_chatfire.py CHANGED
@@ -273,7 +273,9 @@ if __name__ == '__main__':
  "qwen3-235b-a22b"
  ]

- check_models(models, api_key=os.getenv("OPENAI_API_KEY"))
+ models = ["deepseek-v3.1"]
+
+ check_models(models, api_key=os.getenv("OPENAI_API_KEY") + "-21195")

  #############神奇 #############reasoning_content 神奇
examples/_openaisdk/openai_ffire.py CHANGED
@@ -17,11 +17,8 @@ client = OpenAI(
  # base_url=os.getenv("FFIRE_BASE_URL"),
  # api_key=os.getenv("FFIRE_API_KEY") #+"-29463"
  base_url=os.getenv("ONEAPIS_BASE_URL"),
- api_key=os.getenv("ONEAPIS_API_KEY") +"-13"
+ api_key=os.getenv("ONEAPIS_API_KEY") + "-3"

- # base_url="http://127.0.0.1:8000/v1",
-
- # api_key="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTgxNTc1Njh9.eihH3NVrzJCg9bdWb9mim9rGKTLKn1a66kW2Cqc0uPM"
  )
  #
  for i in range(1):
@@ -33,16 +30,17 @@ for i in range(1):
  # model="qwen3-235b-a22b-instruct-2507",
  # model="qwen-image",
  model="glm-4.5",
-
+ #
  messages=[
  {"role": "user", "content": 'are you ok?'}
  ],
- # top_p=0.7,
- top_p=None,
- temperature=None,
  # stream=True,
- max_tokens=1000,
- extra_body={"xx": "xxxxxxxx"}
+ # max_tokens=1000,
+ # extra_body={"xx": "xxxxxxxx"}
+ extra_body={"thinking": {
+ "type": "enabled"
+ }
+ }
  )
  print(completion)
  except Exception as e:
examples/_openaisdk/openai_images.py CHANGED
@@ -50,12 +50,10 @@ data = {
  # 'model': 'step-1x-medium',
  'model': 'seededit',

-
  # 'n': 2, 'quality': 'hd', 'response_format': 'url', 'size': '1024x1024', 'style': 'vivid',
  # 'extra_body': {'guidance_scale': 4.5, 'num_inference_steps': 25, 'seed': None, 'negative_prompt': None}
  }

-
  from openai import OpenAI

  # model = "stable-diffusion-3"
@@ -131,7 +129,6 @@ with timer('image'):
  # model = "black-forest-labs/FLUX.1-dev"
  # model = "flux-dev"

-
  client = OpenAI(
  # api_key=os.getenv("OPENAI_API_KEY") +,
  # api_key=os.getenv("OPENAI_API_KEY") + "-359", # 3083
@@ -207,6 +204,13 @@ with timer('image'):
  model = "doubao-seedream-3-0-t2i-250415"
  model = "black-forest-labs/FLUX.1-Krea-dev"
  model = "Qwen/Qwen-Image"
+ model = "MusePublic/FLUX.1-Kontext-Dev"
+ # model = "DiffSynth-Studio/FLUX.1-Kontext-dev-lora-SuperOutpainting"
+ # model = "DiffSynth-Studio/FLUX.1-Kontext-dev-lora-highresfix"
+ # # model = "black-forest-labs/FLUX.1-Kontext-dev"
+ # model="DiffSynth-Studio/FLUX.1-Kontext-dev-lora-ArtAug"
+
+ # flux-kontext-dev

  response = client.images.generate(
  model=model,
@@ -224,9 +228,10 @@ with timer('image'):

  # size="1700x1275",

- extra_headers={"X-ModelScope-Async-Mode": "false"},
-
-
+ extra_headers={
+ # "X-ModelScope-Async-Mode": "true",
+ # "X-ModelScope-Task-Type": "image_generation"
+ },

  extra_body={
  "extra_fields": {
@@ -245,49 +250,3 @@ with timer('image'):
  # openai.APIStatusError: Error code: 402 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details', 'type': 'quota_exceeded'}}


-
- """
- import requests
- import time
- import json
- from PIL import Image
- from io import BytesIO
-
- base_url = 'https://api-inference.modelscope.cn/'
- api_key = "ms-81ccebd2-1933-4996-8c65-8e170d4f4264"
-
- common_headers = {
- "Authorization": f"Bearer {api_key}",
- "Content-Type": "application/json",
- }
-
- response = requests.post(
- f"{base_url}v1/images/generations",
- headers={**common_headers, "X-ModelScope-Async-Mode": "true"},
- data=json.dumps({
- "model": "Qwen/Qwen-Image", # ModelScope Model-Id, required
- "prompt": "A golden cat"
- }, ensure_ascii=False).encode('utf-8')
- )
-
- response.raise_for_status()
- task_id = response.json()["task_id"]
-
- while True:
- result = requests.get(
- f"{base_url}v1/tasks/{task_id}",
- headers={**common_headers, "X-ModelScope-Task-Type": "image_generation"},
- )
- result.raise_for_status()
- data = result.json()
-
- if data["task_status"] == "SUCCEED":
- image = Image.open(BytesIO(requests.get(data["output_images"][0]).content))
- image.save("result_image.jpg")
- break
- elif data["task_status"] == "FAILED":
- print("Image Generation Failed.")
- break
-
- time.sleep(5)
- """
examples/ann/x.py ADDED
@@ -0,0 +1,115 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ # @Project : AI. @by PyCharm
+ # @File : x
+ # @Time : 2025/8/25 15:55
+ # @Author : betterme
+ # @WeChat : meutils
+ # @Software : PyCharm
+ # @Description :
+
+ from meutils.pipe import *
+
+
+ def parse_command_string(command_str: str) -> dict:
+     """
+     解析一个类似 "prompt --key1 value1 --key2 value2" 格式的字符串。
+
+     Args:
+         command_str: 输入的命令行字符串。
+
+     Returns:
+         一个包含 prompt 和解析后参数的字典。
+         例如: {"prompt": "画条狗", "size": "1:1", "n": 10}
+     """
+     # 初始化结果字典
+     result = {}
+
+     # 使用正则表达式找到第一个参数 '--' 的位置
+     # 这比简单的 split 更健壮,可以处理 prompt 中包含 '--' 的情况(虽然不常见)
+     match = re.search(r'\s--\w', command_str)
+
+     if not match:
+         # 如果没有找到任何参数,整个字符串都是 prompt
+         result['prompt'] = command_str.strip()
+         return result
+
+     first_arg_index = match.start()
+
+     # 提取 prompt 和参数部分
+     prompt = command_str[:first_arg_index].strip()
+     args_str = command_str[first_arg_index:].strip()
+
+     result['prompt'] = prompt
+
+     # 将参数字符串按空格分割成列表
+     # 例如 "--size 1:1 --n 10" -> ['--size', '1:1', '--n', '10']
+     args_list = args_str.split()
+
+     # 遍历参数列表,每次处理一个键值对
+     i = 0
+     while i < len(args_list):
+         arg = args_list[i]
+
+         # 确认当前项是一个参数键(以 '--' 开头)
+         if arg.startswith('--'):
+             key = arg[2:]  # 去掉 '--' 前缀
+
+             # 检查后面是否跟着一个值
+             if i + 1 < len(args_list) and not args_list[i + 1].startswith('--'):
+                 value = args_list[i + 1]
+
+                 # 尝试将值转换为整数,如果失败则保留为字符串
+                 try:
+                     processed_value = int(value)
+                 except ValueError:
+                     processed_value = value
+
+                 # 布尔型
+                 if processed_value in ['true', 'yes', 'on']:
+                     processed_value = True
+                 elif processed_value in ['false', 'no', 'off']:
+                     processed_value = False
+
+                 result[key] = processed_value
+
+                 i += 2  # 跳过键和值,移动到下一个参数
+             else:
+                 # 处理没有值的参数,例如 --test,可以设为 True 或忽略
+                 result[key] = True  # 或者可以写 pass 直接忽略
+                 i += 1
+         else:
+             # 如果某一项不是以 '--' 开头,它可能是格式错误,直接跳过
+             i += 1
+
+     return result
+
+
+ if __name__ == "__main__":
+     # # --- 使用示例 ---
+     # command = "画条狗 --size 1:1 --n 10"
+     # parsed_result = parse_command_string(command)
+     #
+     # print(f"原始字符串: '{command}'")
+     # print(f"解析结果: {parsed_result}")
+     # print("-" * 20)
+     #
+     # # 测试其他例子
+     # command_2 = "a cat in a space suit, cinematic lighting --n 4 --size 16:9"
+     # parsed_result_2 = parse_command_string(command_2)
+     # print(f"原始字符串: '{command_2}'")
+     # print(f"解析结果: {parsed_result_2}")
+     # print("-" * 20)
+     #
+     # command_3 = "一只赛博朋克风格的狐狸"  # 没有参数的情况
+     # parsed_result_3 = parse_command_string(command_3)
+     # print(f"原始字符串: '{command_3}'")
+     # print(f"解析结果: {parsed_result_3}")
+     #
+     # 测试输入
+     test_input = "画条狗 --size 1:1 --n 10 --aspect_ratio 1:1 --f --a aa"
+     test_input = "画条狗"
+
+     output = parse_command_string(test_input)
+     print(output)
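For reference, the expected return values below are a minimal hand-traced sketch of the new parser's behavior (not output captured from a run); the import path assumes the examples/ tree is importable from the installed wheel:

    # Hand-traced expectations for parse_command_string; import path is an assumption
    from examples.ann.x import parse_command_string

    assert parse_command_string("画条狗") == {"prompt": "画条狗"}
    assert parse_command_string("画条狗 --size 1:1 --n 10") == {"prompt": "画条狗", "size": "1:1", "n": 10}
    # A flag with no value falls back to True; non-integer values stay strings
    assert parse_command_string("画条狗 --f --a aa") == {"prompt": "画条狗", "f": True, "a": "aa"}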
meutils/apis/chatglm/glm_video_api.py CHANGED
@@ -109,15 +109,15 @@ async def generate(request: ImageRequest, n: int = 30): # 兼容dalle3

  # VideoResult
  if __name__ == '__main__':
- api_key = "f0a6177640364388a6d5d82428ee93b7.2woWPO0QdF3aWbVZ"
+ api_key = "8c1fd8923a2f4c0eb323335ca4ba9f98.I7FisfQS7x1zYK8P"

  # api_key = "c98aa404b0224690b211c5d1e420db2c.qGaByuJATne08QUx"
  # api_key = "7d10426c06afa81e8d7401d97781249c.DbqlSsicRtaUdKXI" # 新号
  # api_key = "e21bd630f681c4d90b390cd609720483.WSFVgA3Kk1wNCX0mN"

  request = VideoRequest(
- # model='cogvideox-flash',
- model='cogvideox-3'
+ model='cogvideox-flash',
+ # model='cogvideox-3',

  )
  r = arun(create_task(request, api_key=api_key))
meutils/apis/chatglm/zai.py CHANGED
@@ -26,15 +26,15 @@ from fake_useragent import UserAgent
  ua = UserAgent()

  BASE_URL = "https://chat.z.ai/api"
- FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=9VvErr"
+ FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=x3TJrE"


  class Completions(object):
  def __init__(self, api_key: Optional[str] = None):
  self.api_key = api_key

- async def create(self, request: CompletionRequest, token: Optional[str] = None):
- token = token or await get_next_token_for_polling(FEISHU_URL)
+ async def create(self, request: CompletionRequest):
+ token = self.api_key or await get_next_token_for_polling(FEISHU_URL)

  chat_id = str(uuid.uuid4())
  payload = {
@@ -130,7 +130,9 @@


  if __name__ == '__main__':
- token = "eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6ImI0YThkMTI5LWY2YzgtNDM5Mi1iYzlhLWEyNjM1Nzg0ZDM5MyIsImVtYWlsIjoiemJqZ2NlZ2NsbkB0aXRrLnVrIn0.cME4z8rip8Y6mQ0q_JEoY6ywPk_7ud2BsyFHyPRhFhtzEl_uLcQEMNlop7hM_fTy0S5pS8qdLK5y7iA1it0n7g"
+ token ="""
+ eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjUxZmNjMWZmLWRlMGYtNDgyYi1hOTAzLTMyMGM0OGJhZTQ2MiIsImVtYWlsIjoiYjZ5bnI5bjlvQG1haWwueGl1dmkuY24ifQ.286d3lCg-p9QUHEBWusC2Oi_a3WgeKtlTcbs1tfXsbW2KatRNxlbtUIeDQBPY5u-Fc3uhoU5ao0DJ4Ww_9dXZw
+ """.strip()

  request = CompletionRequest(
  model="glm-4.5",
@@ -155,7 +157,7 @@ if __name__ == '__main__':
  )

  async def main():
- response = await Completions().create(request, token)
+ response = await Completions(token).create(request)
  async for i in response:
  print(i)
meutils/apis/gitee/openai_images.py CHANGED
@@ -32,10 +32,22 @@ async def generate(request: ImageRequest, api_key: Optional[str] = None):
  **request.model_dump(exclude_none=True, exclude={"extra_fields", "aspect_ratio"}),
  **(request.extra_fields or {})
  }
+
+ if request.model.endswith("-lora"):
+ data["model"] = data["model"].removesuffix("-lora")
+ if "Qwen-Image" in data["model"]:
+ data['num_inference_steps'] = 8
+ data["cfg_scale"] = 1
+ data["lora_weights"] = [
+ {
+ "url": "https://gitee.com/realhugh/materials/raw/master/Qwen-Image-Lightning-8steps-V1.1.safetensors",
+ "weight": 1
+ }
+ ]
  # logger.debug(bjson(data))

  data = to_openai_params(ImageRequest(**data))
- # logger.debug(bjson(data))
+ logger.debug(bjson(data))

  response = await client.images.generate(**data)
  return response
@@ -50,11 +62,11 @@ if __name__ == '__main__':
  "guidance_scale": 4.5,
  "seed": 42
  }
- # data = {
- # "prompt": "一幅精致细腻的工笔画,画面中心是一株蓬勃生长的红色牡丹,花朵繁茂,既有盛开的硕大花瓣,也有含苞待放的花蕾,层次丰富,色彩艳丽而不失典雅。牡丹枝叶舒展,叶片浓绿饱满,脉络清晰可见,与红花相映成趣。一只蓝紫色蝴蝶仿佛被画中花朵吸引,停驻在画面中央的一朵盛开牡丹上,流连忘返,蝶翼轻展,细节逼真,仿佛随时会随风飞舞。整幅画作笔触工整严谨,色彩浓郁鲜明,展现出中国传统工笔画的精妙与神韵,画面充满生机与灵动之感。",
- # "model": "Qwen-Image",
- # "size": "1024x1024",
- # "num_inference_steps": 30,
- # "cfg_scale": 4
- # }
+ data = {
+ "prompt": "一幅精致细腻的工笔画,画面中心是一株蓬勃生长的红色牡丹,花朵繁茂,既有盛开的硕大花瓣,也有含苞待放的花蕾,层次丰富,色彩艳丽而不失典雅。牡丹枝叶舒展,叶片浓绿饱满,脉络清晰可见,与红花相映成趣。一只蓝紫色蝴蝶仿佛被画中花朵吸引,停驻在画面中央的一朵盛开牡丹上,流连忘返,蝶翼轻展,细节逼真,仿佛随时会随风飞舞。整幅画作笔触工整严谨,色彩浓郁鲜明,展现出中国传统工笔画的精妙与神韵,画面充满生机与灵动之感。",
+ "model": "Qwen-Image-lora",
+ "size": "1024x1024",
+ "num_inference_steps": 30,
+ "cfg_scale": 4
+ }
  arun(generate(ImageRequest(**data)))
meutils/apis/images/generations.py CHANGED
@@ -12,13 +12,16 @@ from meutils.pipe import *
  from meutils.llm.clients import AsyncClient
  from meutils.llm.openai_utils import to_openai_params

- from meutils.schemas.image_types import ImageRequest
+ from meutils.schemas.image_types import ImageRequest, RecraftImageRequest

  from meutils.apis.fal.images import generate as fal_generate

  from meutils.apis.gitee.image_to_3d import generate as image_to_3d_generate
  from meutils.apis.gitee.openai_images import generate as gitee_images_generate
  from meutils.apis.qwen.chat import Completions as QwenCompletions
+ from meutils.apis.volcengine_apis.images import generate as volc_generate
+ from meutils.apis.images.recraft import generate as recraft_generate
+ from meutils.apis.jimeng.images import generate as jimeng_generate


  async def generate(
@@ -27,16 +30,36 @@ async def generate(
  base_url: Optional[str] = None,
  ):
  if base_url: # 优先级最高
+ data = {
+ **request.model_dump(exclude_none=True, exclude={"extra_fields", "aspect_ratio"}),
+ **(request.extra_fields or {})
+ }
+ #
+ if request.model.startswith("doubao"):
+ data['watermark'] = False
+
+ data = to_openai_params(ImageRequest(**data))
+
  client = AsyncClient(api_key=api_key, base_url=base_url)
- return await client.images.generate(**to_openai_params(request))
+ return await client.images.generate(**data)

  if request.model.startswith("fal-ai"):
  return await fal_generate(request, api_key)

+ if request.model.startswith(("recraft",)):
+ request = RecraftImageRequest(**request.model_dump(exclude_none=True))
+ return await recraft_generate(request)
+
+ if request.model.startswith(("seededit", "jimeng")): # 即梦
+ return await jimeng_generate(request)
+
+ if request.model.startswith(("seed", "seededit_v3.0", "byteedit_v2.0")):
+ return await volc_generate(request, api_key)
+
  if request.model in {"Hunyuan3D-2", "Hi3DGen", "Step1X-3D"}:
  return await image_to_3d_generate(request, api_key)

- if request.model in {"Qwen-Image", "FLUX_1-Krea-dev"}:
+ if request.model in {"Qwen-Image", "FLUX_1-Krea-dev"} and request.model.endswith(("lora",)):
  return await gitee_images_generate(request, api_key)

  if request.model.startswith("qwen-image"):
meutils/apis/oneapi/channel.py CHANGED
@@ -7,7 +7,6 @@
  # @WeChat : meutils
  # @Software : PyCharm
  # @Description :
- import datetime

  from meutils.pipe import *
  from meutils.hash_utils import murmurhash
@@ -21,6 +20,17 @@ headers = {
  }


+ async def get_channel_info(id: int, base_url: str ) -> ChannelInfo:
+ params = {"keyword": id} # keyword=21222&group=&model=&id_sort=true&tag_mode=false&p=1&page_size=100
+
+ async with httpx.AsyncClient(base_url=base_url, headers=headers, timeout=30) as client:
+ response = await client.get("/api/channel/search", params=params)
+ response.raise_for_status()
+ return response.json() # status==1 是正常
+
+
+
+
  async def edit_channel(models, token: Optional[str] = None):
  token = token or os.environ.get("CHATFIRE_ONEAPI_TOKEN")
@@ -260,39 +270,10 @@ if __name__ == '__main__':
  request = ChannelInfo(name='', key=key)
  request = ChannelInfo(id=21223, key=key, used_quota=0.001)

- arun(create_or_update_channel(request, base_url=base_url))
+ # arun(create_or_update_channel(request, base_url=base_url))
  #
  # arun(exist_channel(request, base_url=base_url))

- """
- UPSTREAM_BASE_URL=https://api.ffire.cc
- UPSTREAM_API_KEY=
-
- API_KEY=https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=3aA5dH[:100]
- BASE_URL=https://api.siliconflow.cn
-
-
- curl -X 'POST' http://0.0.0.0:8000/oneapi/channel \
- -H "Authorization: Bearer $API_KEY" \
- -H "UPSTREAM-BASE-URL: $UPSTREAM_BASE_URL" \
- -H "UPSTREAM-API-KEY: $UPSTREAM_API_KEY" \
- -H 'accept: application/json' \
- -H 'Content-Type: application/json' \
- -d '{
- "id": "1000:1500",
- "name": "sf",
- "tag": "sf",
- "key": "$KEY",
- "type": 1,
-
- "base_url": "'$BASE_URL'",
- "group": "default,china",
-
- "models": "kimi-k2-0711-preview,moonshotai/kimi-k2-instruct",
- "model_mapping": {
- "kimi-k2-0711-preview": "moonshotai/Kimi-K2-Instruct"
- },
- "param_override": "{\n \"max_tokens\": null\n}"
- }'
+ # arun(get_channel_info(21222, base_url=base_url))

- """
+ arun(delete_channel(range(10015, 10032), base_url=base_url))
meutils/apis/oneapi/common.py CHANGED
@@ -7,6 +7,8 @@
  # @WeChat : meutils
  # @Software : PyCharm
  # @Description : https://github.com/Thekers/Get_OpenaiKey/blob/9d174669d7778ea32d1132bedd5167597912dcfb/Add_01AI_Token.py
+ import os
+
  import jsonpath
  from meutils.pipe import *
  from meutils.schemas.oneapi import REDIRECT_MODEL, NOTICE, FOOTER
@@ -20,12 +22,20 @@ import json

  # 500000对应1块
  def option(token: Optional[str] = None):
- token = token or os.environ.get("CHATFIRE_ONEAPI_TOKEN")

- url = "https://api.chatfire.cn/api/option/"
+ urls = [
+ "https://api.chatfire.cn/api/option/",
+ "https://api.ffire.cc/api/option/",
+ "https://api.oneapis.cn/api/option/",
+
+ ]
+
+ token = token or os.getenv("CHATFIRE_ONEAPI_TOKEN")
  headers = {
  'authorization': f'Bearer {token}',
- 'rix-api-user': '1'
+ 'rix-api-user': '1',
+ 'new-api-user': '1'
+
  }

  payloads = [
@@ -71,9 +81,10 @@
  "value": json.dumps(model_group_info, indent=4)
  },
  ]
- for payload in payloads:
- response = requests.request("PUT", url, headers=headers, json=payload)
- logger.debug(response.json())
+ for url in urls:
+ for payload in payloads:
+ response = requests.request("PUT", url, headers=headers, json=payload)
+ logger.debug(response.json())


  def add_channel(
meutils/apis/qwen/chat.py CHANGED
@@ -140,6 +140,8 @@ class Completions(object):

  if request.model.startswith("qwen-image"):
  payload["messages"][0]["chat_type"] = "t2i"
+
+ logger.debug(request.last_urls)
  if image_urls := request.last_urls.get("image_url"):
  payload["messages"][0]["chat_type"] = "image_edit"
  payload["messages"][0]["files"] = [
meutils/{llm/mappers.py → apis/volcengine_apis/doubao_images.py} RENAMED
@@ -1,8 +1,8 @@
  #!/usr/bin/env python
  # -*- coding: utf-8 -*-
  # @Project : AI. @by PyCharm
- # @File : mappers
- # @Time : 2024/12/16 14:49
+ # @File : doubao_images
+ # @Time : 2025/8/25 16:27
  # @Author : betterme
  # @WeChat : meutils
  # @Software : PyCharm
@@ -11,5 +11,8 @@
  from meutils.pipe import *


- def llm_mapper(model):
- return
+ """
+ doubao-seededit-3-0-i2i-250628
+ doubao-seedream-3-0-t2i-250415
+ """
+
meutils/apis/volcengine_apis/images.py CHANGED
@@ -12,7 +12,6 @@ https://www.volcengine.com/docs/85128/1526761
  Seedream 通用3.0文生图模型是基于字节跳动视觉大模型打造的新一代文生图模型,本次升级模型综合能力(图文,结构,美感)均显著提升。V3.0参数量更大,对语义有更好的理解,实体结构也更加自然真实,支持 2048 以下分辨率直出,各类场景下的效果均大幅提升。
  https://www.volcengine.com/docs/6791/1384311
  """
- import os

  from meutils.pipe import *
  from meutils.decorators.retry import retrying
@@ -23,7 +22,7 @@ from meutils.schemas.image_types import ImageRequest, ImagesResponse

  from volcengine.visual.VisualService import VisualService

- @retrying(min=3, max=5)
+ @retrying()
  async def generate(request: ImageRequest, token: Optional[str] = None):
  """
@@ -59,7 +58,9 @@ async def generate(request: ImageRequest, token: Optional[str] = None):

  if image:
  payload["image_urls"] = [image]
- payload["req_key"] = "byteedit_v2.0"
+ payload["req_key"] = request.model
+ if payload["req_key"] not in {"seededit_v3.0", "byteedit_v2.0"}:
+ payload["req_key"] = "byteedit_v2.0" # "seededit_v3.0" https://www.volcengine.com/docs/85128/1602254

  payload['width'] = width
  payload['height'] = height
meutils/data/VERSION CHANGED
@@ -1 +1 @@
- 2025.08.21.09.04.52
+ 2025.08.26.09.09.32
meutils/llm/check_utils.py CHANGED
@@ -195,6 +195,7 @@ async def check_token_for_sophnet(api_key, threshold: float = 1):

  try:
  client = AsyncOpenAI(base_url=os.getenv("SOPHNET_BASE_URL"), api_key=api_key)
+ print(await client.models.list())
  data = await client.chat.completions.create(
  model="DeepSeek-v3",
  messages=[{"role": "user", "content": "hi"}],
meutils/llm/models/__init__.py CHANGED
@@ -6,6 +6,4 @@
  # @Author : betterme
  # @WeChat : meutils
  # @Software : PyCharm
- # @Description :
-
- from meutils.pipe import *
+ # @Description :
meutils/llm/models/ele.py ADDED
@@ -0,0 +1,35 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ # @Project : AI. @by PyCharm
+ # @File : ele
+ # @Time : 2025/8/25 18:18
+ # @Author : betterme
+ # @WeChat : meutils
+ # @Software : PyCharm
+ # @Description :
+ import os
+
+ from meutils.pipe import *
+
+ """
+ curl https://api.elevenlabs.io/v1/models \
+   -H "xi-api-key: xi-api-key"
+ """
+
+
+ async def get_models():
+     api_key = os.getenv("ELEVENLABS_API_KEY")
+     async with httpx.AsyncClient() as client:
+         response = await client.get("https://api.elevenlabs.io/v1/models", headers={"xi-api-key": api_key})
+         response.raise_for_status()
+         data = response.json()
+         return [m.get("model_id") for m in data]
+
+
+ if __name__ == '__main__':
+     models = arun(get_models())
+
+     print(','.join([f"elevenlabs/{m}" for m in models]))
+
+
+     # deepseek-ai/DeepSeek-V3.1