MeUtils 2025.6.13.18.6.6__py3-none-any.whl → 2025.6.18.14.28.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -257,7 +257,9 @@ async def get_next_token_for_polling(
         check_token: Optional[Callable] = None,
         max_size: Optional[int] = 10,
         from_redis: Optional[bool] = False,
-        min_points: float = 0
+        min_points: float = 0,
+
+        ttl: Optional[int] = None,
 ):
     if from_redis:
         return await get_next_token(feishu_url, check_token, ttl=7 * 24 * 3600, min_points=min_points)
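The hunk above adds a ttl keyword (and a trailing comma after min_points) to get_next_token_for_polling; the from_redis branch shown here still hard-codes ttl=7 * 24 * 3600, so the new keyword presumably feeds the non-Redis path outside this hunk. A hedged call-site sketch, not the package's own test code:

# Call-site sketch only: assumes get_next_token_for_polling is imported from the
# package (import path not shown in the diff) and that feishu_url is its first parameter.
async def pick_token() -> str:
    return await get_next_token_for_polling(
        "https://xchatllm.feishu.cn/sheets/<sheet-id>",  # placeholder sheet URL
        from_redis=True,
        min_points=0,
        ttl=24 * 3600,  # new keyword; assumed to be a rotation TTL in seconds
    )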
meutils/data/VERSION CHANGED
@@ -1 +1 @@
-2025.06.13.18.06.06
+2025.06.18.14.28.04
@@ -192,18 +192,28 @@ async def check_token_for_sophnet(api_key, threshold: float = 1):
 
 #
 @retrying()
-async def check_token_for_volc(api_key, threshold: float = 1):
+async def check_token_for_volc(api_key, threshold: float = 1, purpose: Optional[str] = None):
     if not isinstance(api_key, str):
         return await check_tokens(api_key, check_token_for_volc)
 
     try:
-        client = AsyncOpenAI(base_url=os.getenv("VOLC_BASE_URL"), api_key=api_key)
-
-        data = await client.chat.completions.create(
-            model="deepseek-v3-250324",
-            messages=[{"role": "user", "content": "hi"}],
-            max_tokens=1
-        )
+        base_url = os.getenv("VOLC_BASE_URL") or "https://ark.cn-beijing.volces.com/api/v3"
+        client = AsyncOpenAI(base_url, api_key=api_key)
+
+        if purpose:
+            response = client.images.generate(
+                model="doubao-seedream-3-0-t2i-250415",
+                prompt="鱼眼镜头,一只猫咪的头部,画面呈现出猫咪的五官因为拍摄方式扭曲的效果。",
+                size="1024x1024",
+                response_format="url"
+            )
+        else:
+
+            response = await client.chat.completions.create(
+                model="deepseek-v3-250324",
+                messages=[{"role": "user", "content": "hi"}],
+                max_tokens=1
+            )
         return True
     except TimeoutException as e:
         raise
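check_token_for_volc now accepts an optional purpose argument: when it is set, the key is validated by generating a Doubao Seedream text-to-image sample (the Chinese prompt asks for a fisheye-lens close-up of a cat's head), otherwise by a one-token chat completion against deepseek-v3-250324; the base URL also falls back to the public Ark endpoint when VOLC_BASE_URL is unset. Note that in the new image branch images.generate is not awaited and base_url is passed positionally, which the OpenAI Python client's keyword-only constructor does not appear to accept. A hedged sketch of the apparent intent, not the code that ships in the wheel:

# Hedged sketch of the intended behavior of the new check.
import os

from openai import AsyncOpenAI


async def check_volc_key(api_key: str, purpose: str | None = None) -> bool:
    base_url = os.getenv("VOLC_BASE_URL") or "https://ark.cn-beijing.volces.com/api/v3"
    client = AsyncOpenAI(base_url=base_url, api_key=api_key)  # base_url passed by keyword
    try:
        if purpose:  # e.g. purpose="image": probe the Seedream text-to-image model
            await client.images.generate(
                model="doubao-seedream-3-0-t2i-250415",
                prompt="fisheye close-up of a cat's head",  # English stand-in for the Chinese prompt
                size="1024x1024",
                response_format="url",
            )
        else:  # default: cheapest possible chat probe
            await client.chat.completions.create(
                model="deepseek-v3-250324",
                messages=[{"role": "user", "content": "hi"}],
                max_tokens=1,
            )
        return True
    except Exception:
        return False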
@@ -124,6 +124,9 @@ class ImageRequest(BaseModel): # openai
         if self.aspect_ratio:  # 适配比例
             self.size = ASPECT_RATIOS.get(self.aspect_ratio, '1024x1024')
 
+        elif self.size == 'auto':
+            self.size = self.aspect_ratio = None
+
         elif self.size:  # 适配尺寸
             self.aspect_ratio = size2aspect_ratio(self.size)
 
@@ -191,8 +194,10 @@ class ImageEditRequest(BaseModel):
         if self.aspect_ratio:  # 适配比例
             self.size = ASPECT_RATIOS.get(self.aspect_ratio, '1024x1024')
 
-        elif self.size:  # 适配尺寸
+        elif self.size == 'auto':
+            self.size = self.aspect_ratio = None
 
+        elif self.size:  # 适配尺寸
             self.aspect_ratio = size2aspect_ratio(self.size)
 
             self.size = self.size if 'x' in self.size else '512x512'
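ImageRequest and ImageEditRequest gain the same branch: an OpenAI-style size="auto" now clears both size and aspect_ratio instead of being pushed through size2aspect_ratio, while an explicit aspect_ratio still takes precedence and any other size string is normalized as before. A minimal sketch of the dispatch order, with the package's helpers stubbed out:

# Hedged sketch of the branch order only; ASPECT_RATIOS and size2aspect_ratio
# are the package's own helpers and are stubbed here for illustration.
ASPECT_RATIOS = {"1:1": "1024x1024", "16:9": "1280x720"}  # stand-in mapping


def size2aspect_ratio(size: str) -> str:
    w, h = size.split("x")
    return f"{w}:{h}"  # stand-in; the real helper reduces the ratio


def normalize(size, aspect_ratio):
    if aspect_ratio:  # explicit ratio wins
        size = ASPECT_RATIOS.get(aspect_ratio, "1024x1024")
    elif size == "auto":  # new: defer the choice to the upstream model
        size = aspect_ratio = None
    elif size:  # otherwise derive a ratio from the size string
        aspect_ratio = size2aspect_ratio(size)
    return size, aspect_ratio


assert normalize("auto", None) == (None, None)
assert normalize(None, "1:1") == ("1024x1024", "1:1")
assert normalize("1280x720", None) == ("1280x720", "1280:720")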
@@ -89,7 +89,10 @@ class ChannelInfo(BaseModel):
         super().__init__(**data)
         self.name = self.name or self.base_url or "NAME"
         self.tag = self.tag or self.base_url or "TAG"
+        self.group = self.group or self.base_url or "GROUP"
 
+        self.setting = self.setting or ""
+        self.param_override = self.param_override or ""
         if isinstance(self.model_mapping, dict):
             self.model_mapping = json.dumps(self.model_mapping)
 
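ChannelInfo now defaults group to the base URL, mirroring name and tag, and coerces setting and param_override to empty strings so downstream code never sees None for them. A self-contained sketch of the same pattern (a trimmed stand-in model, not the package's full ChannelInfo):

# Hedged stand-in that reproduces the defaulting shown in the hunk above.
import json
from typing import Optional, Union

from pydantic import BaseModel


class ChannelSketch(BaseModel):
    base_url: str = ""
    name: Optional[str] = None
    tag: Optional[str] = None
    group: Optional[str] = None
    setting: Optional[str] = None
    param_override: Optional[str] = None
    model_mapping: Optional[Union[dict, str]] = None

    def __init__(self, **data):
        super().__init__(**data)
        self.name = self.name or self.base_url or "NAME"
        self.tag = self.tag or self.base_url or "TAG"
        self.group = self.group or self.base_url or "GROUP"  # new default
        self.setting = self.setting or ""  # new: never None
        self.param_override = self.param_override or ""  # new: never None
        if isinstance(self.model_mapping, dict):
            self.model_mapping = json.dumps(self.model_mapping)  # store as JSON text


channel = ChannelSketch(base_url="https://api.example.com/v1", model_mapping={"gpt-4o": "gpt-4o-mini"})
assert channel.group == "https://api.example.com/v1" and channel.setting == ""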
@@ -73,11 +73,22 @@ MODEL_PRICE = {
     "api-volcengine-high_aes_general_v30l_zt2i": 0.05,
     "api-volcengine-byteedit_v2.0": 0.05,
 
+    "doubao-seedance-1-0-pro-250528": 1,
     "api-doubao-seedance-1-0-pro-250528": 1,
     "api-doubao-seedance-1-0-pro-250528-480p": 1,
     "api-doubao-seedance-1-0-pro-250528-720p": 2,
     "api-doubao-seedance-1-0-pro-250528-1080p": 3,
 
+    "api-doubao-seedance-1-0-lite-t2v-250428": 1 / 2,
+    "api-doubao-seedance-1-0-lite-t2v-250428-480p": 1 / 2,
+    "api-doubao-seedance-1-0-lite-t2v-250428-720p": 2 / 2,
+    "api-doubao-seedance-1-0-lite-t2v-250428-1080p": 3 / 2,
+
+    "api-doubao-seedance-1-0-lite-i2v-250428": 1 / 2,
+    "api-doubao-seedance-1-0-lite-i2v-250428-480p": 1 / 2,
+    "api-doubao-seedance-1-0-lite-i2v-250428-720p": 2 / 2,
+    "api-doubao-seedance-1-0-lite-i2v-250428-1080p": 3 / 2,
+
     # videos
     "api-videos-wan-ai/wan2.1-t2v-14b": 1.2,
     "api-videos-wan-ai/wan2.1-t2v-14b-turbo": 1.2,
@@ -614,6 +625,8 @@ MODEL_RATIO = {
 
     # minimax https://platform.minimaxi.com/document/price?id=6433f32294878d408fc8293e
     "minimax-text-01": 0.5,
+    "minimax-m1-80k": 2,
+
     "abab6.5-chat": 15 / 7.5,
     "abab6.5s-chat": 5,
     "abab6.5t-chat": 2.5,
@@ -840,10 +853,12 @@ MODEL_RATIO = {
     "gemini-2.0-flash": 0.075,
     "gemini-2.0-flash-001": 0.075,
     "gemini-2.0-flash-lite-preview-02-05": 0.075,
+    "gemini-2.5-flash-lite-preview-06-17": 0.075,
     "gemini-2.0-flash-exp": 0.075,
     "gemini-2.0-flash-thinking-exp": 0.075,
     "gemini-2.0-flash-thinking-exp-1219": 0.075,
     "gemini-2.0-flash-thinking-exp-01-21": 0.075,
+    "gemini-2.5-flash": 0.075,
 
     "gemini-2.5-flash-preview-04-17": 0.075,
     "gemini-2.5-flash-preview-05-20": 0.075,
@@ -855,6 +870,7 @@ MODEL_RATIO = {
     "gemini-2.5-pro-preview-03-25": 0.625,
     "gemini-2.5-pro-preview-05-06": 0.625,
     "gemini-2.5-pro-preview-06-05": 0.625,
+    "gemini-2.5-pro": 0.625,
 
     "gemini-1.5-pro-001": 1.25,
     "gemini-1.5-pro-002": 1.25,
@@ -936,10 +952,10 @@ MODEL_RATIO = {
     "o4-mini": 0.55,
     "gpt-image-1": 2.5,
 
-    "o3": 5,
-    "o3-2025-04-16": 5,
-
+    "o3": 1,
+    "o3-2025-04-16": 1,
     "o3-pro": 10,
+    "o3-pro-2025-06-10": 10,
 
     # 硅基
     "llama-3.1-8b-instruct": 0.01,
@@ -1002,6 +1018,7 @@ MODEL_RATIO = {
 
 COMPLETION_RATIO = {
     "minimax-text-01": 8,
+    "minimax-m1-80k": 4,
 
     # 智能体
     "gpt-4-plus": 5,
@@ -34,9 +34,9 @@ async def get_bearer_token(
 
     token = auth.credentials
     if token.startswith('redis:'):  # redis里按序轮询
-        if "feishu.cn" in token:
+        if "feishu.cn" in token:  # redis:https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=Y7HVfo
             feishu_url = token.removeprefix("redis:")
-            token = await get_next_token(feishu_url, ttl=24 * 3600)  # todo: 摆脱 feishu
+            token = await get_next_token(feishu_url, ttl=24 * 3600)
 
     # logger.debug(token)
 
@@ -44,10 +44,15 @@ async def get_bearer_token(
             tokens = token.removeprefix("redis:").split(',')  # todo: 初始化redis
             token = np.random.choice(tokens)
 
+    elif token.startswith("http") and "feishu.cn" in token:  # feishu 取所有 keys 主要针对 channel
+        feishu_url = token
+        tokens = await get_series(feishu_url)
+        token = '\n'.join(tokens)  # 多渠道
+
     elif ',' in token:  # 内存里随机轮询
         token = np.random.choice(token.split(','))
 
-    elif token in {"none", "null"}:
+    elif token in {"None", "none", "null"}:
         token = None
 
     return token
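Taken together with the first get_bearer_token hunk, the function now recognizes four token shapes: a redis:-prefixed Feishu sheet URL rotates one key at a time, a bare Feishu URL is expanded to every key in the sheet via get_series and newline-joined (aimed at multi-key channel setup), a comma-separated list is sampled at random, and the literal strings "None"/"none"/"null" all resolve to None. A condensed, hedged sketch of that dispatch order; get_next_token and get_series are the package's helpers and are replaced with trivial stubs here so the flow is runnable:

# Hedged sketch of the token-resolution order after this change.
import asyncio

import numpy as np


async def get_next_token(feishu_url: str, ttl: int = 0) -> str:
    return "sk-rotated-demo"  # stub: real helper rotates keys behind Redis/Feishu


async def get_series(feishu_url: str) -> list:
    return ["sk-a", "sk-b"]  # stub: real helper reads every key in the sheet


async def resolve_token(token: str):
    if token.startswith("redis:"):  # rotate keys stored behind a Feishu sheet / Redis
        if "feishu.cn" in token:
            return await get_next_token(token.removeprefix("redis:"), ttl=24 * 3600)
        return str(np.random.choice(token.removeprefix("redis:").split(",")))
    if token.startswith("http") and "feishu.cn" in token:  # new: fetch the whole key series
        return "\n".join(await get_series(token))  # one key per line, for multi-key channels
    if "," in token:  # in-memory random rotation
        return str(np.random.choice(token.split(",")))
    if token in {"None", "none", "null"}:  # now also catches "None"
        return None
    return token


assert asyncio.run(resolve_token("none")) is None
assert asyncio.run(resolve_token("https://xchatllm.feishu.cn/sheets/xxx")) == "sk-a\nsk-b"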
@@ -1,32 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# @Project : AI.  @by PyCharm
-# @File : openai_siliconflow
-# @Time : 2024/6/26 10:42
-# @Author : betterme
-# @WeChat : meutils
-# @Software : PyCharm
-# @Description :
-import os
-
-from meutils.pipe import *
-from openai import OpenAI
-from openai import OpenAI, APIStatusError
-
-
-client = OpenAI(
-    # base_url="https://free.chatfire.cn/v1",
-    api_key="9a867a14-26d1-4950-89ae-dd989dec10b5-b137b199a507d6f5",
-    base_url="https://all.chatfire.cc/kindo/v1"
-
-)
-
-try:
-    client.chat.completions.create(
-        messages=[
-            {"role": "user", "content": "你是谁"}
-        ],
-        model="azure/gpt-4o-mini",
-    )
-except Exception as e:
-    print(e)