MeUtils 2025.6.12.17.34.4__py3-none-any.whl → 2025.6.13.18.6.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,19 +6,23 @@
 # @Author : betterme
 # @WeChat : meutils
 # @Software : PyCharm
-# @Description :
+# @Description :
+import json
 
 from meutils.pipe import *
 from meutils.io.files_utils import to_url, to_url_fal
+from meutils.str_utils.json_utils import json_path
 from meutils.schemas.openai_types import CompletionRequest
 from meutils.schemas.image_types import ImageRequest
 from meutils.llm.openai_utils import chat_completion, chat_completion_chunk, create_chat_completion_chunk
 
+
 async def stream_to_nostream(
         request: CompletionRequest,
 ):
     pass
 
+
 async def chat_for_image(
         generate: Callable,
         request: CompletionRequest,
@@ -61,6 +65,44 @@ async def chat_for_image(
     return chunks
 
 
+async def chat_for_video(
+        get_task: Callable,  # response
+        taskid: str,
+):
+    """Async task"""
+
+    async def gen():
+
+        # fetch the task
+        for i in f"""> VideoTask(id={taskid})\n""":
+            await asyncio.sleep(0.03)
+            yield i
+
+        yield f"[🤫 任务进度]("
+        for i in range(60):
+            await asyncio.sleep(3)
+            response = await get_task(taskid)  # contains "status"
+
+            logger.debug(response)
+            if response.get("status", "").lower().startswith(("succ", "fail")):
+
+                yield ")🎉🎉🎉\n\n"
+
+                yield f"""```json\n{json.dumps(response, indent=4, ensure_ascii=False)}\n```"""
+
+                if urls := json_path(response, expr='$..[url,image_url,video_url]'):  # all urls
+                    for i, url in enumerate(urls, 1):
+                        yield f"\n\n[下载链接{i}]({url})\n\n"
+
+                break
+
+            else:
+                yield "🚀"
+
+    chunks = create_chat_completion_chunk(gen())
+    return chunks
+
+
 if __name__ == '__main__':
     request = CompletionRequest(
         model="deepseek-r1-Distill-Qwen-1.5B",
@@ -69,4 +111,4 @@ if __name__ == '__main__':
         ],
         stream=False,
     )
-    arun(chat_for_image(None, request))
+    arun(chat_for_image(None, request))
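The new `chat_for_video` coroutine streams a typewriter-style header, polls `get_task` every 3 seconds for up to 60 attempts, then emits the final task JSON plus download links pulled out with `json_path`; in the package the generator is wrapped by `create_chat_completion_chunk(gen())` so the text arrives as OpenAI-style streaming chunks. Below is a minimal, self-contained sketch of the same polling pattern with no meutils dependencies; `make_fake_get_task` and `poll_task` are illustrative stand-ins, not part of the package:

```python
import asyncio
import json


def make_fake_get_task(succeed_after: int = 3):
    """Illustrative factory: returns a stub that 'succeeds' after N polls."""
    calls = {"n": 0}

    async def fake_get_task(task_id: str) -> dict:
        calls["n"] += 1
        if calls["n"] < succeed_after:
            return {"status": "running"}
        return {"status": "succeeded",
                "content": {"video_url": "https://example.com/video.mp4"}}

    return fake_get_task


async def poll_task(get_task, task_id: str, interval: float = 0.1, max_polls: int = 60):
    """Yield progress text until the task reaches a terminal status."""
    yield f"> VideoTask(id={task_id})\n"
    for _ in range(max_polls):
        await asyncio.sleep(interval)
        response = await get_task(task_id)
        if response.get("status", "").lower().startswith(("succ", "fail")):
            # final payload, pretty-printed like the package does
            yield json.dumps(response, indent=4, ensure_ascii=False)
            break
        yield "🚀"  # heartbeat while the task is still running


async def main():
    async for chunk in poll_task(make_fake_get_task(), "task-123"):
        print(chunk, end="", flush=True)
    print()


if __name__ == "__main__":
    asyncio.run(main())
```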
@@ -178,6 +178,7 @@ async def ppu_flow(
     else:  # no billing
         yield
 
+
 # pay-as-you-go billing
 def create_chat_completion(
         completion: Union[str, Iterable[str]],
@@ -255,6 +256,10 @@ def get_payment_times(request: Union[BaseModel, dict], duration: float = 5):
     # duration
     N += request.get("duration", 0) // duration
 
+    # command-line style argument --duration 5
+    if "--duration 10" in str(request):
+        N += 1
+
     return N
 
 
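The change to `get_payment_times` adds one extra billing unit whenever the serialized request contains a literal `--duration 10` flag, on top of the existing `duration // 5` counting. A simplified sketch of that counting behaviour; `count_payment_units` is a hypothetical stand-in, and starting from a base unit of 1 is an assumption, not taken from the package:

```python
from typing import Union


def count_payment_units(request: Union[dict, str], unit_seconds: float = 5) -> int:
    """One unit per `unit_seconds` of requested duration, plus one extra unit
    when the serialized request embeds a literal `--duration 10` flag."""
    n = 1  # assumed base unit
    if isinstance(request, dict):
        n += int(request.get("duration", 0) // unit_seconds)
    if "--duration 10" in str(request):
        n += 1
    return n


print(count_payment_units({"duration": 10}))                   # 3
print(count_payment_units({"prompt": "a cat --duration 10"}))  # 2
```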
@@ -48,3 +48,4 @@ def size2aspect_ratio(size):
 if __name__ == '__main__':
     print(size2aspect_ratio("1920x1080"))
     print(size2aspect_ratio("1920:1080"))
+    print(size2aspect_ratio("1024x1024"))
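The added test exercises a square size. The body of `size2aspect_ratio` is not part of this diff; the sketch below is a plausible reimplementation (reduce `WxH` or `W:H` by the GCD), shown only to make the expected outputs concrete:

```python
import math
import re


def size_to_aspect_ratio(size: str) -> str:
    """Hypothetical reimplementation: '1920x1080' or '1920:1080' -> '16:9'."""
    w, h = map(int, re.split(r"[x:×]", size.strip(), maxsplit=1))
    g = math.gcd(w, h)
    return f"{w // g}:{h // g}"


print(size_to_aspect_ratio("1920x1080"))  # 16:9
print(size_to_aspect_ratio("1920:1080"))  # 16:9
print(size_to_aspect_ratio("1024x1024"))  # 1:1
```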
@@ -99,7 +99,7 @@ class ImageRequest(BaseModel): # openai
     style: Union[str, Literal["vivid", "natural"]] = None
 
     # test default value Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]]
-    size: Optional[str] = '1024x1024'
+    size: Optional[str] = '1024x1024'  # null auto
 
     response_format: Optional[Literal["oss_url", "url", "b64_json"]] = "url"
 
@@ -614,3 +614,5 @@ if __name__ == '__main__':
     }
 
     r = ImageRequest(**data)
+
+    print(ImagesResponse())
@@ -38,52 +38,61 @@ class ModelInfo(BaseModel):
 
 
 class ChannelInfo(BaseModel):
+    id: Optional[int] = None  # create a new one if absent
+    type: int = 1  # enum value: openai
+
+    name: str = ''
     tag: str = ''
+    group: str = 'default'
+
+    base_url: str = ''
+    key: str
+    models: str = 'MODEL'
 
-    id: Optional[int] = None
-    type: int = 1  # enum value: openai
-    key: Optional[str] = None
     access_token: str = ''
     openai_organization: str = ''
     test_model: str = ''
-    status: int = '1'
-    name: str = 'fal-flux'
-    weight: int = '0'
-    created_time: int = '1749175121'
-    test_time: int = '0'
-    response_time: int = '0'
-    base_url: Optional[str] = None
+    status: int = 1
+    weight: int = 0
+    created_time: int = Field(default_factory=lambda: int(time.time()))
+    test_time: int = 0
+    response_time: int = 0
     other: str = ''
-    balance: int = '0'
-    balance_updated_time: int = '0'
+    balance: int = 0
+    balance_updated_time: int = 0
 
-    models: str
-    group: str = 'default'
-
-    used_quota: Optional[int] = None
-    upstream_user_quota: int = '0'
+    used_quota: int = 0
+    upstream_user_quota: int = 0
 
-    model_mapping: Optional[str] = None  # json
+    model_mapping: Union[str, dict] = ""  # json
 
     headers: str = ''  # json
     status_code_mapping: str = ''
-    priority: int = '1'
-    auto_ban: int = '1'
-    empty_response_retry: int = '0'
-    not_use_key: int = '0'
+    priority: int = 0
+    auto_ban: int = 1
+    empty_response_retry: int = 0
+    not_use_key: int = 0
     remark: str = ''
-    mj_relax_limit: int = '99'
-    mj_fast_limit: int = '99'
-    mj_turbo_limit: int = '99'
+    mj_relax_limit: int = 99
+    mj_fast_limit: int = 99
+    mj_turbo_limit: int = 99
     other_info: str = ''
-    channel_ratio: int = '1'
-    error_return_429: int = '0'
+    channel_ratio: int = 1
+    error_return_429: int = 0
     setting: str = ''
 
     """parameter override"""
     param_override: str = ''  # json
     is_tools: bool = False
 
+    def __init__(self, /, **data: Any):
+        super().__init__(**data)
+        self.name = self.name or self.base_url or "NAME"
+        self.tag = self.tag or self.base_url or "TAG"
+
+        if isinstance(self.model_mapping, dict):
+            self.model_mapping = json.dumps(self.model_mapping)
+
 
 # https://oss.ffire.cc/images/qw.jpeg?x-oss-process=image/format,jpg/resize,w_512
 if __name__ == '__main__':
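The reworked `ChannelInfo` makes `key` required, turns the numeric fields into real integers, and normalizes itself in `__init__`: `name` and `tag` fall back to `base_url`, and a dict `model_mapping` is serialized to a JSON string. A trimmed, self-contained model mirroring that pattern (pydantic v2 assumed; `ChannelInfoSketch` and its reduced field set are illustrative only):

```python
import json
import time
from typing import Any, Optional, Union

from pydantic import BaseModel, Field


class ChannelInfoSketch(BaseModel):
    """Trimmed illustration of the normalization done in ChannelInfo.__init__."""
    model_config = {"protected_namespaces": ()}  # silence the `model_` prefix warning

    id: Optional[int] = None
    name: str = ''
    tag: str = ''
    base_url: str = ''
    key: str
    models: str = 'MODEL'
    created_time: int = Field(default_factory=lambda: int(time.time()))
    model_mapping: Union[str, dict] = ""

    def __init__(self, /, **data: Any):
        super().__init__(**data)
        # fall back to base_url when name/tag are not provided
        self.name = self.name or self.base_url or "NAME"
        self.tag = self.tag or self.base_url or "TAG"
        # accept a dict and store it as a JSON string
        if isinstance(self.model_mapping, dict):
            self.model_mapping = json.dumps(self.model_mapping)


c = ChannelInfoSketch(key="sk-xxx", base_url="https://api.example.com",
                      model_mapping={"gpt-4o": "gpt-4o-mini"})
print(c.name, c.tag, c.model_mapping)
```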
@@ -73,6 +73,11 @@ MODEL_PRICE = {
     "api-volcengine-high_aes_general_v30l_zt2i": 0.05,
     "api-volcengine-byteedit_v2.0": 0.05,
 
+    "api-doubao-seedance-1-0-pro-250528": 1,
+    "api-doubao-seedance-1-0-pro-250528-480p": 1,
+    "api-doubao-seedance-1-0-pro-250528-720p": 2,
+    "api-doubao-seedance-1-0-pro-250528-1080p": 3,
+
     # videos
     "api-videos-wan-ai/wan2.1-t2v-14b": 1.2,
     "api-videos-wan-ai/wan2.1-t2v-14b-turbo": 1.2,
@@ -627,3 +627,5 @@ if __name__ == '__main__':
     # )
     #
     # print(chat_completion_chunk)
+
+    print(ImageRequest(prompt='xx'))
@@ -52,7 +52,12 @@ def json_loads(s):
 
 
 def json_path(obj, expr):  # todo: cache
-    """$..["keywords","query","search_result"]"""
+    """
+    $..["keywords","query","search_result"]
+
+    python => $..[keywords,query,search_result]
+
+    """
     if isinstance(obj, dict):
         pass
     elif isinstance(obj, str):
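The expanded docstring documents both the quoted and unquoted multi-key forms of the JSONPath expression. To make the semantics of `$..[url,image_url,video_url]` concrete (collect every value of those keys anywhere in a nested structure), here is a plain-Python walker; it illustrates the expected behaviour and is not the actual `json_path` implementation:

```python
def collect_keys(obj, keys):
    """Recursively collect values for any of `keys`, mimicking `$..[k1,k2,...]`."""
    found = []
    if isinstance(obj, dict):
        for k, v in obj.items():
            if k in keys:
                found.append(v)
            found.extend(collect_keys(v, keys))
    elif isinstance(obj, list):
        for item in obj:
            found.extend(collect_keys(item, keys))
    return found


data = {"content": {"video_url": "https://example.com/v.mp4"}, "status": "succeeded"}
print(collect_keys(data, {"url", "image_url", "video_url"}))
# ['https://example.com/v.mp4']
```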
@@ -76,47 +81,20 @@ if __name__ == '__main__':
         a: int = 1
 
 
-    print(json_path(A(), '$.a'))
-
-    json2class(
-        {
-            "id": 3323,
-            "type": 1,
-            "key": "20c790a8-8f5b-4382-942f-05d6f93ce04d:c3ceee990d5aae3f99084e7da6fa7c98",
-            "access_token": "",
-            "openai_organization": "",
-            "test_model": "",
-            "status": 1,
-            "name": "fal-flux",
-            "weight": 0,
-            "created_time": 1749175121,
-            "test_time": 0,
-            "response_time": 0,
-            "base_url": "https://openai.chatfire.cn/images",
-            "other": "",
-            "balance": 0,
-            "balance_updated_time": 0,
-            "models": "imagen4,recraft-v3,recraftv3,flux-pro-1.1-ultra,flux-kontext-pro,flux-kontext-max",
-            "group": "default",
-            "used_quota": 9927600,
-            "upstream_user_quota": 0,
-            "model_mapping": "{\n \"flux-pro-1.1-ultra\": \"fal-ai/flux-pro/v1.1-ultra\",\n \"ideogram-ai/ideogram-v2-turbo\": \"fal-ai/ideogram/v2/turbo\",\n \"ideogram-ai/ideogram-v2\": \"fal-ai/ideogram/v2\",\n \"recraftv3\": \"fal-ai/recraft-v3\",\n \"recraft-v3\": \"fal-ai/recraft-v3\",\n \"imagen4\": \"fal-ai/imagen4/preview\",\n \"flux-kontext-pro\": \"fal-ai/flux-pro/kontext\",\n \"flux-kontext-max\": \"fal-ai/flux-pro/kontext/max\",\n \"imagen4,recraft-v3,recraftv3,flux-pro-1.1-ultra,flux-kontext-pro,flux-kontext-max\": \"\"\n}",
-            "headers": "",
-            "status_code_mapping": "",
-            "priority": 1,
-            "auto_ban": 1,
-            "empty_response_retry": 0,
-            "not_use_key": 0,
-            "remark": "",
-            "mj_relax_limit": 99,
-            "mj_fast_limit": 99,
-            "mj_turbo_limit": 99,
-            "other_info": "{\"status_reason\":\"model: flux-kontext-max, status code: 500, reason: Failed to generate image: User is locked. Reason: Exhausted balance. Top up your balance at fal.ai/dashboard/billing.\",\"status_time\":1749175757}",
-            "channel_ratio": 1,
-            "error_return_429": 0,
-            "tag": "fal",
-            "setting": "",
-            "param_override": "",
-            "is_tools": True
-        }
-    )
+    data = {
+        "id": "cgt-20250613173405-qnpqg",
+        "model": "doubao-seedance-1-0-pro-250528",
+        "status": "succeeded",
+        "content": {
+            "video_url": "https://ark-content-generation-cn-beijing.tos-cn-beijing.volces.com/doubao-seedance-1-0-pro/02174980724664200000000000000000000ffffac182c177b9d12.mp4?X-Tos-Algorithm=TOS4-HMAC-SHA256&X-Tos-Credential=AKLTYjg3ZjNlOGM0YzQyNGE1MmI2MDFiOTM3Y2IwMTY3OTE%2F20250613%2Fcn-beijing%2Ftos%2Frequest&X-Tos-Date=20250613T093454Z&X-Tos-Expires=86400&X-Tos-Signature=bc080dc9e02282dbe10c82e04c59ac1ed4afb67cbec8aa0506357540f9d47fc4&X-Tos-SignedHeaders=host"
+        },
+        "usage": {
+            "completion_tokens": 245388,
+            "total_tokens": 245388
+        },
+        "created_at": 1749807246,
+        "updated_at": 1749807294
+    }
+
+
+    print(json_path(data, expr='$..[url,image_url,video_url]'))
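Assuming `json_path` resolves the recursive multi-key expression along the lines sketched earlier, this final `print` should emit a single-element list containing the signed `video_url` from the sample Seedance task payload.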