MeUtils 2025.7.7.18.15.57__py3-none-any.whl → 2025.7.15.17.29.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. {MeUtils-2025.7.7.18.15.57.dist-info → MeUtils-2025.7.15.17.29.5.dist-info}/METADATA +263 -262
  2. {MeUtils-2025.7.7.18.15.57.dist-info → MeUtils-2025.7.15.17.29.5.dist-info}/RECORD +37 -28
  3. examples/_openaisdk/openai_audio_asr.py +43 -0
  4. examples/_openaisdk/openai_edits.py +34 -8
  5. examples/_openaisdk/openai_gitee.py +17 -11
  6. examples/_openaisdk/openai_google_image.py +42 -43
  7. examples/_openaisdk/openai_modelscope.py +73 -1
  8. examples/_openaisdk/openai_moon.py +12 -11
  9. meutils/ai_audio/tin.py +68 -0
  10. meutils/ai_audio/xx.py +14 -0
  11. meutils/apis/audio/elevenlabs.py +49 -0
  12. meutils/apis/audio/gitee.py +51 -0
  13. meutils/apis/gitee/edits.py +48 -0
  14. meutils/apis/gitee/images/kolors.py +1 -1
  15. meutils/apis/images/edits.py +1 -1
  16. meutils/apis/images/lora.py +67 -0
  17. meutils/apis/images/recraft.py +3 -3
  18. meutils/apis/models.py +25 -0
  19. meutils/apis/oneapi/channel.py +18 -7
  20. meutils/apis/oneapi/tasks.py +89 -0
  21. meutils/apis/oneapi/user.py +1 -1
  22. meutils/apis/ppio/videos.py +2 -2
  23. meutils/apis/utils.py +109 -12
  24. meutils/data/VERSION +1 -1
  25. meutils/io/files_utils.py +38 -2
  26. meutils/llm/check_utils.py +36 -2
  27. meutils/llm/completions/qwenllm.py +33 -6
  28. meutils/llm/openai_polling/chat.py +1 -1
  29. meutils/llm/openai_utils/billing_utils.py +11 -7
  30. meutils/llm/openai_utils/common.py +3 -3
  31. meutils/oss/{ali.py → ali_oss.py} +21 -9
  32. meutils/schemas/oneapi/common.py +81 -46
  33. meutils/schemas/openai_types.py +8 -3
  34. {MeUtils-2025.7.7.18.15.57.dist-info → MeUtils-2025.7.15.17.29.5.dist-info}/LICENSE +0 -0
  35. {MeUtils-2025.7.7.18.15.57.dist-info → MeUtils-2025.7.15.17.29.5.dist-info}/WHEEL +0 -0
  36. {MeUtils-2025.7.7.18.15.57.dist-info → MeUtils-2025.7.15.17.29.5.dist-info}/entry_points.txt +0 -0
  37. {MeUtils-2025.7.7.18.15.57.dist-info → MeUtils-2025.7.15.17.29.5.dist-info}/top_level.txt +0 -0
meutils/apis/utils.py CHANGED
@@ -7,10 +7,70 @@
7
7
  # @WeChat : meutils
8
8
  # @Software : PyCharm
9
9
  # @Description :
10
+ import httpx
10
11
 
11
12
  from meutils.pipe import *
12
13
  from openai import AsyncClient
13
14
  from meutils.caches import rcache
15
+ from openai._legacy_response import HttpxBinaryResponseContent
16
+
17
+
18
+ async def make_request_httpx(
19
+ base_url: str,
20
+ headers: Optional[dict] = None,
21
+
22
+ path: Optional[str] = None,
23
+
24
+ params: Optional[dict] = None,
25
+ payload: Optional[dict] = None,
26
+ data: Optional[Any] = None,
27
+ files: Optional[dict] = None,
28
+ timeout: Optional[int] = None,
29
+
30
+ method: Optional[str] = None,
31
+
32
+ debug: bool = False,
33
+ **kwargs
34
+ ):
35
+ if method is None:
36
+ method = (payload or data or files) and "POST" or "GET"
37
+
38
+ path = path or "/"
39
+ path = f"""/{path.removeprefix("/")}"""
40
+
41
+
42
+ if debug:
43
+ log = {
44
+ "base_url": base_url,
45
+ "path": path,
46
+ "method": method,
47
+ "headers": headers,
48
+ "params": params,
49
+ "payload": payload,
50
+ "data": data,
51
+ "timeout": timeout,
52
+ }
53
+ logger.debug(f"MAKE_REQUEST: {method.upper()} => {base_url}{path}")
54
+ logger.debug(f"MAKE_REQUEST_DETAIL: {bjson(log)}")
55
+
56
+ async with httpx.AsyncClient(base_url=base_url, headers=headers, timeout=timeout or 100) as client:
57
+ # content: RequestContent | None = None,
58
+ # data: RequestData | None = None,
59
+ # files: RequestFiles | None = None,
60
+ # json: typing.Any | None = None,
61
+ # params: QueryParamTypes | None = None,
62
+ # headers: HeaderTypes | None = None,
63
+ response = await client.request(method, path, json=payload, data=data, files=files, params=params)
64
+ # response.raise_for_status()
65
+
66
+ if isinstance(response.content, HttpxBinaryResponseContent):
67
+ return response.content
68
+
69
+ try:
70
+ return response.json()
71
+ except Exception as e:
72
+ logger.error(e)
73
+ return response
14
74
 
15
75
 
16
76
  async def make_request(
@@ -30,6 +90,10 @@ async def make_request(
30
90
 
31
91
  debug: bool = False
32
92
  ):
93
+ if isinstance(api_key, dict):
94
+ # "Authorization": f"key {api_key}",
95
+ headers = {**headers, **api_key} # 覆盖
96
+
33
97
  if headers:
34
98
  headers = {k: v for k, v in headers.items() if '_' not in k}
35
99
  if not any(i in base_url for i in {"queue.fal.run", "elevenlabs"}): # todo xi-api-key
@@ -96,6 +160,9 @@ async def make_request(
96
160
  # return response.json()
97
161
 
98
162
  response = await client.post(path, body=payload, options=options, files=files, cast_to=object)
163
+
164
+ # HttpxBinaryResponseContent
165
+
99
166
  return response
100
167
 
101
168
 
@@ -253,35 +320,65 @@ if __name__ == '__main__':
253
320
 
254
321
  """
255
322
  curl -X POST "https://api.elevenlabs.io/v1/text-to-speech/JBFqnCBsd6RMkjVDRZzb?output_format=mp3_44100_128" \
256
- -H "xi-api-key: sk_9e7ce9190f85579b527beb6e673eb350db9c0cbfe2c7334b" \
323
+ -H "xi-api-key: sk_f155f9d255438f52942edc8e1e7c56fb61a78dedaae0fbc5" \
257
324
  -H "Content-Type: application/json" \
258
325
  -d '{
259
326
  "text": "The first move is what sets everything in motion.",
260
- "model_id": "eleven_multilingual_v2"
327
+ "model_id": "eleven_turbo_v2_5"
261
328
  }'
329
+
330
+ curl -X POST "http://0.0.0.0:80000/elevenlabs/v1/text-to-speech/JBFqnCBsd6RMkjVDRZzb?output_format=mp3_44100_128" \
331
+ -H "xi-api-key: sk_f155f9d255438f52942edc8e1e7c56fb61a78dedaae0fbc5" \
332
+ -H "Content-Type: application/json" \
333
+ -d '{
334
+ "text": "The first move is what sets everything in motion.",
335
+ "model_id": "eleven_turbo_v2_5"
336
+ }'
337
+
338
+ curl -X POST https://api.elevenlabs.io/v1/speech-to-text \
339
+ -H "xi-api-key: xi-api-key" \
340
+ -H "Content-Type: multipart/form-data" \
341
+ -F model_id="foo" \
342
+ -F file=@<file1>
343
+
262
344
  """
345
+ #
263
346
  UPSTREAM_BASE_URL = "https://api.elevenlabs.io/v1"
264
- UPSTREAM_API_KEY = "sk_9e7ce9190f85579b527beb6e673eb350db9c0cbfe2c7334b"
265
- path = "/text-to-speech/JBFqnCBsd6RMkjVDRZzb"
266
-
347
+ UPSTREAM_API_KEY = "sk_f155f9d255438f52942edc8e1e7c56fb61a78dedaae0fbc5"
267
348
  headers = {
268
349
  "xi-api-key": UPSTREAM_API_KEY
269
350
  }
270
351
 
352
+ path = "/text-to-speech/JBFqnCBsd6RMkjVDRZzb"
353
+
354
+ params = {
355
+ "output_format": "mp3_44100_128"
356
+ }
271
357
  payload = {
272
358
  "text": "The first move is what sets everything in motion.",
273
359
  "model_id": "eleven_multilingual_v2"
274
360
  }
275
- params = {
276
- "output_format": "mp3_44100_128"
361
+
362
+ path = "speech-to-text"
363
+
364
+ data = {
365
+ 'model_id': "scribe_v1",
366
+
367
+ }
368
+ files = {
369
+ 'file': ('xx.mp3', open("/Users/betterme/PycharmProjects/AI/MeUtils/meutils/ai_audio/x1.wav", 'rb'))
370
+
277
371
  }
278
372
 
279
- arun(make_request(
373
+ params=None
374
+
375
+ arun(make_request_httpx(
280
376
  base_url=UPSTREAM_BASE_URL,
281
- # api_key=UPSTREAM_API_KEY,
282
377
  path=path,
283
- payload=payload,
284
- debug=True,
378
+ # payload=payload,
379
+ data=data,
380
+ files=files,
285
381
  headers=headers,
286
- params=params
382
+ params=params,
383
+ # debug=True,
287
384
  ))
meutils/data/VERSION CHANGED
@@ -1 +1 @@
1
- 2025.07.07.18.15.57
1
+ 2025.07.15.17.29.05
meutils/io/files_utils.py CHANGED
@@ -224,6 +224,35 @@ async def markdown_base64_to_url(text, pattern=r'!\[Image_\d+\]\((.+?)\)'):
224
224
  return text
225
225
 
226
226
 
227
+ async def get_file_duration(filename: str = ".mp4", url: Optional[str] = None, content: Optional[bytes] = None,
228
+ headers: Optional[dict] = None):
229
+ # Path(url.split('?')[0]).name
230
+ headers = {
231
+ "Range": "bytes=0-8191"
232
+ }
233
+ async with httpx.AsyncClient(timeout=200) as client:
234
+ for i in range(2):
235
+ if url:
236
+ response = await client.get(url=url, headers=headers)
237
+ response.raise_for_status()
238
+
239
+ content = response.content
240
+ elif content is None:
241
+ raise ValueError("url or content is required")
242
+
243
+ from tinytag import TinyTag
244
+
245
+ tag = TinyTag.get(filename=filename, file_obj=io.BytesIO(content), ignore_errors=False)
246
+
247
+ logger.debug(tag.duration)
248
+ if tag.duration:
249
+ break
250
+ else:
251
+ headers = None
252
+
253
+ return int(np.ceil(tag.duration or 10))
254
+
255
+
227
256
  if __name__ == '__main__':
228
257
  # import tempfile
229
258
  #
@@ -281,7 +310,7 @@ if __name__ == '__main__':
281
310
  file = "/Users/betterme/PycharmProjects/AI/ppt.txt"
282
311
  # arun(to_url(Path(file).read_bytes(), filename='ppt.txt'))
283
312
 
284
- arun(markdown_base64_to_url("![image](data:imagexxxxx)", pattern=r'!\[image\]\((.+?)\)'))
313
+ # arun(markdown_base64_to_url("![image](data:imagexxxxx)", pattern=r'!\[image\]\((.+?)\)'))
285
314
 
286
315
  # arun(to_bytes("https://oss.ffire.cc/files/kling_watermark.png"))
287
316
 
@@ -294,4 +323,11 @@ if __name__ == '__main__':
294
323
 
295
324
  # r = arun(to_bytes(url))
296
325
 
297
- # print(mimetypes.guess_type(url)[0])
326
+ print(mimetypes.guess_type(url)[0])
327
+
328
+ url = "https://lmdbk.com/5.mp4"
329
+ # url = "https://v3.fal.media/files/kangaroo/y5-1YTGpun17eSeggZMzX_video-1733468228.mp4"
330
+ content = requests.get(url).content
331
+ with timer():
332
+ # arun(get_file_duration(content=content))
333
+ arun(get_file_duration(url=url))
@@ -172,7 +172,7 @@ async def check_token_for_gemini(api_key):
172
172
  @retrying()
173
173
  async def check_token_for_ppinfra(api_key, threshold: float = 1):
174
174
  if not isinstance(api_key, str):
175
- return await check_tokens(api_key, check_token_for_ppinfra)
175
+ return await check_tokens(api_key, partial(check_token_for_ppinfra, threshold=threshold))
176
176
  try:
177
177
  client = AsyncOpenAI(base_url="https://api.ppinfra.com/v3/user", api_key=api_key)
178
178
  data = await client.get("", cast_to=object)
@@ -274,6 +274,14 @@ async def check_token_for_zhipu(api_key, threshold: float = 1, resource_package_
274
274
  return False
275
275
 
276
276
 
277
+ # curl 'https://bigmodel.cn/api/biz/product/createPreOrder' \
278
+ # -H 'accept: application/json, text/plain, */*' \
279
+ # -H 'accept-language: zh' \
280
+ # -H 'authorization: eyJhbGciOiJIUzUxMiJ9.eyJ1c2VyX3R5cGUiOiJQRVJTT05BTCIsInVzZXJfaWQiOjIyNDAwNDUsInVzZXJfa2V5IjoiNTZmNjkxODUtOTBhMS00NDYyLWFhNWYtYTljNjhmMGY0Njc3IiwiY3VzdG9tZXJfaWQiOiI2MTQ2MTc1MjExNDcxMTYwNSIsInVzZXJuYW1lIjoia2JieHYxMDYifQ.3zLefwa8lzQLyNYz0DVqkgdOgm1_ljPEoihza44Wv1r-pEPS08kQFeZm-v2CY3qhslgHh2I3d1LN_mr-9bhQjw' \
281
+ # -
282
+ # -H 'user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36' \
283
+ # --data-raw '{"channelCode":"BALANCE","isMobile":false,"num":1,"payPrice":0,"productId":"product-e6e499"}'
284
+
277
285
  @retrying()
278
286
  @rcache(ttl=1 * 24 * 3600, skip_cache_func=skip_cache_func)
279
287
  async def check_token_for_fal(token, threshold: float = 0):
@@ -295,6 +303,28 @@ async def check_token_for_fal(token, threshold: float = 0):
295
303
  return False
296
304
 
297
305
 
306
+ @retrying()
307
+ @rcache(ttl=1 * 24 * 3600, skip_cache_func=skip_cache_func)
308
+ async def check_token_for_gitee(api_key, threshold: float = 1):
309
+ if not isinstance(api_key, str):
310
+ return await check_tokens(api_key, check_token_for_volc)
311
+
312
+ try:
313
+ base_url = "https://ai.gitee.com/v1"
314
+ client = AsyncOpenAI(base_url=base_url, api_key=api_key)
315
+ _ = await client.embeddings.create(
316
+ model="Qwen3-Embedding-0.6B",
317
+ input="hi"
318
+ )
319
+ return True
320
+ except TimeoutException as e:
321
+ raise
322
+
323
+ except Exception as e:
324
+ logger.error(f"Error: {e}\n{api_key}")
325
+ return False
326
+
327
+
298
328
  if __name__ == '__main__':
299
329
  from meutils.config_utils.lark_utils import get_next_token_for_polling, get_series
300
330
 
@@ -331,6 +361,8 @@ if __name__ == '__main__':
331
361
  # arun(check_token_for_ppinfra("sk_F0kgPyCMTzmOH_-VCEJucOK8HIrbnLGYm_IWxBToHZQ"))
332
362
 
333
363
  # arun(check_token_for_volc("f1f394db-59e9-4cdd-9d12-b11d50173efc"))
364
+ # arun(check_token_for_volc("279749bd-ba5e-4962-9c65-eb6604b65594"))
365
+
334
366
  # arun(check_token_for_ppinfra("sk_mCb5sRGTi6GXkSRp5F679Rbs0V_Hfee3p85lccGXCOo"))
335
367
 
336
368
  # arun(check_token_for_zhipu(api_key="e130b903ab684d4fad0d35e411162e99.PqyXq4QBjfTdhyCh",
@@ -338,4 +370,6 @@ if __name__ == '__main__':
338
370
 
339
371
  # arun(check_token_for_fal("56d8a95e-2fe6-44a6-8f7d-f7f9c83eec24:537f06b6044770071f5d86fc7fcd6d6f"))
340
372
 
341
- arun(check_token_for_ppinfra("sk_NdGt2jb_QSEW_1cauXnWK-qShGIJ_irWgtG-DEBVs4A"))
373
+ arun(check_token_for_ppinfra("sk_IeM4wjPIsgQFpfGqIOdMpHNF28qGLu1wxvh_vy3DiWM", threshold=1))
374
+
375
+ # arun(check_token_for_gitee("NWVXUPI38OQVXZGOEL3D23I9YUQWZPV23GVVBW1X"))
@@ -18,6 +18,7 @@ from openai import AsyncOpenAI
18
18
 
19
19
  from meutils.pipe import *
20
20
  from meutils.decorators.retry import retrying
21
+ # from meutils.oss.ali_oss import qwenai_upload
21
22
  from meutils.io.files_utils import to_bytes, guess_mime_type
22
23
  from meutils.caches import rcache
23
24
 
@@ -64,6 +65,8 @@ async def to_file(file, api_key, cookie: Optional[str] = None):
64
65
  logger.debug(file_object)
65
66
  return file_object
66
67
 
68
+ # todo
69
+ # oss
67
70
 
68
71
  async def create(request: CompletionRequest, token: Optional[str] = None, cookie: Optional[str] = None):
69
72
  cookie = cookie or COOKIE
@@ -139,6 +142,12 @@ async def create(request: CompletionRequest, token: Optional[str] = None, cookie
139
142
 
140
143
  user_content[i] = {"type": "image", "image": file_object.id}
141
144
 
145
+ elif content.get("type") == 'input_audio':
146
+ url = content.get(content.get("type")).get("data")
147
+ file_object = await to_file(url, token, cookie)
148
+
149
+ user_content[i] = {"type": "image", "image": file_object.id}
150
+
142
151
  elif user_content.startswith("http"):
143
152
  file_url, user_content = user_content.split(maxsplit=1)
144
153
 
@@ -244,12 +253,28 @@ if __name__ == '__main__':
244
253
  #
245
254
  # ]
246
255
 
256
+ # user_content = [
257
+ # {
258
+ # "role": "user",
259
+ # "content": [
260
+ # {
261
+ # "type": "input_audio",
262
+ # "input_audio": {
263
+ # "data": "https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20250211/tixcef/cherry.wav",
264
+ # "format": "wav",
265
+ # },
266
+ # },
267
+ # {"type": "text", "text": "这段音频在说什么"},
268
+ # ],
269
+ # },
270
+ # ]
271
+
247
272
  request = CompletionRequest(
248
273
  # model="qwen-turbo-2024-11-01",
249
274
  # model="qwen-max-latest",
250
275
  # model="qvq-max-2025-03-25",
251
276
  # model="qvq-72b-preview-0310",
252
- # model="qwen2.5-omni-7b",
277
+ model="qwen2.5-omni-7b",
253
278
  # model="qwen-plus",
254
279
 
255
280
  # model="qwen-max-latest-search",
@@ -260,10 +285,12 @@ if __name__ == '__main__':
260
285
  # model="qwen2.5-vl-72b-instruct",
261
286
 
262
287
  # model="qwen-plus-latest",
263
- model="qwen3-235b-a22b",
288
+ # model="qwen3-235b-a22b",
264
289
  # model="qwen3-30b-a3b",
265
290
  # model="qwen3-32b",
266
291
 
292
+ # model="qwen-omni-turbo-0119",
293
+
267
294
  # max_tokens=1,
268
295
  max_tokens=None,
269
296
 
@@ -273,11 +300,11 @@ if __name__ == '__main__':
273
300
  # 'content': '今天南京天气',
274
301
  # 'content': "9.8 9.11哪个大",
275
302
  # 'content': 'https://oss.ffire.cc/files/AIGC.pdf 总结下',
276
- 'content': ' 总结下',
303
+ # 'content': ' 总结下',
277
304
 
278
305
  # "chat_type": "search", deep_research
279
306
 
280
- # 'content': user_content,
307
+ 'content': user_content,
281
308
 
282
309
  # "content": [
283
310
  # {
@@ -343,6 +370,6 @@ if __name__ == '__main__':
343
370
 
344
371
  # token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImV4cCI6MTc0ODQ3OTE0M30.oAIE1K0XA0YYqlxB8Su-u0UJbY_BBZa4_tvZpFJKxGY"
345
372
 
346
- arun(create(request, token))
373
+ # arun(create(request, token))
347
374
 
348
- # arun(to_file("https://oss.ffire.cc/files/kling_watermark.png", token))
375
+ arun(to_file("https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20250211/tixcef/cherry.wav", token))
@@ -36,7 +36,7 @@ class Completions(object):
36
36
 
37
37
  ###########################################################################
38
38
  # 开启视觉模型
39
- if not any(i in request.model for i in ["vl", 'vision']) and (urls := request.last_urls.get("image_url")):
39
+ if not any(i in request.model.lower() for i in ["vl", 'vision']) and (urls := request.last_urls.get("image_url")):
40
40
  # logger.debug(request)
41
41
  if request.model.startswith(("gemini",)):
42
42
 
@@ -90,11 +90,11 @@ async def billing_for_tokens(
90
90
  "total_tokens": total_tokens
91
91
  }
92
92
 
93
- # usage = {
94
- # "prompt_tokens": input_tokens,
95
- # "completion_tokens": output_tokens,
96
- # "total_tokens": total_tokens
97
- # }
93
+ usage = {
94
+ "prompt_tokens": input_tokens,
95
+ "completion_tokens": output_tokens,
96
+ "total_tokens": total_tokens
97
+ }
98
98
  """
99
99
  usage = usage or {}
100
100
  n = n and int(np.round(n))
@@ -115,6 +115,7 @@ async def billing_for_tokens(
115
115
  extra_body={"extra_fields": usage}
116
116
  )
117
117
  else:
118
+ # todo 设计 id chatcmpl-NEdenEpvzGiKR2FfK2GmzK => 表达某些含义
118
119
  _ = await client.chat.completions.create(
119
120
  model=model,
120
121
  messages=[{"role": "user", "content": "ChatfireAPI"}],
@@ -227,7 +228,10 @@ if __name__ == '__main__':
227
228
  # "total_tokens": 101
228
229
  # }
229
230
  # n = 1
230
- # arun(create_usage_for_tokens(usage=usage, n=n))
231
+ usage = {
232
+ "prompt_tokens": 1000, "completion_tokens": 100, # "total_tokens": 2000,
233
+ }
234
+ arun(billing_for_tokens(model="tokens", usage=usage))
231
235
 
232
236
  # arun(create_usage_for_async_task(task_id="task_id", n=1))
233
237
 
@@ -278,4 +282,4 @@ if __name__ == '__main__':
278
282
  ],
279
283
  "enhance_prompt": True
280
284
  }
281
- print(get_billing_model(data, default_resolution=""))
285
+ # print(get_billing_model(data, default_resolution=""))
@@ -39,7 +39,7 @@ AUDIO_TRANSCRIPTIONS_PARAMS = get_function_params(fn=OpenAI(api_key='').audio.tr
39
39
 
40
40
 
41
41
  def to_openai_params(
42
- request: Union[dict, CompletionRequest, ChatCompletionRequest, ImageRequest, TTSRequest, STTRequest],
42
+ request: Union[dict, CompletionRequest, ChatCompletionRequest, ImageRequest, ImageEditRequest, TTSRequest, STTRequest],
43
43
  redirect_model: Optional[str] = None,
44
44
  ) -> dict:
45
45
  data = copy.deepcopy(request)
@@ -66,7 +66,7 @@ def to_openai_params(
66
66
  data['extra_body'] = extra_body # 拓展字段
67
67
  data['model'] = redirect_model or data['model']
68
68
 
69
- if request.model.startswith(("gemini",)):
69
+ if data['model'].startswith(("gemini",)):
70
70
  data.pop("extra_body", None)
71
71
  data.pop("presence_penalty", None)
72
72
  data.pop("frequency_penalty", None)
@@ -156,7 +156,7 @@ async def ppu_flow(
156
156
  成功,充足,后扣费
157
157
  成功,不足,报错
158
158
  """
159
- post =post.lower()
159
+ post = post.lower()
160
160
  if n is not None and n > 0: # todo: 跳过某些用户
161
161
  try:
162
162
  user_money, api_key_money = await asyncio.gather(*[get_user_money(api_key), get_api_key_money(api_key)])
@@ -8,9 +8,13 @@
8
8
  # @Software : PyCharm
9
9
  # @Description :
10
10
 
11
+ import oss2
12
+
11
13
  from meutils.pipe import *
14
+ from meutils.io.files_utils import guess_mime_type
15
+ from meutils.config_utils.lark_utils import get_next_token_for_polling
12
16
 
13
- import oss2
17
+ FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=PP1PGr"
14
18
 
15
19
  # params = {
16
20
  # "access_key_id": "STS.NWwe4G59dgRocw4eRdfCXvCrV",
@@ -27,15 +31,20 @@ import oss2
27
31
  url = "https://chat.qwen.ai/api/v1/files/getstsToken"
28
32
 
29
33
 
30
- def get_sts_token(filename):
34
+ async def get_sts_token(filename, filetype: Optional[str] = None, token: Optional[str] = None):
35
+ token = token or await get_next_token_for_polling(feishu_url=FEISHU_URL, from_redis=True)
36
+
37
+ filetype = filetype or guess_mime_type(filename).split('/')[0]
38
+
39
+
31
40
  payload = {
32
41
  "filename": filename,
33
- "filetype": "image" # file video audio
42
+ "filetype": filetype, # file video audio
43
+ "filesize": 1001
34
44
  }
35
45
 
36
46
  headers = {
37
- 'authorization': 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjEwNzY1N2Y1LTgxN2ItNDg5Yi1iNjk4LWFhZjAyM2EwZTE4MyIsImV4cCI6MTc0NjI5NTAwNH0.D1uJN44NHiEt6URce4upbHvs7v73_Vd0V1s3T_JzclI',
38
-
47
+ 'authorization': f'Bearer {token}',
39
48
  }
40
49
 
41
50
  response = requests.request("POST", url, headers=headers, json=payload)
@@ -43,8 +52,8 @@ def get_sts_token(filename):
43
52
  return response.json()
44
53
 
45
54
 
46
- def qwenai_upload(file, filetype: str = 'image'): # todo: 自动猜测类型
47
- params = get_sts_token(file_name)
55
+ async def qwenai_upload(file, filetype: Optional[str] = None): # todo: 自动猜测类型
56
+ params = await get_sts_token(file_name, filetype)
48
57
 
49
58
  access_key_id = params['access_key_id']
50
59
  access_key_secret = params['access_key_secret']
@@ -61,6 +70,8 @@ def qwenai_upload(file, filetype: str = 'image'): # todo: 自动猜测类型
61
70
  file_path = params.get("file_path")
62
71
  file_url = params.get("file_url")
63
72
 
73
+ logger.debug(params)
74
+
64
75
  # 上传文件
65
76
  if isinstance(file, bytes):
66
77
  bucket.put_object(file_path, file)
@@ -73,6 +84,7 @@ def qwenai_upload(file, filetype: str = 'image'): # todo: 自动猜测类型
73
84
  if __name__ == '__main__':
74
85
  # qwenai_upload(params['file_path'], params)
75
86
  file_name = "/Users/betterme/PycharmProjects/AI/QR.png"
76
- # file_url = qwenai_upload(file_name)
77
87
 
78
- get_sts_token(file_name)
88
+ # arun(get_sts_token(file_name))
89
+ file_url = arun(qwenai_upload(file_name))
90
+