MeUtils 2025.4.11.19.33.11__py3-none-any.whl → 2025.4.14.18.6.48__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {MeUtils-2025.4.11.19.33.11.dist-info → MeUtils-2025.4.14.18.6.48.dist-info}/METADATA +263 -263
- {MeUtils-2025.4.11.19.33.11.dist-info → MeUtils-2025.4.14.18.6.48.dist-info}/RECORD +23 -20
- examples/_openaisdk/4v.py +3 -3
- examples/_openaisdk/openai_chatfire.py +3 -2
- meutils/apis/images/edits.py +20 -13
- meutils/apis/images/recraft.py +35 -8
- meutils/apis/proxy/ips.py +5 -7
- meutils/apis/proxy/kuaidaili.py +24 -0
- meutils/apis/siliconflow/images.py +4 -3
- meutils/data/VERSION +1 -1
- meutils/io/files_utils.py +4 -0
- meutils/llm/completions/chat_gemini.py +8 -7
- meutils/llm/completions/qwenllm.py +12 -8
- meutils/llm/models.py +114 -0
- meutils/llm/openai_polling/chat.py +53 -5
- meutils/oss/ali.py +78 -0
- meutils/schemas/image_types.py +1 -1
- meutils/schemas/oneapi/common.py +16 -9
- meutils/schemas/openai_types.py +2 -2
- {MeUtils-2025.4.11.19.33.11.dist-info → MeUtils-2025.4.14.18.6.48.dist-info}/LICENSE +0 -0
- {MeUtils-2025.4.11.19.33.11.dist-info → MeUtils-2025.4.14.18.6.48.dist-info}/WHEEL +0 -0
- {MeUtils-2025.4.11.19.33.11.dist-info → MeUtils-2025.4.14.18.6.48.dist-info}/entry_points.txt +0 -0
- {MeUtils-2025.4.11.19.33.11.dist-info → MeUtils-2025.4.14.18.6.48.dist-info}/top_level.txt +0 -0
meutils/llm/models.py
ADDED
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Project : AI. @by PyCharm
+# @File : models
+# @Time : 2025/4/14 11:09
+# @Author : betterme
+# @WeChat : meutils
+# @Software : PyCharm
+# @Description :
+import os
+
+from meutils.pipe import *
+from meutils.llm.clients import OpenAI, AsyncOpenAI
+
+
+async def check_token(api_key):
+    try:
+        client = AsyncOpenAI(
+            base_url=os.getenv("OPENROUTER_BASE_URL"),
+            api_key=api_key,
+        )
+        r = await client.chat.completions.create(
+            messages=[{'role': 'user', 'content': 'hi'}],
+            model="google/gemma-3-1b-it:free",
+            max_tokens=1,
+            stream=False
+        )
+        logger.debug(r)
+        return True
+
+    except Exception as e:
+        logger.error(e)
+        return False
+
+
+async def check_token_for_together(api_key):
+    model = "meta-llama/Llama-Vision-Free"
+    try:
+        client = AsyncOpenAI(
+            base_url=os.getenv("TOGETHER_BASE_URL"),
+            api_key=api_key,
+        )
+        r = await client.chat.completions.create(
+            messages=[{'role': 'user', 'content': 'hi'}],
+            model=model,
+            max_tokens=1,
+            stream=False
+        )
+        logger.debug(r)
+        return True
+
+    except Exception as e:
+        logger.error(e)
+        return False
+
+
+def get_openrouter_models():
+    models = OpenAI(base_url=os.getenv("OPENROUTER_BASE_URL"), api_key='xx').models.list()
+
+    data = {}
+    for model in models.data:
+        if model.id.lower().endswith(':free'):
+            _model = model.id.lower().removesuffix(":free").split('/')[-1]
+            data[_model] = f"""{_model}=={model.id}"""
+
+    print(data | xjoin(","))
+    return data
+
+
+def get_together_models():
+    client = OpenAI(base_url=os.getenv("TOGETHER_BASE_URL"), api_key=os.getenv("TOGETHER_API_KEY"))
+    models = client.get("models", cast_to=object)
+    # logger.debug(bjson(models))
+
+    data = {}
+    for model in models:
+        if model['id'].lower().endswith('-free'):
+            _model = model['id'].lower().removesuffix("-free").split('/')[-1]
+            data[_model] = f"""{_model}=={model['id']}"""
+
+    print(data | xjoin(","))
+    return data
+
+
+if __name__ == '__main__':
+    from meutils.config_utils.lark_utils import get_series
+
+    # print(bjson(get_openrouter_models()))
+    # print(bjson(get_together_models()))
+
+    # arun(check_token("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"))
+
+    # tokens = arun(get_series("https://xchatllm.feishu.cn/sheets/GYCHsvI4qhnDPNtI4VPcdw2knEd?sheet=gGFIXb"))
+    #
+    # r = []
+    # for i in tokens:
+    #     if not arun(check_token(i)):
+    #         print(i)
+    #         r.append(i)
+
+    feishu_url = "https://xchatllm.feishu.cn/sheets/GYCHsvI4qhnDPNtI4VPcdw2knEd?sheet=tEsIyw"
+
+    tokens = arun(get_series(feishu_url))
+
+    r = []
+    rr = []
+    for i in tokens:
+        if not arun(check_token_for_together(i)):
+            print(i)
+            r.append(i)
+        else:
+            rr.append(i)
+
+    # arun(check_token_for_together("1581bb1c605501c96569cf9a24aafa7361752697a23475cdf8f2c3fe8a488292"))
meutils/llm/openai_polling/chat.py
CHANGED
@@ -9,7 +9,8 @@
 # @Description :
 
 from meutils.pipe import *
-from meutils.
+from meutils.io.files_utils import to_base64
+from meutils.llm.clients import AsyncOpenAI, zhipuai_client
 from meutils.llm.openai_utils import to_openai_params
 
 from meutils.schemas.openai_types import CompletionRequest
@@ -21,6 +22,39 @@ class Completions(object):
         self.client = AsyncOpenAI(base_url=base_url, api_key=api_key)
 
     async def create(self, request: CompletionRequest):
+        ###########################################################################
+
+        # 开启视觉模型
+        if not any(i in request.model for i in ["vl", 'vision']) and (urls := request.last_urls.get("image_url")):
+            logger.debug(request)
+            if request.model.startswith(("gemini",)):  # 仅支持base64
+                base64_list = await to_base64(urls, content_type="image/png")  ######## todo: tokens怎么计算的
+                request.messages = [
+                    {
+                        'role': 'user',
+                        'content': [
+                            {
+                                'type': 'text',
+                                'text': request.last_user_content
+                            },
+                            *[
+                                {
+                                    'type': 'image_url',
+                                    'image_url': {
+                                        'url': base64_data
+                                    }
+                                }
+                                for base64_data in base64_list
+                            ]
+
+                        ]
+                    }
+                ]
+            else:
+                request.model = "glm-4v-flash"
+                self.client = zhipuai_client
+        ###########################################################################
+
         data = to_openai_params(request)
         if 'gemini' in request.model:
             data.pop("seed", None)
@@ -32,13 +66,27 @@ class Completions(object):
 
 
 if __name__ == '__main__':
-    # 测试
+    # 测试 token 1800
 
     request = CompletionRequest(
-        model="
+        # model="gemini-2.0-flash",
+        model="glm-4-flash",
+
         messages=[
-            {"role": "
-            {"role": "user", "content":
+            # {"role": "user", "content": "你好"},
+            {"role": "user", "content": [
+                {
+                    "type": "text",
+                    "text": "解释下"
+                },
+                {
+                    "image_url": {
+                        "detail": "auto",
+                        "url": "https://osshk.share704.com/file/upload/2025/04/14/1911575959253815296.jpg"
+                    },
+                    "type": "image_url"
+                }
+            ]}
         ],
         stream=False
     )
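For orientation, a sketch of how the patched polling client might be driven end to end. The constructor arguments mirror the AsyncOpenAI(base_url=..., api_key=...) wiring visible in the class above, but the exact __init__ signature is not shown in this hunk, and the endpoint, key, and image URL below are placeholders:

import asyncio

from meutils.llm.openai_polling.chat import Completions  # path per the file list above
from meutils.schemas.openai_types import CompletionRequest

request = CompletionRequest(
    model="glm-4-flash",  # non-vision model: an attached image triggers the new fallback path
    messages=[
        {"role": "user", "content": [
            {"type": "text", "text": "Describe this image"},
            {"type": "image_url", "image_url": {"url": "https://example.com/demo.jpg"}},  # placeholder URL
        ]}
    ],
    stream=False,
)


async def main():
    # placeholder endpoint and key, assuming the constructor accepts base_url/api_key
    client = Completions(base_url="https://api.example.com/v1", api_key="sk-placeholder")
    return await client.create(request)


if __name__ == '__main__':
    print(asyncio.run(main()))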
meutils/oss/ali.py
ADDED
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Project : AI. @by PyCharm
+# @File : ali
+# @Time : 2025/4/11 19:52
+# @Author : betterme
+# @WeChat : meutils
+# @Software : PyCharm
+# @Description :
+
+from meutils.pipe import *
+
+import oss2
+
+# params = {
+#     "access_key_id": "STS.NWwe4G59dgRocw4eRdfCXvCrV",
+#     "access_key_secret": "FXZPFZ9fJxkwHQbcfMbW6pSy9bFmb3xsSYWjxvTfnx3u",
+#     "security_token": "CAISvgN1q6Ft5B2yfSjIr5TCLo7z2OZF0JCEYVGFgVIxasx0mYbZtDz2IHhMeXZqAuEcs/8znGlU6/gYlqRtT6h+SFffbMx24plJqado/UdL4Z7b16cNrbH4M8L6aXeirhu7AYjQSNfaZY3iCTTtnTNyxr3XbCirW0ffX7SClZ9gaKZwPGy/diEUPMpKAQFgpcQGT5q4V5CXPwXtn3DbAWdxpwN4khkf06mkxdCG4ResiT/5w+QO9YPqOcrmPYs+JYhyVZKq0eZrd+/ZyilcrEMTrKx8gKVKvGyY443YXwcI6FCPaOTat4xiJ18hPvVhQf9P/b+iz/Em5+Ddy8GpwkhAeL0FDyiaFdCtkI6bE7z0bocyeev2Yiv6i5aNLpbXy1p8Pi9Kb1gRIoJ6eiQtU0cWJ2uEevP9yjfjeRy+TqWJ6qYy3Kduwk/gldjwfADXHurDindCZ8RgNxp0akBMxw37e6oBaBdAfk13zDVs0w7K8Hm0wIafXm26PkUIphk/NM0lZWRslY41fWSSjD/XHMdspXXr/rnEdS6D75iEJCl62qLrD8iYHifDx+FBhpFLooGxJdqiIJRhHj3m9p+H/kLlIRqAAURdoxHCj+ca+GZXLN76Ae2FqVmunalPJWbb/DlgSSH4hk4uIaIQzX6NRfHMrfK/xFw++ykKEr27uA/whIn+xvmyuPrgssyHDlN8kS3lHjmsB72OX1YQRFLa3fHCy8wZalhfpDKAsSkI/FT+HDPu8EV5f+t8pdw5ZFHJFJyp7xlsIAA=",
+#     "file_url": "https://cdn.qwenlm.ai/310cbdaf-3754-461c-a3ff-9ec8005329c9/62d65df4-6a6e-484d-98e8-7c7509cd5e17_1.jpg?key=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyZXNvdXJjZV91c2VyX2lkIjoiMzEwY2JkYWYtMzc1NC00NjFjLWEzZmYtOWVjODAwNTMyOWM5IiwicmVzb3VyY2VfaWQiOiI2MmQ2NWRmNC02YTZlLTQ4NGQtOThlOC03Yzc1MDljZDVlMTciLCJyZXNvdXJjZV9jaGF0X2lkIjpudWxsfQ.1lc6X4KJsAyqV71cdIjkeazPEOKYNtF5rgtiGuu_iFI",
+#     "file_path": "310cbdaf-3754-461c-a3ff-9ec8005329c9/62d65df4-6a6e-484d-98e8-7c7509cd5e17_1.jpg",
+#     "file_id": "62d65df4-6a6e-484d-98e8-7c7509cd5e17",
+#     "bucketname": "qwen-webui-prod",
+#     "region": "oss-ap-southeast-1"
+# }
+
+
+url = "https://chat.qwen.ai/api/v1/files/getstsToken"
+
+
+def get_sts_token(filename):
+    payload = {
+        "filename": filename,
+        "filetype": "image"  # file video audio
+    }
+
+    headers = {
+        'authorization': 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjEwNzY1N2Y1LTgxN2ItNDg5Yi1iNjk4LWFhZjAyM2EwZTE4MyIsImV4cCI6MTc0NjI5NTAwNH0.D1uJN44NHiEt6URce4upbHvs7v73_Vd0V1s3T_JzclI',
+
+    }
+
+    response = requests.request("POST", url, headers=headers, json=payload)
+
+    return response.json()
+
+
+def qwenai_upload(file, filetype: str = 'image'):  # todo: 自动猜测类型
+    params = get_sts_token(file_name)
+
+    access_key_id = params['access_key_id']
+    access_key_secret = params['access_key_secret']
+    security_token = params['security_token']
+
+    endpoint = "oss-ap-southeast-1.aliyuncs.com"
+    bucket_name = params["bucketname"]
+
+    # 创建OSS客户端
+    auth = oss2.StsAuth(access_key_id, access_key_secret, security_token)
+    bucket = oss2.Bucket(auth, endpoint, bucket_name)
+
+    # 要上传的文件路径和文件名
+    file_path = params.get("file_path")
+    file_url = params.get("file_url")
+
+    # 上传文件
+    if isinstance(file, bytes):
+        bucket.put_object(file_path, file)
+    else:
+        bucket.put_object_from_file(file_path, file)
+
+    return file_url
+
+
+if __name__ == '__main__':
+    # qwenai_upload(params['file_path'], params)
+    file_name = "/Users/betterme/PycharmProjects/AI/QR.png"
+    # file_url = qwenai_upload(file_name)
+
+    get_sts_token(file_name)
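The upload helper above follows the usual oss2 STS flow: fetch temporary credentials, build StsAuth and Bucket objects, then put the object under the key returned by the token service. A condensed sketch of that flow with the service-specific pieces stubbed out (the token endpoint, field names, and file below are assumptions taken from the code above, not a verified Qwen API contract):

import oss2
import requests


def upload_with_sts(local_path: str, sts: dict, endpoint: str = "oss-ap-southeast-1.aliyuncs.com") -> str:
    """Upload a local file with STS credentials shaped like the commented-out params dict above."""
    auth = oss2.StsAuth(sts["access_key_id"], sts["access_key_secret"], sts["security_token"])
    bucket = oss2.Bucket(auth, endpoint, sts["bucketname"])
    bucket.put_object_from_file(sts["file_path"], local_path)  # object key comes from the token response
    return sts["file_url"]  # pre-signed download URL, also from the token response


# Hypothetical usage against a token service mirroring get_sts_token above:
# sts = requests.post("https://example.com/getstsToken", json={"filename": "QR.png", "filetype": "image"}).json()
# print(upload_with_sts("QR.png", sts))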
meutils/schemas/image_types.py
CHANGED
meutils/schemas/oneapi/common.py
CHANGED
@@ -25,6 +25,7 @@ MODEL_PRICE = {
 
     "black-forest-labs/FLUX.1-dev": 0.0001,
     "black-forest-labs/FLUX.1-pro": 0.0001,
+    "gemini-2.0-flash-exp-image-generation": 0.03,
 
     "images": FREE,
     # rix
@@ -51,9 +52,6 @@
     "minimax_video-01-live2d": MINIMAX_VIDEO,
 
     # free
-    "google/gemini-2.0-flash-thinking-exp:free": 0.00001,
-    "google/gemini-2.0-flash-lite-preview-02-05:free": 0.00001,
-    "google/gemini-2.0-pro-exp-02-05:free": 0.00001,
 
     # chatfire
     "ppu-0001": 0.0001,
@@ -567,6 +565,13 @@ MODEL_RATIO = {
     "meta-deepresearch": 2,
 
     # 豆包
+    "doubao-1-5-pro-32k": 0.4,
+    "doubao-1-5-pro-32k-250115": 0.4,
+    "doubao-1-5-pro-256k": 2.5,
+    "doubao-1-5-pro-256k-250115": 2.5,
+    "doubao-1-5-vision-pro-32k": 1.5,
+    "doubao-1-5-vision-pro-32k-250115": 1.5,
+
     "doubao-lite-128k": 0.4,
     "doubao-lite-32k": 0.15,
     "doubao-lite-32k-character": 0.15,
@@ -578,8 +583,8 @@ MODEL_RATIO = {
     "doubao-pro-32k-character": 0.4,
     "doubao-pro-128k": 2.5,
     "doubao-pro-256k": 5,
-    "doubao-1.5-pro-32k": 0.8,
-    "doubao-1.5-pro-256k": 5,
+    "doubao-1.5-pro-32k": 0.8 / 2,
+    "doubao-1.5-pro-256k": 5 / 2,
 
     "doubao-1.5-vision-pro-32k": 1.5,
     "doubao-vision-lite-32k": 0.75,
@@ -680,8 +685,6 @@ MODEL_RATIO = {
     "gemini-2.0-flash-001": 0.0625,
     "gemini-2.0-flash-lite-preview-02-05": 0.0625,
     "gemini-2.0-flash-exp": 0.0625,
-    "gemini-2.0-flash-exp-image": 2.5,
-    "gemini-2.0-flash-exp-image-generation": 2.5,
 
     "gemini-2.0-pro": 1.25,
     "gemini-2.0-pro-exp-02-05": 1.25,
@@ -904,8 +907,6 @@ COMPLETION_RATIO = {
     "gemini-2.0-flash-001": 4,
 
     "gemini-2.0-flash-exp": 5,
-    "gemini-2.0-flash-exp-image": 5,
-    "gemini-2.0-flash-exp-image-generation": 5,
 
     "gemini-2.0-flash-thinking-exp": 5,
     "gemini-2.0-flash-thinking-exp-1219": 5,
@@ -974,6 +975,7 @@ COMPLETION_RATIO = {
 
     "doubao-pro-4k": 3,
     "doubao-pro-32k": 3,
+    "doubao-pro-32k-241215": 3,
     "doubao-pro-32k-character": 3,
     "doubao-pro-128k": 3,
     "doubao-pro-256k": 3,
@@ -984,6 +986,11 @@ COMPLETION_RATIO = {
     "doubao-vision-lite-32k": 3,
     "doubao-vision-pro-32k": 3,
 
+    "doubao-1-5-pro-32k": 1.25,
+    "doubao-1-5-pro-32k-250115": 1.25,
+    "doubao-1-5-pro-256k": 1.8,
+    "doubao-1-5-pro-256k-250115": 1.8,
+
     "deepseek-r1:1.5b": 4,
     "deepseek-r1-distill-qwen-1.5b": 4,
     "deepseek-r1:7b": 4,
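The hunks above only adjust entries in the MODEL_PRICE, MODEL_RATIO, and COMPLETION_RATIO tables; the billing code that consumes them is outside this diff. As a hedged illustration of how such prompt/completion ratio tables are typically applied (the base price, token counts, and formula below are assumptions, not the package's actual implementation):

# Values copied from the hunks above; everything else in this sketch is illustrative.
MODEL_RATIO = {"doubao-1-5-pro-32k": 0.4}        # prompt-side multiplier
COMPLETION_RATIO = {"doubao-1-5-pro-32k": 1.25}  # extra multiplier on completion tokens


def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int,
                  base_price_per_1k: float = 0.002) -> float:
    """Assumed formula: base * model_ratio * (prompt + completion_ratio * completion) / 1000."""
    ratio = MODEL_RATIO.get(model, 1.0)
    completion_ratio = COMPLETION_RATIO.get(model, 1.0)
    weighted_tokens = prompt_tokens + completion_ratio * completion_tokens
    return base_price_per_1k * ratio * weighted_tokens / 1000


if __name__ == '__main__':
    # e.g. 1,000 prompt tokens + 500 completion tokens on doubao-1-5-pro-32k
    print(round(estimate_cost("doubao-1-5-pro-32k", 1_000, 500), 6))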
meutils/schemas/openai_types.py
CHANGED
@@ -603,10 +603,10 @@ if __name__ == '__main__':
     # print(chat_completion_chunk_stop)
 
     # print(CompletionRequest(messages=messages).last_urls)
-
+    print(CompletionRequest(messages=messages).last_urls)
 
     # print(mesages)
-    print(CompletionRequest(messages=messages).last_assistant_content)
+    # print(CompletionRequest(messages=messages).last_assistant_content)
 
     # print(chat_completion_chunk)
     # print(chat_completion)
{MeUtils-2025.4.11.19.33.11.dist-info → MeUtils-2025.4.14.18.6.48.dist-info}/LICENSE
RENAMED
File without changes
{MeUtils-2025.4.11.19.33.11.dist-info → MeUtils-2025.4.14.18.6.48.dist-info}/WHEEL
RENAMED
File without changes
{MeUtils-2025.4.11.19.33.11.dist-info → MeUtils-2025.4.14.18.6.48.dist-info}/entry_points.txt
RENAMED
File without changes
{MeUtils-2025.4.11.19.33.11.dist-info → MeUtils-2025.4.14.18.6.48.dist-info}/top_level.txt
RENAMED
File without changes