MeUtils 2025.8.16.10.41.4__py3-none-any.whl → 2025.8.19.22.3.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -15,30 +15,38 @@ from openai import OpenAI, APIStatusError
 
 client = OpenAI(
 base_url=os.getenv("FFIRE_BASE_URL"),
- api_key=os.getenv("FFIRE_API_KEY") +"-29463"
+ api_key=os.getenv("FFIRE_API_KEY") #+"-29463"
 )
+ #
+ # for i in range(1):
+ # try:
+ # completion = client.chat.completions.create(
+ # # model="kimi-k2-0711-preview",
+ # # model="deepseek-reasoner",
+ # # model="qwen3-235b-a22b-thinking-2507",
+ # model="qwen3-235b-a22b-instruct-2507",
+ #
+ # messages=[
+ # {"role": "user", "content": '你是谁'}
+ # ],
+ # # top_p=0.7,
+ # top_p=None,
+ # temperature=None,
+ # # stream=True,
+ # max_tokens=1000,
+ # extra_body={"xx": "xxxxxxxx"}
+ # )
+ # print(completion)
+ # except Exception as e:
+ # print(e)
 
- for i in range(1):
- try:
- completion = client.chat.completions.create(
- # model="kimi-k2-0711-preview",
- # model="deepseek-reasoner",
- # model="qwen3-235b-a22b-thinking-2507",
- model="qwen3-235b-a22b-instruct-2507",
+ model = "doubao-embedding-text-240715"
 
- messages=[
- {"role": "user", "content": '你是谁'}
- ],
- # top_p=0.7,
- top_p=None,
- temperature=None,
- # stream=True,
- max_tokens=1000,
- extra_body={"xx": "xxxxxxxx"}
- )
- print(completion)
- except Exception as e:
- print(e)
 
+ r = client.embeddings.create(
+ input='hi',
+ model=model
+ )
+ print(r)
 
 
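Note: the demo above now exercises the embeddings endpoint instead of chat completions. A minimal standalone sketch of the same call, assuming the FFIRE_* environment variables and the doubao-embedding-text-240715 model shown in the hunk:

    import os
    from openai import OpenAI

    client = OpenAI(base_url=os.getenv("FFIRE_BASE_URL"), api_key=os.getenv("FFIRE_API_KEY"))
    r = client.embeddings.create(input="hi", model="doubao-embedding-text-240715")
    # r.data[0].embedding is the vector; its length is the model's embedding dimension
    print(len(r.data[0].embedding), r.usage)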
@@ -15,8 +15,8 @@ from meutils.llm.clients import OpenAI
 
 
 client = OpenAI(
- api_key=os.getenv("SILICONFLOW_API_KEY"),
- # api_key="sk-ugfakteneejitibfzpwttxplymratxacudosclwlvzopexwq",
+ # api_key=os.getenv("SILICONFLOW_API_KEY"),
+ api_key="sk-ugfakteneejitibfzpwttxplymratxacudosclwlvzopexwq",
 base_url="https://api.siliconflow.cn/v1",
 # http_client=httpx.Client(
 # proxy="http://110.42.51.201:38443",
@@ -57,7 +57,7 @@ response = client.chat.completions.create(
 messages=messages,
 stream=True,
 max_tokens=1,
- extra_body={"enable_thinking": False}
+ # extra_body={"enable_thinking": False}
 )
 print(response)
 for chunk in response:
@@ -70,10 +70,18 @@ def request_many():
 # model='alibaba/Qwen1.5-110B-Chat',
 model=model,
 messages=[
+ {'role': 'user', 'content': "1+1"},
+ {'role': 'assistant', 'content': """
+ <think>
+ reasoning_content
+ </think>
+ content
+ """ },
+
 {'role': 'user', 'content': "抛砖引玉是什么意思呀" * 1}
 ],
 # messages=messages,
- stream=False,
+ stream=True,
 max_tokens=1,
 extra_body={"enable_thinking": False}
 
@@ -81,3 +89,24 @@ def request_many():
 print(response)
 # for chunk in response:
 # print(chunk)
+
+
+ """"
+ reasoning_content
+ content 多轮
+
+
+
+ content
+ <think>
+ 1
+ 2
+ 3
+ 4
+ reasoning_content
+ </think>
+
+ content
+ reasoning_content:
+ type: 'think'
+ """
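The scratch notes above describe the two shapes reasoning can arrive in: a separate reasoning_content field, or inline <think>...</think> tags ahead of the answer (possibly across multiple turns). A hedged illustration of splitting the inline form; the regex and helper are illustrative, not part of the package:

    import re

    def split_think(text: str):
        # illustrative only: separate <think>...</think> reasoning from the answer
        m = re.search(r"<think>(.*?)</think>", text, flags=re.S)
        reasoning = m.group(1).strip() if m else ""
        content = re.sub(r"<think>.*?</think>", "", text, flags=re.S).strip()
        return reasoning, content

    print(split_think("<think>1\n2\n3</think>\ncontent"))  # ('1\n2\n3', 'content')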
@@ -13,10 +13,10 @@ from meutils.pipe import *
 from openai import OpenAI
 from openai import OpenAI, APIStatusError
 
-
+ # e21bd630f681c4d90b390cd609720483.WSFVgA3KkwNCX0mN
 client = OpenAI(
 # base_url="https://free.chatfire.cn/v1",
- api_key="e21bd630f681c4d90b390cd609720483.WSFVgA3KkwNCX0mN",
+ api_key="9df724995f384c2e91d673864d1d32eb.aeLMBoocPyRfGBx8",
 base_url="https://open.bigmodel.cn/api/paas/v4"
 
  # api_key="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiI3YmFmYWQzYTRmZDU0OTk3YjNmYmNmYjExMjY5NThmZiIsImV4cCI6MTczODAyNDg4MiwibmJmIjoxNzIyNDcyODgyLCJpYXQiOjE3MjI0NzI4ODIsImp0aSI6IjY5Y2ZiNzgzNjRjODQxYjA5Mjg1OTgxYmY4ODMzZDllIiwidWlkIjoiNjVmMDc1Y2E4NWM3NDFiOGU2ZmRjYjEyIiwidHlwZSI6InJlZnJlc2gifQ.u9pIfuQZ7Y00DB6x3rbWYomwQGEyYDSE-814k67SH74",
@@ -29,7 +29,7 @@ A Chinese beauty plays Catwoman. She is seductive. She wears a fitted black leat
 
 try:
 completion = client.chat.completions.create(
- model="glm-4.5",
+ model="glm-4.5-air",
 # model="xxxxxxxxxxxxx",
 messages=[
 {"role": "system", "content": '你是个内容审核助手'},
@@ -8,4 +8,3 @@
 # @Software : PyCharm
 # @Description :
 
- from meutils.pipe import *
@@ -0,0 +1,154 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ # @Project : AI. @by PyCharm
+ # @File : z.py
+ # @Time : 2025/8/19 08:32
+ # @Author : betterme
+ # @WeChat : meutils
+ # @Software : PyCharm
+ # @Description :
+
+
+ from meutils.pipe import *
+ from meutils.caches import rcache
+ from meutils.db.redis_db import redis_aclient
+
+ from openai import AsyncOpenAI
+ from meutils.llm.openai_utils import to_openai_params, create_chat_completion_chunk
+
+ from meutils.schemas.openai_types import CompletionRequest, chat_completion_chunk, chat_completion
+
+ from meutils.decorators.retry import retrying
+ from meutils.config_utils.lark_utils import get_next_token_for_polling
+
+ from fake_useragent import UserAgent
+
+ ua = UserAgent()
+
+ BASE_URL = "https://chat.z.ai/api"
+ FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=9VvErr"
+
+
+ class Completions(object):
+ def __init__(self, api_key: Optional[str] = None):
+ self.api_key = api_key
+
+ async def create(self, request: CompletionRequest, token: Optional[str] = None):
+ token = token or await get_next_token_for_polling(FEISHU_URL)
+
+ chat_id = str(uuid.uuid4())
+ payload = {
+ "id": chat_id,
+ "chat_id": chat_id,
+ "model": "0727-360B-API",
+
+ "stream": True,
+
+ "params": {},
+ "features": {
+ "image_generation": False,
+ "web_search": False,
+ "auto_web_search": False,
+ "preview_mode": False,
+ "flags": [],
+ "features": [
+ {
+ "type": "mcp",
+ "server": "vibe-coding",
+ "status": "hidden"
+ },
+ {
+ "type": "mcp",
+ "server": "ppt-maker",
+ "status": "hidden"
+ },
+ {
+ "type": "mcp",
+ "server": "image-search",
+ "status": "hidden"
+ }
+ ],
+ "enable_thinking": request.enable_thinking or False
+ },
+
+ "background_tasks": {
+ "title_generation": False,
+ "tags_generation": False
+ }
+ }
+
+ payload = {**request.model_dump(), **payload}
+
+ data = to_openai_params(payload)
+
+ # todo 代理
+ client = AsyncOpenAI(base_url=BASE_URL, api_key=token, default_headers={"X-FE-Version": "prod-fe-1.0.69"})
+ response = await client.chat.completions.create(**data)
+ response = self.do_response(response, request.stream)
+
+ # async for i in response:
+ # logger.debug(i)
+
+ return response
+
+ async def do_response(self, response, stream: bool):
+ usage = None
+ nostream_content = ""
+ nostream_reasoning_content = ""
+ chat_completion_chunk.model = "glm-4.5"
+ async for i in response:
+ # print(i)
+
+ delta_content = (
+ i.data.get("delta_content", "").split(' ')[-1]
+ or i.data.get("edit_content", "").split("\n")[-1]
+ )
+ if i.data.get("phase") == "thinking":
+ nostream_reasoning_content += delta_content
+ chat_completion_chunk.choices[0].delta.reasoning_content = delta_content
+
+ elif i.data.get("phase") == "answer":
+ nostream_content += delta_content
+ chat_completion_chunk.choices[0].delta.content = delta_content
+
+ else:
+ logger.debug(bjson(i))
+
+ if stream:
+ yield chat_completion_chunk
+
+ usage = usage or i.data.get("usage", "")
+
+ if not stream:
+ chat_completion.choices[0].message.content = nostream_content
+ chat_completion.choices[0].message.reasoning_content = nostream_reasoning_content
+ chat_completion.usage = usage
+ chat_completion.model = "glm-4.5"
+ yield chat_completion
+
+
+ if __name__ == '__main__':
+ token = "eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6ImI0YThkMTI5LWY2YzgtNDM5Mi1iYzlhLWEyNjM1Nzg0ZDM5MyIsImVtYWlsIjoiemJqZ2NlZ2NsbkB0aXRrLnVrIn0.cME4z8rip8Y6mQ0q_JEoY6ywPk_7ud2BsyFHyPRhFhtzEl_uLcQEMNlop7hM_fTy0S5pS8qdLK5y7iA1it0n7g"
+
+ request = CompletionRequest(
+ model="glm-4.5",
+ messages=[
+ {
+ "role": "system",
+ "content": "你是gpt",
+
+ },
+ {
+ "role": "user",
+ "content": [{"type": "text", "text": "周杰伦"}],
+ # "content": "你是谁",
+
+ }
+ ],
+ stream=True,
+
+ enable_thinking=True
+
+ )
+
+ arun(Completions().create(request, token))
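The new chat.z.ai adapter streams upstream events whose data carry a phase field ("thinking" vs "answer") and folds them into OpenAI-style chunks; with stream=False it aggregates everything into a single chat_completion instead. A minimal consumption sketch, assuming the Completions and CompletionRequest names from this file and a valid token:

    import asyncio

    async def demo():
        request = CompletionRequest(
            model="glm-4.5",
            messages=[{"role": "user", "content": "hi"}],
            stream=True,
        )
        # create() hands back the do_response() async generator
        async for chunk in await Completions().create(request, token="<token>"):
            delta = chunk.choices[0].delta
            print(delta.reasoning_content, delta.content)

    # asyncio.run(demo())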
@@ -28,7 +28,10 @@ async def generate(request: ImageRequest, api_key: Optional[str] = None):
 
 client = AsyncClient(base_url=BASE_URL, api_key=api_key)
 
- data = {**request.model_dump(exclude_none=True, exclude={"extra_fields"}), **(request.extra_fields or {})}
+ data = {
+ **request.model_dump(exclude_none=True, exclude={"extra_fields", "aspect_ratio"}),
+ **(request.extra_fields or {})
+ }
 # logger.debug(bjson(data))
 
 data = to_openai_params(ImageRequest(**data))
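In the merge above, extra_fields is unpacked last, so caller-supplied keys override whatever model_dump() produced, and aspect_ratio is now excluded from the dump, presumably because ImageRequest has already resolved it into size (see the last hunk of this diff). The precedence is plain dict-unpacking semantics:

    dumped = {"model": "flux-schnell", "size": "1024x1024"}        # illustrative values
    extra_fields = {"size": "768x768", "guidance_scale": 3.5}
    data = {**dumped, **extra_fields}
    print(data)  # {'model': 'flux-schnell', 'size': '768x768', 'guidance_scale': 3.5}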
@@ -258,11 +258,11 @@ if __name__ == '__main__':
 # # arun(delete_channel(range(10000, 20000)))
 key = "KEY"
 request = ChannelInfo(name='', key=key)
- request = ChannelInfo(id=10099, key=key, used_quota=0.001)
+ request = ChannelInfo(id=21223, key=key, used_quota=0.001)
 
- # arun(create_or_update_channel(request, base_url=base_url))
-
- arun(exist_channel(request, base_url=base_url))
+ arun(create_or_update_channel(request, base_url=base_url))
+ #
+ # arun(exist_channel(request, base_url=base_url))
 
 """
 UPSTREAM_BASE_URL=https://api.ffire.cc
@@ -0,0 +1,11 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ # @Project : AI. @by PyCharm
+ # @File : __init__.py
+ # @Time : 2025/8/19 13:22
+ # @Author : betterme
+ # @WeChat : meutils
+ # @Software : PyCharm
+ # @Description :
+
+ from meutils.pipe import *
@@ -0,0 +1,160 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ # @Project : AI. @by PyCharm
+ # @File : chat
+ # @Time : 2025/8/19 13:22
+ # @Author : betterme
+ # @WeChat : meutils
+ # @Software : PyCharm
+ # @Description : qwen-image
+
+
+ from openai import AsyncOpenAI, OpenAI, AsyncStream
+
+ from meutils.pipe import *
+ from meutils.decorators.retry import retrying
+ # from meutils.oss.ali_oss import qwenai_upload
+ from meutils.io.files_utils import to_bytes, guess_mime_type
+ from meutils.caches import rcache
+
+ from meutils.llm.openai_utils import to_openai_params, create_chat_completion_chunk, token_encoder
+
+ from meutils.config_utils.lark_utils import get_next_token_for_polling
+ from meutils.schemas.openai_types import chat_completion, chat_completion_chunk, CompletionRequest, CompletionUsage, \
+ ChatCompletion
+
+ FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=PP1PGr"
+
+ base_url = "https://chat.qwen.ai/api/v2"
+ DEFAUL_MODEL = "qwen3-235b-a22b"
+ from fake_useragent import UserAgent
+
+ ua = UserAgent()
+
+ thinking_budget_mapping = {
+ "low": 1000,
+ "medium": 8000,
+ "high": 24000
+ }
+
+ COOKIE = """
+ cna=KP9DIEqqyjUCATrw/+LjJV8F; _bl_uid=LXmp28z7dwezpmyejeXL9wh6U1Rb; cnaui=310cbdaf-3754-461c-a3ff-9ec8005329c9; aui=310cbdaf-3754-461c-a3ff-9ec8005329c9; sca=43897cb0; _gcl_au=1.1.106229673.1748312382.56762171.1748482542.1748482541; xlly_s=1; x-ap=ap-southeast-1; acw_tc=0a03e53917509898782217414e520e5edfcdef667dcbd83b767c0ce464fad4; token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTM1ODE4ODV9.Npy24ubI717JmdSWMrodWSvVRHENgbJ7Knd-Yf158YE; atpsida=705b922fe336ee0d63fcc329_1750989888_2; SERVERID=e8c2af088c314df080fffe7d0976a96b|1750989892|1750910540; tfstk=gGtsWsqG4IKUeosYhNDUAMIBJRIbcvoz6-6vEKEaHGIOG-O2eZBabAYXRIR16hSOMpQpNtDMbtpTlWd2wNEAWA4XAOWy0FJtS6Ef3IDMbiQvps65XZYNg15fcKASLbor4dvGmGlra0WjM37NqSBAMS5d9TSfBJ35KivGmihEsEHyxdAMR0lwBiHCvt6uMiBYDMHC3TXOD1QY9yBR9iIAktIdpOX0DlCYWv9dtOsAMIQtdMChHfD7Ftg1sdMwtHJ00Jm2p6ZYDH6Ki1p6F9XBAwQOwwCQD9-CCN1JBhJB9QBXy3_MwXzN6UTkNTRZvlOWBCTRyhFKOivePI6WXYU5GCvpbwKt3zXhmFLRXnG76ppJBeLJBXzCdepwAw--No_MJCYllnlEqG8yUnbJXcNlTaXXNGLI9lOR4urPNGl0lJ_uc91rdva0oJN5AmdFjVAhW9X18vMQ6EbOK96ndva0oNBhCOMId5Lc.; isg=BNfX7gH7c3OJX_gfCBykQ2rtZk0hHKt-YCofVCkEq6YJWPSaPe8Dz9o-uvjGsIP2; ssxmod_itna=iqGxRDuQqWqxgDUxeKYI5q=xBDeMDWK07DzxC5750CDmxjKidKDUGQq7qdOamuu9XYkRGGm01DBL4qbDnqD80DQeDvYxk0K4MUPhDwpaW8YRw3Mz7GGb48aIzZGzY=0DgSdfOLpmxbD884rDYoDCqDSDxD99OdD4+3Dt4DIDAYDDxDWCeDBBWriDGpdhmbQVqmqvi2dxi3i3mPiDit8xi5bZendVL4zvDDlKPGf3WPt5xGnD0jmxhpdx038aoODzLiDbxEY698DtkHqPOK=MlTiRUXxAkDb9RG=Y2U3iA4G3DhkCXU3QBhxCqM2eeQmkeNzCwkjw/006DDAY2DlqTWweL04MKBeHhY5om5NUwYHuFiieQ0=/R=9iO9xTBhND4KF4dvyqz0/toqlqlzGDD; ssxmod_itna2=iqGxRDuQqWqxgDUxeKYI5q=xBDeMDWK07DzxC5750CDmxjKidKDUGQq7qdOamuu9XYkRGGmibDG85+YNY=exGa3Y64u5DBwiW7r++DxFqCdl=l77NQwckyAaCG64hkCOjO1pkcMRBdqj70N7nk=e94KEQYUxlf+2Dw=ViA+XKDde0uGS+eXgFkQqzYWe0Dd4oGbUj8L4QY4og345X2DjKDNOfQRgfeIKVRFQjqR098dBUrQsXBNQZcG1oBFAp4xkLYHl+W3OQW9ybPF4sML3t1tPX2T4DmCqKL+jN1XX94xpyA6k9+sgyBFY4zXOq7dHOuO3Gd3lidwdrk=8dNrOdrYQo33fobVS=MRF7nNQBC5d3kBbYdwtoxNBKmBiXoTfOTzOp3MT=ODXhxfO16Tta4vSW=ubtkEGgeQ/gKOwsVjmKDEY0NZ+ee7xlitvWmBbtk7ma7x1PinxtbitdadtYQOqG5AFEZbFxiSE6rDky7jiatQ0Fe7z6uDmYx4z5MGxMA5iDY7DtSLfNUYxU44D
+ """.strip()
+
+
+ class Completions(object):
+ def __init__(self, api_key: Optional[str] = None):
+ self.api_key = api_key
+
+ async def create(self, request: CompletionRequest, api_key: Optional[str] = None, cookie: Optional[str] = None):
+ api_key = api_key or await get_next_token_for_polling(FEISHU_URL)
+
+ self.client = AsyncOpenAI(
+ base_url=base_url,
+ api_key=api_key,
+ default_headers={
+ 'User-Agent': ua.random,
+ 'Cookie': cookie or COOKIE
+ }
+ )
+
+ chat_id = await self.create_new_chat()
+
+
+ payload = {
+ "chat_id": chat_id,
+ "incremental_output": True,
+ "chat_mode": "normal",
+ "model": "qwen3-235b-a22b",
+ "messages": [
+ {
+ "role": "user",
+ "content": "这只熊拿着五彩画板和画笔,站在画板前画画。",
+
+ "user_action": "recommendation",
+ "files": [
+ {
+ "type": "image",
+ "name": "example.png",
+ "file_type": "image",
+ "showType": "image",
+ "file_class": "vision",
+ "url": "https://img.alicdn.com/imgextra/i2/O1CN0137EBmZ276dnmyY0kx_!!6000000007748-2-tps-1024-1024.png"
+ }
+ ],
+ "models": [
+ "qwen3-235b-a22b"
+ ],
+ # "chat_type": "t2t",
+ "chat_type": "image_edit",
+
+ "feature_config": {
+ "thinking_enabled": request.enable_thinking or False,
+ "output_schema": "phase"
+ },
+ "extra": {
+ "meta": {
+ "subChatType": "t2t"
+ }
+ }
+ }
+ ]
+ }
+
+ payload = {**request.model_dump(), **payload}
+
+ data = to_openai_params(payload)
+ response = await self.client.chat.completions.create(**data, extra_query={"chat_id": chat_id})
+ # response = self.do_response(response)
+
+ if isinstance(response, AsyncStream):
+ async for i in response:
+ print(i)
+
+ else:
+ prompt_tokens = len(token_encoder.encode(str(request.messages)))
+ completion_tokens = len(token_encoder.encode(str(response.choices[0].message.content)))
+ usage = {
+ "prompt_tokens": prompt_tokens,
+ "completion_tokens": completion_tokens,
+ "total_tokens": prompt_tokens + completion_tokens
+ }
+ response.usage = usage
+ print(response)
+
+ # return response
+
+ async def create_new_chat(self):
+
+ payload = {
+ "title": "新建对话",
+ "models": [DEFAUL_MODEL],
+ "chat_mode": "normal",
+ "chat_type": "t2i",
+ "timestamp": time.time() * 1000 // 1
+ }
+ resp = await self.client.post('/chats/new', body=payload, cast_to=object)
+ logger.debug(resp)
+ return resp['data']['id']
+
+
+ if __name__ == '__main__':
+ token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTgxNTc1Njh9.eihH3NVrzJCg9bdWb9mim9rGKTLKn1a66kW2Cqc0uPM"
+ request = CompletionRequest(
+ model="qwen3-235b-a22b",
+ messages=[
+
+ {
+ "role": "user",
+ # "content": [{"type": "text", "text": "周杰伦"}],
+ "content": "这只熊拿着五彩画板和画笔,站在画板前画画。",
+
+ }
+ ],
+ stream=True,
+
+ enable_thinking=True
+
+ )
+
+ arun(Completions().create(request, token))
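In the non-stream branch above, usage is recomputed locally with the shared token_encoder (tiktoken's cl100k_base, per a later hunk) rather than taken from the upstream response. A standalone sketch of that accounting using tiktoken directly:

    import tiktoken

    enc = tiktoken.get_encoding("cl100k_base")
    prompt_tokens = len(enc.encode(str([{"role": "user", "content": "hi"}])))
    completion_tokens = len(enc.encode("hello there"))
    usage = {
        "prompt_tokens": prompt_tokens,
        "completion_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens,
    }
    print(usage)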
@@ -9,6 +9,7 @@
 # @Description :
 
 from openai import AsyncOpenAI
+ from openai import APIStatusError
 
 from meutils.pipe import *
 from meutils.io.files_utils import to_base64, to_url
@@ -111,19 +112,30 @@ async def generate(request: ImageRequest, api_key: Optional[str] = None):
 data = to_openai_images_params(request)
 # logger.debug(data)
 
- # 代理
- http_client = None
- if request.user == 'proxy':
- http_client = httpx.AsyncClient(proxy=await get_one_proxy(), timeout=60)
-
- client = AsyncOpenAI(base_url=BASE_URL, api_key=api_key, http_client=http_client, timeout=60)
- response = await client.images.generate(**data)
- response.model = ""
- # logger.debug(response)
+ try:
+ client = AsyncOpenAI(base_url=BASE_URL, api_key=api_key)
+ response = await client.images.generate(**data)
+ # logger.debug(response)
+
+ except APIStatusError as e:
+ logger.debug(e)
+ # logger.debug(e.response.json())
+ # logger.debug(e.response.status_code)
+
+ if e.response.status_code > 403 and any(i in BASE_URL for i in {"siliconflow", "modelscope"}):
+ proxy = await get_one_proxy()
+ client = AsyncOpenAI(
+ base_url=BASE_URL,
+ api_key=api_key,
+ http_client=httpx.AsyncClient(proxy=proxy, timeout=100)
+ )
+ response = await client.images.generate(**data)
+ raise e
 
 # response.data[0].url = response.data[0].url.replace(r'\u0026', '&')
- send_message(f"request: {request.model}\n{request.prompt}\nresponse: {response.data[0].url}",)
+ send_message(f"request: {request.model}\n{request.prompt}\nresponse: {response.data[0].url}", )
 
+ response.model = ""
 response.data[0].url = await to_url(response.data[0].url, content_type="image/png")
 if request.response_format == "b64_json":
 b64_json = await to_base64(response.data[0].url)
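One caveat in the new error path: as ordered, raise e follows the proxied retry with no else branch in between, so even when the second images.generate call succeeds its result appears to be discarded and the original APIStatusError propagates (indentation is not visible in this rendering, so this is an inference from line order). A hedged sketch of the presumably intended flow, re-raising only when no retry applies:

    try:
        response = await client.images.generate(**data)
    except APIStatusError as e:
        logger.debug(e)
        if e.response.status_code > 403 and any(i in BASE_URL for i in {"siliconflow", "modelscope"}):
            # hypothetical fix: fall back to a proxied client and keep its result
            client = AsyncOpenAI(
                base_url=BASE_URL,
                api_key=api_key,
                http_client=httpx.AsyncClient(proxy=await get_one_proxy(), timeout=100),
            )
            response = await client.images.generate(**data)
        else:
            raise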
@@ -140,10 +152,11 @@ if __name__ == '__main__':
 from meutils.pipe import *
 
 data = {
- # "model": "flux-schnell",
- "model": "black-forest-labs/FLUX.1-Krea-dev",
+ "model": "flux-schnell",
+ # "model": "black-forest-labs/FLUX.1-Krea-dev",
 
- "prompt": "(Chinese dragon soaring through the clouds).(majestic, colorful, mythical, powerful, ancient).(DSLR camera).(wide-angle lens).(dawn)(fantasy photography).(Kodak Ektar 100)",
+ "prompt": "a dog",
+ # "prompt": "(Chinese dragon soaring through the clouds).(majestic, colorful, mythical, powerful, ancient).(DSLR camera).(wide-angle lens).(dawn)(fantasy photography).(Kodak Ektar 100)",
 "negative_prompt": "",
 "n": 1,
 # "response_format": "url",
@@ -173,8 +186,11 @@ if __name__ == '__main__':
 # request = FluxImageRequest(model="flux", prompt="a dog", size="1024x1024", num_inference_steps=1)
 # request = FluxImageRequest(model="flux-pro", prompt="a dog", size="10x10", num_inference_steps=1)
 
- data = {'model': 'black-forest-labs/FLUX.1-Krea-dev', 'prompt': '画一个2025年电脑如何一键重装系统win10教程详解的封面图', 'n': 1,
- 'size': '680x400'}
+ # data = {
+ # 'model': 'black-forest-labs/FLUX.1-Krea-dev',
+ # 'prompt': '画一个2025年电脑如何一键重装系统win10教程详解的封面图', 'n': 1,
+ # 'size': '1024x1024'
+ # }
 request = FluxImageRequest(**data)
 
 print(request)
meutils/data/VERSION CHANGED
@@ -1 +1 @@
- 2025.08.16.10.41.04
+ 2025.08.19.22.03.25
@@ -218,7 +218,7 @@ async def create(request: CompletionRequest, token: Optional[str] = None, cookie
 chunk = None
 usage = None
 async for chunk in chunks:
- logger.debug(chunk)
+ # logger.debug(chunk)
 if not chunk.choices: continue
 
 content = chunk.choices[0].delta.content or ""
@@ -227,7 +227,7 @@ async def create(request: CompletionRequest, token: Optional[str] = None, cookie
 chunk.choices[0].delta.reasoning_content = content
 nostream_reasoning_content += content
 
- logger.debug(chunk.choices[0].delta.content)
+ # logger.debug(chunk.choices[0].delta.content)
 nostream_content += chunk.choices[0].delta.content
 usage = chunk.usage or usage
 
@@ -35,7 +35,7 @@ async def billing_for_async_task(
 model: str = "async-task",
 task_id: str = "sync",
 n: float = 1,
- api_key: Optional[str] = None
+ api_key: Optional[str] = None ########## 注意
 ):
 model = model.lower().replace('/', '-') # 统一小写 # wan-ai-wan2.1-t2v-14b
 if n := int(np.round(n)):
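The guard if n := int(np.round(n)): both rounds and gates the billing: fractional task counts are rounded, and a result of 0 is falsy, so nothing is billed. For example:

    import numpy as np

    for n in (0.4, 0.5, 1, 1.6):
        if m := int(np.round(n)):
            print(f"n={n} -> bill {m} unit(s)")
        else:
            print(f"n={n} -> skipped (rounds to 0)")
    # 0.4 and 0.5 are skipped (numpy rounds halves to the nearest even integer);
    # 1 bills 1 unit, 1.6 bills 2 units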
@@ -27,7 +27,7 @@ from meutils.schemas.openai_types import chat_completion, chat_completion_chunk,
 
 from meutils.schemas.image_types import ImageRequest, ImageEditRequest
 
- token_encoder = tiktoken.get_encoding('cl100k_base')
+ token_encoder = tiktoken.get_encoding('cl100k_base') # o200k_base
 token_encoder_with_cache = lru_cache(maxsize=1024)(token_encoder.encode)
 
 CHAT_COMPLETION_PARAMS = get_function_params()
@@ -320,4 +320,6 @@ if __name__ == '__main__':
 #
 # print(token_encoder.encode('hi'))
 
- logger.debug(IMAGES_EDIT_PARAMS)
+ # logger.debug(IMAGES_EDIT_PARAMS)
+
+ print(token_encoder_with_cache('hi'))
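token_encoder_with_cache is just the cl100k_base encode function wrapped in functools.lru_cache, so the new print statement emits raw token ids rather than a count. A standalone equivalent:

    from functools import lru_cache
    import tiktoken

    token_encoder = tiktoken.get_encoding("cl100k_base")  # o200k_base is the newer alternative
    token_encoder_with_cache = lru_cache(maxsize=1024)(token_encoder.encode)

    print(token_encoder_with_cache("hi"))       # a short list of token ids, cached on repeat calls
    print(len(token_encoder_with_cache("hi")))  # the token count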
@@ -58,7 +58,13 @@ if __name__ == '__main__':
 url = "https://chat.tune.app/?id=7f268d94-d2d4-4bd4-a732-f196aa20dceb"
 url = "https://app.yinxiang.com/fx/8b8bba1e-b254-40ff-81e1-fa3427429efe"
 
- print(Crawler(url).xpath('//script//text()'))
+ # print(Crawler(url).xpath('//script//text()'))
+
+
+ url = "https://docs.bigmodel.cn/cn/guide/models/free"
+ print(Crawler(url).xpath('//*[@id="sidebar-group"]/li[8]//text()'))
+
+ # 'GLM-4.5-Flash', 'GLM-4.1V-Thinking-Flash', 'GLM-4-Flash-250414', 'GLM-4V-Flash', 'GLM-Z1-Flash', 'Cogview-3-Flash'
 
 # html_content = httpx.get(url).text
 
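Crawler is a meutils helper; an equivalent scrape with httpx and lxml directly (the XPath is copied from the hunk above and is tied to the current layout of the bigmodel docs page) would look roughly like:

    import httpx
    from lxml import etree

    url = "https://docs.bigmodel.cn/cn/guide/models/free"
    tree = etree.HTML(httpx.get(url, timeout=30).text)
    # the 8th sidebar group is expected to list the free models quoted in the comment above
    print(tree.xpath('//*[@id="sidebar-group"]/li[8]//text()'))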
@@ -119,7 +119,7 @@ class ImageRequest(BaseModel): # openai
 def __init__(self, /, **data: Any):
 super().__init__(**data)
 
- if self.aspect_ratio and self.size is None: # 适配比例
+ if self.aspect_ratio: # 适配比例
 self.size = ASPECT_RATIOS.get(self.aspect_ratio, '1024x1024')
 
 elif self.size == 'auto':
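Dropping the "and self.size is None" guard changes precedence: an explicit size is now overwritten whenever aspect_ratio is set, whereas before aspect_ratio only filled in a missing size. Illustrative behavior, assuming the fields shown here and some ASPECT_RATIOS entry for "16:9":

    req = ImageRequest(model="flux-schnell", prompt="a dog", size="512x512", aspect_ratio="16:9")
    # before this change: req.size stays "512x512" (aspect_ratio applied only when size was None)
    # after this change:  req.size becomes ASPECT_RATIOS.get("16:9", "1024x1024")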