MeUtils 2025.4.9.10.39.6__py3-none-any.whl → 2025.4.11.17.37.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {MeUtils-2025.4.9.10.39.6.dist-info → MeUtils-2025.4.11.17.37.3.dist-info}/METADATA +266 -266
- {MeUtils-2025.4.9.10.39.6.dist-info → MeUtils-2025.4.11.17.37.3.dist-info}/RECORD +40 -37
- examples/_openaisdk/openai_google.py +38 -31
- meutils/apis/{google_apis → google}/audios.py +5 -2
- meutils/apis/google/chat.py +372 -0
- meutils/apis/google/files.py +29 -0
- meutils/apis/{google_apis → google}/google2openai.py +8 -4
- meutils/apis/google/images.py +27 -0
- meutils/apis/{google_apis → google}/search.py +7 -5
- meutils/apis/jimeng/images.py +41 -8
- meutils/apis/search/metaso.py +2 -2
- meutils/apis/siliconflow/images.py +5 -3
- meutils/caches/acache.py +1 -1
- meutils/common.py +1 -0
- meutils/data/VERSION +1 -1
- meutils/decorators/catch.py +0 -8
- meutils/decorators/common.py +86 -19
- meutils/decorators/contextmanagers.py +103 -14
- meutils/decorators/demo.py +76 -14
- meutils/io/files_utils.py +2 -3
- meutils/io/openai_files.py +11 -6
- meutils/llm/check_utils.py +20 -1
- meutils/llm/openai_polling/__init__.py +11 -0
- meutils/llm/openai_polling/chat.py +45 -0
- meutils/{apis/google_apis → llm/openai_polling}/images.py +4 -2
- meutils/llm/openai_utils/common.py +1 -1
- meutils/notice/feishu.py +5 -2
- meutils/schemas/image_types.py +26 -3
- meutils/schemas/oneapi/common.py +12 -0
- meutils/schemas/openai_types.py +10 -0
- meutils/serving/fastapi/dependencies/__init__.py +4 -1
- meutils/serving/fastapi/dependencies/auth.py +10 -6
- meutils/serving/fastapi/exceptions/http_error.py +2 -2
- meutils/str_utils/__init__.py +30 -2
- meutils/apis/google_apis/common.py +0 -243
- meutils/apis/google_apis/files.py +0 -19
- {MeUtils-2025.4.9.10.39.6.dist-info → MeUtils-2025.4.11.17.37.3.dist-info}/LICENSE +0 -0
- {MeUtils-2025.4.9.10.39.6.dist-info → MeUtils-2025.4.11.17.37.3.dist-info}/WHEEL +0 -0
- {MeUtils-2025.4.9.10.39.6.dist-info → MeUtils-2025.4.11.17.37.3.dist-info}/entry_points.txt +0 -0
- {MeUtils-2025.4.9.10.39.6.dist-info → MeUtils-2025.4.11.17.37.3.dist-info}/top_level.txt +0 -0
- /meutils/apis/{google_apis → google}/__init__.py +0 -0
- /meutils/apis/{google_apis → google}/gemini_sdk.py +0 -0

meutils/apis/google/chat.py
ADDED
@@ -0,0 +1,372 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Project : AI. @by PyCharm
+# @File : common
+# @Time : 2025/4/2 13:03
+# @Author : betterme
+# @WeChat : meutils
+# @Software : PyCharm
+# @Description : https://ai.google.dev/gemini-api/docs/openai?hl=zh-cn
+# genai => openai
+# https://googleapis.github.io/python-genai/genai.html#module-genai.models
+
+
+from meutils.pipe import *
+from meutils.decorators.retry import retrying
+
+from meutils.io.files_utils import to_url, to_bytes, guess_mime_type
+from meutils.str_utils.regular_expression import parse_url
+
+from meutils.schemas.image_types import ImageRequest, ImagesResponse
+from meutils.schemas.openai_types import chat_completion, chat_completion_chunk, CompletionRequest, CompletionUsage
+
+from meutils.config_utils.lark_utils import get_next_token_for_polling
+from google import genai
+from google.genai import types
+from google.genai.types import HttpOptions, GenerateContentConfig, HarmCategory, HarmBlockThreshold, Part
+from google.genai.types import UserContent, ModelContent, Content
+from google.genai.types import Tool, GoogleSearch
+
+FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=bK9ZTt"  # 200
+
+"""
+Gemini 1.5 Pro and 1.5 Flash support up to 3,600 document pages. Document pages must use one of the following text MIME types:
+
+PDF - application/pdf
+JavaScript - application/x-javascript, text/javascript
+Python - application/x-python, text/x-python
+TXT - text/plain
+HTML - text/html
+CSS - text/css
+Markdown - text/md
+CSV - text/csv
+XML - text/xml
+RTF - text/rtf
+
+- small files
+- large files: need to wait for processing
+"""
+tools = [
+    Tool(
+        google_search=GoogleSearch()
+    )
+]
+
+
+class Completions(object):
+    def __init__(self, api_key: Optional[str] = None):
+        self.api_key = api_key
+        self.base_url = "https://all.chatfire.cc/genai"
+        self.client = None
+
+    async def create_for_search(self, request: CompletionRequest):
+        self.client = self.client or await self.get_client()
+
+        if request.model.endswith("-search"):
+            request.model = request.model.replace("-search", "")
+
+        chat = self.client.aio.chats.create(
+            model=request.model,
+            config=GenerateContentConfig(
+                tools=tools,
+                system_instruction=request.system_instruction or "请根据用户的语言偏好自动调整回复语言"
+            ),
+        )
+        # print(response.candidates[0].grounding_metadata.search_entry_point.rendered_content)
+        # print(response.candidates[0].grounding_metadata.grounding_chunks)
+
+        chunks = await chat.send_message_stream(request.last_user_content)
+        async for chunk in chunks:
+            if chunk.candidates and chunk.candidates[0].content:
+                parts = chunk.candidates[0].content.parts or []
+                for part in parts:
+                    # logger.debug(part)
+                    if part.text:
+                        yield part.text
+
+            if chunk.candidates and chunk.candidates[0].grounding_metadata:
+                grounding_chunks = chunk.candidates[0].grounding_metadata.grounding_chunks or []
+                for grounding_chunk in grounding_chunks:
+                    if grounding_chunk.web:
+                        yield f"\n\n[{grounding_chunk.web.title}]({grounding_chunk.web.uri})"
+
+    async def create_for_files(self, request: CompletionRequest):
+        """todo: parsing of large files"""
+        self.client = self.client or await self.get_client()
+
+        contents = []
+        if urls := sum(request.last_urls.values(), []):
+            # https://ai.google.dev/gemini-api/docs/document-processing?hl=zh-cn&lang=python
+            file_objects = await self.upload(urls)
+            contents += file_objects
+            contents.append(request.last_user_content)
+
+        elif request.last_user_content.startswith("http"):
+            url, user_content = request.last_user_content.split(maxsplit=1)
+            file_object = await self.upload(url)
+            contents += [file_object, user_content]
+        else:
+            contents.append(request.last_user_content)
+
+        # logger.debug(contents)
+
+        chat = self.client.aio.chats.create(  # todo: system_instruction
+            model=request.model,
+            config=GenerateContentConfig(
+                response_modalities=['Text'],
+                system_instruction=request.system_instruction
+            )
+        )
+        for i in range(5):
+            try:
+                chunks = await chat.send_message_stream(contents)
+                async for chunk in chunks:
+                    if chunk.candidates and chunk.candidates[0].content:
+                        parts = chunk.candidates[0].content.parts or []
+                        for part in parts:
+                            # logger.debug(part)
+                            if part.text:
+                                yield part.text
+
+                break
+
+            except Exception as e:
+                logger.debug(f"重试{i}: {e}")
+                if "The model is overloaded." in str(e):
+                    await asyncio.sleep(1)
+                    continue
+                else:
+                    yield e
+                    raise e
+
+    @retrying(title=__name__)
+    async def generate(self, request: ImageRequest):  # OpenaiD3
+        request.model = "gemini-2.0-flash-exp-image-generation"
+        image, prompt = request.image_and_prompt
+        parts = [Part.from_text(text=prompt)]
+        if image:
+            data = await to_bytes(image)
+            parts.append(Part.from_bytes(data=data, mime_type="image/png"))
+
+        self.client = self.client or await self.get_client()
+        chat = self.client.aio.chats.create(
+            model=request.model,
+            config=GenerateContentConfig(
+                response_modalities=['Text', 'Image'],
+            )
+        )
+        image_response = ImagesResponse()
+
+        response = await chat.send_message(parts)
+        if response.candidates and response.candidates[0].content:
+            parts = response.candidates[0].content.parts or []
+            for part in parts:
+                if part.inline_data:
+                    image_url = await to_url(part.inline_data.data, mime_type=part.inline_data.mime_type)
+                    image_response.data.append({"url": image_url, "revised_prompt": part.text})
+
+        return image_response
+
+    async def create_for_images(self, request: CompletionRequest):
+        request.model = "gemini-2.0-flash-exp-image-generation"  # forced for now
+
+        messages = await self.to_image_messages(request)
+
+        if len(messages) > 1:
+            history = messages[:-1]
+            message = messages[-1].parts
+        else:
+            history = []
+            message = messages[-1].parts
+
+        self.client = self.client or await self.get_client()
+        chat = self.client.aio.chats.create(  # todo: system_instruction
+            model=request.model,
+            config=GenerateContentConfig(
+                response_modalities=['Text', 'Image'],
+                # system_instruction=request.system_instruction
+            ),
+            history=history
+        )
+
+        # logger.debug(message)
+
+        # message = [
+        #     Part.from_text(text="画条狗")
+        # ]
+
+        for i in range(5):
+            try:
+                chunks = await chat.send_message_stream(message)
+                async for chunk in chunks:
+
+                    if chunk.candidates and chunk.candidates[0].content:
+                        parts = chunk.candidates[0].content.parts or []
+                        for part in parts:
+                            # logger.debug(part)
+                            if part.text:
+                                yield part.text
+
+                            if part.inline_data:
+                                image_url = await to_url(
+                                    part.inline_data.data,
+                                    mime_type=part.inline_data.mime_type
+                                )
+                                yield f"![]({image_url})"
+                break
+
+            except Exception as e:
+                logger.debug(f"重试{i}: {e}")
+                if "The model is overloaded." in str(e):
+                    await asyncio.sleep(1)
+                    continue
+                else:
+                    yield e
+                    raise e
+
+    async def to_image_messages(self, request: CompletionRequest):
+        # two rounds are enough for iterative image editing
+
+        messages = []
+        for m in request.messages:
+            contents = m.get("content")
+            if m.get("role") == "assistant":
+                assistant_content = str(contents)
+                if urls := parse_url(assistant_content):  # assistant
+                    datas = await asyncio.gather(*map(to_bytes, urls))
+
+                    parts = [
+                        Part.from_bytes(
+                            data=data,
+                            mime_type="image/png"
+                        )
+                        for data in datas
+                    ]
+                    parts += [
+                        Part.from_text(
+                            text=request.last_assistant_content
+                        ),
+                    ]
+                    messages.append(ModelContent(parts=parts))
+
+            elif m.get("role") == "user":
+                if isinstance(contents, list):
+                    parts = []
+                    for content in contents:
+                        if content.get("type") == "image_url":
+                            image_url = content.get("image_url", {}).get("url")
+                            data = await to_bytes(image_url)
+
+                            parts += [
+                                Part.from_bytes(data=data, mime_type="image/png")
+                            ]
+
+                        elif content.get("type") == "text":
+                            text = content.get("text")
+                            if text.startswith('http'):
+                                image_url, text = text.split(maxsplit=1)
+                                data = await to_bytes(image_url)
+
+                                parts += [
+                                    Part.from_bytes(data=data, mime_type="image/png"),
+                                    Part.from_text(text=text)
+                                ]
+                            else:
+                                parts += [
+                                    Part.from_text(text=text)
+                                ]
+
+                    messages.append(UserContent(parts=parts))
+
+                else:  # str
+                    if str(contents).startswith('http'):  # normalize the prompt format to accept a leading URL
+                        image_url, text = str(contents).split(maxsplit=1)
+                        data = await to_bytes(image_url)
+                        parts = [
+                            Part.from_bytes(data=data, mime_type="image/png"),
+                            Part.from_text(text=text)
+                        ]
+                    else:
+                        parts = [
+                            Part.from_text(text=str(contents)),
+                        ]
+                    messages.append(UserContent(parts=parts))
+
+        return messages
+
+    async def upload(self, files: Union[str, List[str]]):  # => openai files
+        self.client = self.client or await self.get_client()
+
+        if isinstance(files, list):
+            return await asyncio.gather(*map(self.upload, files))
+
+        file_config = {"name": f"{shortuuid.random().lower()}", "mime_type": guess_mime_type(files)}
+        return await self.client.aio.files.upload(file=io.BytesIO(await to_bytes(files)), config=file_config)
+
+    async def get_client(self):
+        api_key = self.api_key or await get_next_token_for_polling(feishu_url=FEISHU_URL, from_redis=True)
+
+        logger.info(f"GeminiClient: {api_key}")
+
+        return genai.Client(
+            api_key=api_key,
+            http_options=HttpOptions(
+                base_url=self.base_url
+            )
+        )
+
+
+if __name__ == '__main__':
+    file = "https://oss.ffire.cc/files/kling_watermark.png"
+
+    api_key = os.getenv("GOOGLE_API_KEY")
+
+    # arun(GeminiClient(api_key=api_key).upload(file))
+    # arun(GeminiClient(api_key=api_key).upload([file] * 2))
+    # arun(GeminiClient(api_key=api_key).create())
+    url = "https://oss.ffire.cc/files/nsfw.jpg"
+    content = [
+
+        # {"type": "text", "text": "https://oss.ffire.cc/files/nsfw.jpg 移除右下角的水印"},
+        # {"type": "text", "text": "https://oss.ffire.cc/files/kling_watermark.png 总结下"},
+        {"type": "text", "text": "https://oss.ffire.cc/files/nsfw.jpg 总结下"},
+
+        # {"type": "text", "text": "总结下"},
+        # {"type": "image_url", "image_url": {"url": url}},
+
+        # {"type": "video_url", "video_url": {"url": url}}
+
+    ]
+
+    content = "亚洲多国回应“特朗普关税暂停”"
+
+    # content = "https://oss.ffire.cc/files/nsfw.jpg 移除右下角的水印"
+
+    #
+    request = CompletionRequest(
+        # model="qwen-turbo-2024-11-01",
+        # model="gemini-all",
+        # model="gemini-2.0-flash-exp-image-generation",
+        model="gemini-2.0-flash",
+
+        messages=[
+            {
+                'role': 'user',
+                'content': content
+            },
+
+        ],
+        stream=True,
+    )
+
+    arun(Completions(api_key=api_key).create_for_search(request))
+
+    # arun(Completions(api_key=api_key).create_for_images(request))
+    # arun(Completions(api_key=api_key).create_for_files(request))
+    # request = ImageRequest(
+    #     prompt="https://oss.ffire.cc/files/nsfw.jpg 移除右下角 白色的水印",
+    #     # prompt="画条可爱的狗",
+    #
+    # )
+    #
+    # arun(Completions(api_key=api_key).generate(request))

meutils/apis/google/files.py
ADDED
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Project : AI. @by PyCharm
+# @File : files
+# @Time : 2025/4/2 10:40
+# @Author : betterme
+# @WeChat : meutils
+# @Software : PyCharm
+# @Description : todo: generally needed for large-file Q&A
+# https://ai.google.dev/gemini-api/docs/document-processing?hl=zh-cn&lang=python
+
+from meutils.pipe import *
+
+file = "/Users/betterme/PycharmProjects/AI/QR.png"
+#
+# file_object = client.files.upload(file=file)
+# prompt = "一句话总结"
+
+# file_object = client.aio.files.upload(file=file)
+
+
+async def upload(self, files: Union[str, List[str]], client: Optional[genai.Client] = None):  # => openai files
+    client = client or await self.get_client()
+
+    if isinstance(files, list):
+        return await asyncio.gather(*map(self.upload, files))
+
+    file_config = {"name": f"{shortuuid.random().lower()}", "mime_type": guess_mime_type(files)}
+    return await client.aio.files.upload(file=io.BytesIO(await to_bytes(files)), config=file_config)

meutils/apis/google/google2openai.py
CHANGED
@@ -27,7 +27,7 @@ config = GenerateContentConfig(
 
     temperature=0.7,
     top_p=0.8,
-    response_modalities=['Text', 'Image'],
+    # response_modalities=['Text', 'Image'],
 
     # The default block threshold for the civic integrity category is Block none (for gemini-2.0-flash-001, aliased as gemini-2.0-flash, gemini-2.0-pro-exp-02-05 and gemini-2.0-flash-lite) in Google AI Studio and the Gemini API, and Block most for all other models in Google AI Studio only.
     # safety_settings=[
@@ -61,15 +61,19 @@ file = "/Users/betterme/PycharmProjects/AI/QR.png"
 # 'futuristic scifi city with lots of greenery?')
 #
 # prompt = "9.11 9.8哪个大呢"
-
+
+uri = 'https://generativelanguage.googleapis.com/v1beta/files/88n7hk8tau7g'
 
 # client.aio.chats.create(
 #     model="gemini-2.0-flash-exp-image-generation",
 # )
 
 response = client.models.generate_content(
-    model="gemini-2.0-flash
-    contents=[
+    model="gemini-2.0-flash",
+    contents=[
+        '解释下',
+        Part.from_uri(file_uri=uri, mime_type='image/png')
+    ],
 
     # model="gemini-2.5-pro-exp-03-25",
     # model="gemini-2.0-flash",

meutils/apis/google/images.py
ADDED
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Project : AI. @by PyCharm
+# @File : images
+# @Time : 2025/4/7 13:07
+# @Author : betterme
+# @WeChat : meutils
+# @Software : PyCharm
+# @Description : D3 image generation and editing
+
+from meutils.pipe import *
+
+from meutils.apis.google.chat import Completions, CompletionRequest
+
+from meutils.schemas.image_types import ImageRequest
+
+
+
+
+async def generate(request: ImageRequest, api_key: Optional[str] = None):
+    request = CompletionRequest(
+        model="gemini-2.0-flash-exp-image-generation",
+        messages=[
+
+        ],
+    )
+    return Completions().create_for_images(request)

meutils/apis/google/search.py
CHANGED
@@ -10,7 +10,7 @@
 
 from meutils.pipe import *
 from google import genai
-from google.genai.types import Tool, GenerateContentConfig, GoogleSearch,HttpOptions
+from google.genai.types import Tool, GenerateContentConfig, GoogleSearch, HttpOptions, ToolCodeExecution, Retrieval
 
 client = genai.Client(
     api_key="AIzaSyD19pv1qsYjx4ZKbfH6qvNdYzHMV2TxmPU",
@@ -19,19 +19,20 @@ client = genai.Client(
     )
 )
 
-
 google_search_tool = Tool(
-    google_search=GoogleSearch()
+    google_search=GoogleSearch(),
 )
 
-
+# google_search_tool = {'function_declarations': None, 'retrieval': None, 'google_search': {}, 'google_search_retrieval': None, 'code_execution': None}
+print(google_search_tool.model_dump())
 model_id = "gemini-2.0-flash"
 
 response = client.models.generate_content(
     model=model_id,
-    contents="
+    contents="亚洲多国回应“特朗普关税暂停”",
     config=GenerateContentConfig(
         tools=[google_search_tool],
+        system_instruction="用中文回答",
         # response_modalities=["TEXT"],
     )
 )
@@ -44,3 +45,4 @@ for each in response.candidates[0].content.parts:
 # To get grounding metadata as web content.
 print(response.candidates[0].grounding_metadata.search_entry_point.rendered_content)
 print(response.candidates[0].grounding_metadata.grounding_chunks)
+# response.candidates[0].grounding_metadata.grounding_chunks[0].web
meutils/apis/jimeng/images.py
CHANGED
@@ -33,7 +33,10 @@ from fake_useragent import UserAgent
 
 ua = UserAgent()
 
+VERSION = "3.1.5"
 
+
+#
 async def create_draft_content(request: ImageRequest, token: str):
     """
     Create draft content
@@ -44,7 +47,7 @@ async def create_draft_content(request: ImageRequest, token: str):
 
     request.model = MODELS_MAP.get(request.model, MODELS_MAP["default"])
 
-    height = width =
+    height = width = 1328
     if 'x' in request.size:
         height, width = map(int, request.size.split('x'))
 
@@ -84,7 +87,9 @@ async def create_draft_content(request: ImageRequest, token: str):
                 "type": "",
                 "id": str(uuid.uuid4()),
                 "height": height,
-                "width": width
+                "width": width,
+
+                # "resolution_type": "2k"
             },
         },
         "ability_list": [
@@ -164,7 +169,9 @@ async def create_draft_content(request: ImageRequest, token: str):
                     "type": "",
                     "id": str(uuid.uuid4()),
                     "height": height,
-                    "width": width
+                    "width": width,
+
+                    "resolution_type": "1k"
                 }
            },
            "history_option": {
@@ -181,7 +188,7 @@ async def create_draft_content(request: ImageRequest, token: str):
         "min_version": "3.0.2",
         "min_features": [],
         "is_from_tsn": True,
-        "version":
+        "version": VERSION,
         "main_component_id": main_component_id,
         "component_list": [component]
     }
@@ -191,8 +198,15 @@ async def create_draft_content(request: ImageRequest, token: str):
     return draft_content
 
 
+def key_builder(*args, **kwargs):
+    logger.debug(args)
+    logger.debug(kwargs)
+
+    return args[1].prompt
+
+
 @retrying()
-@rcache(ttl=30 * 24 * 3600, serializer="pickle", key_builder=lambda *args, **kwargs: args[1].prompt)
+@rcache(ttl=30 * 24 * 3600, serializer="pickle", key_builder=lambda *args, **kwargs: f"{args[1].seed} {args[1].prompt}")
 async def create_task(request: ImageRequest, token: Optional[str] = None):  # todo: image input
     token = token or await get_next_token_for_polling(FEISHU_URL, check_token)
 
@@ -229,7 +243,21 @@ async def create_task(request: ImageRequest, token: Optional[str] = None):  # to
     data = response.json()
     logger.debug(bjson(data))
 
-
+    # {
+    #     "ret": "1000",
+    #     "errmsg": "invalid parameter",
+    #     "systime": "1744354538",
+    #     "logid": "20250411145538E30D2FF8347A9A710F49",
+    #     "data": {
+    #         "aigc_data": null,
+    #         "fail_code": "",
+    #         "fail_starling_key": "",
+    #         "fail_starling_message": ""
+    #     }
+    # }
+
+    aigc_data = (data.get("data") or {}).get("aigc_data") or {}
+    if task_id := aigc_data.get("history_record_id"):  # bug
         return TaskResponse(task_id=task_id, system_fingerprint=token)
     else:
 
@@ -280,7 +308,7 @@ async def get_task(task_id, token):
             "status": 50,
     """
 
-    image_data = map(lambda x: x.get("image", {}).get("large_images"), item_list)
+    image_data = map(lambda x: x.get("image", {}).get("large_images", []), item_list)
 
     task_data = sum(image_data, []) | xmap_(lambda x: {"url": x.get("image_url")})
 
@@ -377,7 +405,12 @@ if __name__ == '__main__':
 
     # arun(generate(ImageRequest(prompt="fuck you")))
     prompt = "A plump Chinese beauty wearing a wedding dress revealing her skirt and underwear is swinging on the swing,Happy smile,cleavage,Exposed thighs,Spread your legs open,Extend your leg,panties,upskirt,Barefoot,sole"
-
+    prompt = "a dog"
+    request = ImageRequest(prompt=prompt, size="1328x1328")
+    # request = ImageRequest(prompt=prompt, size="1024x1024")
+
+    # request = ImageRequest(prompt=prompt, size="2048*2048")
+
     # task = arun(create_task(ImageRequest(**data), token=token))
 
     # arun(get_task(task.task_id, task.system_fingerprint))
meutils/apis/search/metaso.py
CHANGED
@@ -110,7 +110,7 @@ async def create(request: Union[ChatCompletionRequest, CompletionRequest]):
     _, engine_type = request.model.split(':')
 
     model = None
-    if request.model.startswith(("
+    if request.model.startswith(("deepseek",)):
         model = "ds-r1"
         system_fingerprint = "deepseek-r1"
 
@@ -195,7 +195,7 @@ if __name__ == '__main__':
     request = ChatCompletionRequest(
         # model="deepseek-search",
         # model="deepseek-r1-search",
-        model="
+        model="11meta-deepresearch",
         # model="ai-search",
         # model="ai-search:scholar",
         # model="ai-search-pro:scholar",
meutils/apis/siliconflow/images.py
CHANGED
@@ -25,8 +25,7 @@ from meutils.schemas.image_types import ImageRequest, FluxImageRequest, SDImageR
 FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=3aA5dH"
 FEISHU_URL_FREE = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=xlvlrH"
 
-BASE_URL =
-# BASE_URL = "https://api.siliconflow.cn/v1"
+BASE_URL = "https://api.siliconflow.cn/v1"
 
 DEFAULT_MODEL = "black-forest-labs/FLUX.1-schnell"
 MODELS = {
@@ -116,6 +115,8 @@ async def generate(request: ImageRequest, api_key: Optional[str] = None):
     response = await client.images.generate(**data)
     response.model = ""
     logger.debug(response)
+
+    response.data[0].url = unquote(response.data[0].url or "")
     if request.response_format == "b64_json":
         b64_json = await to_base64(response.data[0].url)
 
@@ -162,7 +163,8 @@ if __name__ == '__main__':
     # request = FluxImageRequest(model="flux", prompt="a dog", size="1024x1024", num_inference_steps=1)
     # request = FluxImageRequest(model="flux-pro", prompt="a dog", size="10x10", num_inference_steps=1)
 
-    data = {'model': 'flux-schnell', 'prompt': '画一个2025年电脑如何一键重装系统win10教程详解的封面图', 'n': 1,
+    data = {'model': 'flux-schnell', 'prompt': '画一个2025年电脑如何一键重装系统win10教程详解的封面图', 'n': 1,
+            'size': '680x400'}
     request = FluxImageRequest(**data)
 
     print(request)
meutils/caches/acache.py
CHANGED

meutils/common.py
CHANGED
@@ -68,6 +68,7 @@ from queue import Queue
 from pprint import pprint
 from abc import abstractmethod
 from dataclasses import dataclass
+from urllib.parse import unquote, unquote_plus, urlparse, urljoin
 from functools import reduce, lru_cache, partial
 
 from collections import Counter, OrderedDict

meutils/data/VERSION
CHANGED
@@ -1 +1 @@
-2025.04.
+2025.04.11.17.37.03