g4f-6.9.7-py3-none-any.whl → g4f-6.9.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
g4f/Provider/PollinationsAI.py CHANGED
@@ -24,6 +24,7 @@ from ..providers.response import ImageResponse, Reasoning, VideoResponse, JsonRe
 from ..tools.media import render_messages
 from ..tools.run_tools import AuthManager
 from ..cookies import get_cookies_dir
+from ..tools.files import secure_filename
 from .template.OpenaiTemplate import read_response
 from .. import debug
 
@@ -31,7 +32,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Pollinations AI 🌸"
     url = "https://pollinations.ai"
     login_url = "https://enter.pollinations.ai"
-    api_key = "pk", "_B9YJX5SBohhm2ePq"
     active_by_default = True
     working = True
     supports_system_message = True
@@ -44,7 +44,9 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     gen_text_api_endpoint = "https://gen.pollinations.ai/v1/chat/completions"
     image_models_endpoint = "https://gen.pollinations.ai/image/models"
     text_models_endpoint = "https://gen.pollinations.ai/text/models"
-    BALANCE_ENDPOINT = "https://gen.pollinations.ai/account/balance"
+    balance_endpoint = "https://api.gpt4free.workers.dev/api/pollinations/account/balance"
+    worker_api_endpoint = "https://api.gpt4free.workers.dev/api/pollinations/chat/completions"
+    worker_models_endpoint = "https://api.gpt4free.workers.dev/api/pollinations/text/models"
 
     # Models configuration
     default_model = "openai"
@@ -56,8 +58,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     image_models = [default_image_model, "turbo", "kontext"]
     audio_models = {}
     vision_models = [default_vision_model]
-    _gen_models_loaded = False
-    _free_models_loaded = False
     model_aliases = {
         "gpt-4.1-nano": "openai-fast",
         "llama-4-scout": "llamascout",
@@ -74,12 +74,15 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     }
     swap_model_aliases = {v: k for k, v in model_aliases.items()}
     balance: Optional[float] = None
+    current_models_endpoint: Optional[str] = None
 
     @classmethod
     def get_balance(cls, api_key: str, timeout: Optional[float] = None) -> Optional[float]:
         try:
-            headers = {"authorization": f"Bearer {api_key}"}
-            response = requests.get(cls.BALANCE_ENDPOINT, headers=headers, timeout=timeout)
+            headers = None
+            if api_key:
+                headers = {"authorization": f"Bearer {api_key}"}
+            response = requests.get(cls.balance_endpoint, headers=headers, timeout=timeout)
             response.raise_for_status()
             data = response.json()
             cls.balance = float(data.get("balance", 0.0))
@@ -103,17 +106,18 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
 
         if not api_key:
             api_key = AuthManager.load_api_key(cls)
-        if not api_key or api_key.startswith("g4f_") or api_key.startswith("gfs_"):
-            api_key = "".join(cls.api_key)
-
-        if cls.balance or cls.balance is None and cls.get_balance(api_key, timeout) and cls.balance > 0:
-            debug.log(f"Authenticated with Pollinations AI using API key.")
+        if (not api_key or api_key.startswith("g4f_") or api_key.startswith("gfs_")) and cls.balance or cls.balance is None and cls.get_balance(api_key, timeout) and cls.balance > 0:
+            debug.log(f"Authenticated with Pollinations AI using G4F API.")
+            models_url = cls.worker_models_endpoint
+        elif api_key:
+            debug.log(f"Using Pollinations AI with provided API key.")
+            models_url = cls.gen_text_api_endpoint
         else:
             debug.log(f"Using Pollinations AI without authentication.")
-            api_key = None
+            models_url = cls.text_models_endpoint
 
-        if not cls._free_models_loaded or api_key and not cls._gen_models_loaded:
-            path = Path(get_cookies_dir()) / "models" / datetime.today().strftime('%Y-%m-%d') / f"{cls.__name__}{'-auth' if api_key else ''}.json"
+        if cls.current_models_endpoint != models_url:
+            path = Path(get_cookies_dir()) / "models" / datetime.today().strftime('%Y-%m-%d') / f"{secure_filename(models_url)}.json"
             if path.exists():
                 try:
                     data = path.read_text()
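
The hunk above also reshapes cache invalidation: the two booleans (_free_models_loaded / _gen_models_loaded) give way to a single current_models_endpoint marker, and the daily on-disk cache is keyed by a sanitized form of the models URL instead of the class name. A minimal sketch of the resulting path construction; the re.sub call is only a stand-in for g4f's secure_filename helper, and cache_path is an illustrative function, not part of the package:

import re
from datetime import datetime
from pathlib import Path

def cache_path(cookies_dir: str, models_url: str) -> Path:
    # Stand-in for g4f's secure_filename: collapse characters that are
    # unsafe in file names (the real helper may normalize differently).
    safe = re.sub(r"[^0-9A-Za-z_.-]+", "_", models_url)
    return Path(cookies_dir) / "models" / datetime.today().strftime("%Y-%m-%d") / f"{safe}.json"

# e.g. /tmp/g4f/models/<today>/https_gen.pollinations.ai_text_models.json
print(cache_path("/tmp/g4f", "https://gen.pollinations.ai/text/models"))

Because the key now encodes the endpoint, switching between the worker, authenticated, and free model lists on the same day yields distinct cache files rather than a stale shared flag.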
@@ -180,10 +184,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 cls.swap_model_aliases = {v: k for k, v in cls.model_aliases.items()}
 
         finally:
-            if api_key:
-                cls._gen_models_loaded = True
-            else:
-                cls._free_models_loaded = True
+            cls.current_models_endpoint = models_url
         # Return unique models across all categories
         all_models = cls.text_models.copy()
         all_models.extend(cls.image_models)
@@ -262,7 +263,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 has_audio = True
                 break
         model = "openai-audio" if has_audio else cls.default_model
-        if cls.get_models(api_key=api_key, timeout=kwargs.get("timeout")):
+        if cls.get_models(api_key=api_key, timeout=kwargs.get("timeout", 15)):
             if model in cls.model_aliases:
                 model = cls.model_aliases[model]
                 debug.log(f"Using model: {model}")
@@ -480,17 +481,17 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
             seed=None if "tools" in extra_body else seed,
             **extra_body
         )
+        if (not api_key or api_key.startswith("g4f_") or api_key.startswith("gfs_")) and cls.balance and cls.balance > 0:
+            endpoint = cls.worker_api_endpoint
+        elif api_key:
+            endpoint = cls.gen_text_api_endpoint
+        else:
+            endpoint = cls.text_api_endpoint
         headers = None
-        if api_key and not api_key.startswith("g4f_") and not api_key.startswith("gfs_"):
+        if api_key:
             headers = {"authorization": f"Bearer {api_key}"}
-        elif cls.balance and cls.balance > 0:
-            headers = {"authorization": f"Bearer {''.join(cls.api_key)}"}
         yield JsonRequest.from_dict(data)
-        if headers:
-            url = cls.gen_text_api_endpoint
-        else:
-            url = cls.text_api_endpoint
-        async with session.post(url, json=data, headers=headers) as response:
+        async with session.post(endpoint, json=data, headers=headers) as response:
             if response.status in (400, 500):
                 debug.error(f"Error: {response.status} - Bad Request: {data}")
             async for chunk in read_response(response, stream, format_media_prompt(messages), cls.get_dict(),
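
Taken together, the PollinationsAI changes remove the bundled shared key and instead pick an endpoint from the key type and the remaining shared balance. A hedged sketch of the selection rule the send path now applies; the two constants are copied from the diff, while pick_endpoint and the FREE_API placeholder are illustrative, since the value of text_api_endpoint does not appear in these hunks:

from typing import Optional

WORKER_API = "https://api.gpt4free.workers.dev/api/pollinations/chat/completions"
GEN_API = "https://gen.pollinations.ai/v1/chat/completions"
FREE_API = "cls.text_api_endpoint"  # placeholder; real value not shown in the diff

def pick_endpoint(api_key: Optional[str], balance: Optional[float]) -> str:
    # G4F-issued keys (g4f_/gfs_ prefixes) and missing keys are routed
    # through the Cloudflare worker, but only while the balance is positive.
    if (not api_key or api_key.startswith(("g4f_", "gfs_"))) and balance and balance > 0:
        return WORKER_API
    # A personal Pollinations key talks to gen.pollinations.ai directly.
    if api_key:
        return GEN_API
    # No key and no balance: fall back to the unauthenticated endpoint.
    return FREE_API

assert pick_endpoint("g4f_abc", 1.5) == WORKER_API
assert pick_endpoint("sk-personal", 0.0) == GEN_API
assert pick_endpoint(None, None) == FREE_API

Note that the Authorization header is now sent whenever any api_key is present, while endpoint choice alone decides whether the worker proxy is used.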
g4f/client/__init__.py CHANGED
@@ -15,7 +15,7 @@ from typing import Union, AsyncIterator, Iterator, Awaitable, Optional, List, Di
 
 from ..image.copy_images import copy_media, get_media_dir
 from ..typing import Messages, ImageType
-from ..providers.types import ProviderType, BaseRetryProvider, BaseProvider
+from ..providers.types import ProviderType, BaseProvider
 from ..providers.response import *
 from ..errors import NoMediaResponseError, ProviderNotFoundError
 from ..providers.retry_provider import IterListProvider
@@ -70,14 +70,15 @@ def iter_response(
     stream: bool,
     response_format: Optional[dict] = None,
     max_tokens: Optional[int] = None,
-    stop: Optional[list[str]] = None
+    stop: Optional[list[str]] = None,
+    provider_info: Optional[ProviderInfo] = None
 ) -> ChatCompletionResponseType:
     content = ""
     reasoning = []
     finish_reason = None
     tool_calls = None
     usage = None
-    provider: ProviderInfo = None
+    provider_info: ProviderInfo = None
     conversation: JsonConversation = None
     completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
     idx = 0
@@ -100,7 +101,7 @@ def iter_response(
             usage = chunk
             continue
         elif isinstance(chunk, ProviderInfo):
-            provider = chunk
+            provider_info = chunk
             continue
         elif isinstance(chunk, Reasoning):
             reasoning.append(chunk)
@@ -122,9 +123,9 @@ def iter_response(
 
         if stream:
             chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
-            if provider is not None:
-                chunk.provider = provider.name
-                chunk.model = provider.model
+            if provider_info is not None:
+                chunk.provider = provider_info.name
+                chunk.model = provider_info.model
             yield chunk
 
         if finish_reason is not None:
@@ -153,29 +154,18 @@ def iter_response(
         conversation=None if conversation is None else conversation.get_dict(),
         reasoning=reasoning if reasoning else None
     )
-    if provider is not None:
-        chat_completion.provider = provider.name
-        chat_completion.model = provider.model
+    if provider_info is not None:
+        chat_completion.provider = provider_info.name
+        chat_completion.model = provider_info.model
     yield chat_completion
 
-# Synchronous iter_append_model_and_provider function
-def iter_append_model_and_provider(response: ChatCompletionResponseType, last_model: str, last_provider: ProviderType) -> ChatCompletionResponseType:
-    if isinstance(last_provider, BaseRetryProvider):
-        yield from response
-        return
-    for chunk in response:
-        if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
-            if chunk.provider is None and last_provider is not None:
-                chunk.model = getattr(last_provider, "last_model", last_model)
-                chunk.provider = last_provider.__name__
-            yield chunk
-
 async def async_iter_response(
     response: AsyncIterator[Union[str, ResponseType]],
     stream: bool,
     response_format: Optional[dict] = None,
     max_tokens: Optional[int] = None,
-    stop: Optional[list[str]] = None
+    stop: Optional[list[str]] = None,
+    provider_info: Optional[ProviderInfo] = None
 ) -> AsyncChatCompletionResponseType:
     content = ""
     reasoning = []
@@ -184,7 +174,6 @@ async def async_iter_response(
     idx = 0
     tool_calls = None
     usage = None
-    provider: ProviderInfo = None
     conversation: JsonConversation = None
 
     try:
@@ -203,7 +192,7 @@ async def async_iter_response(
                 usage = chunk
                 continue
             elif isinstance(chunk, ProviderInfo):
-                provider = chunk
+                provider_info = chunk
                 continue
             elif isinstance(chunk, Reasoning) and not stream:
                 reasoning.append(chunk)
@@ -225,9 +214,9 @@ async def async_iter_response(
 
             if stream:
                 chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
-                if provider is not None:
-                    chunk.provider = provider.name
-                    chunk.model = provider.model
+                if provider_info is not None:
+                    chunk.provider = provider_info.name
+                    chunk.model = provider_info.model
                 yield chunk
 
             if finish_reason is not None:
@@ -256,32 +245,13 @@ async def async_iter_response(
             conversation=conversation,
             reasoning=reasoning if reasoning else None
         )
-        if provider is not None:
-            chat_completion.provider = provider.name
-            chat_completion.model = provider.model
+        if provider_info is not None:
+            chat_completion.provider = provider_info.name
+            chat_completion.model = provider_info.model
         yield chat_completion
     finally:
         await safe_aclose(response)
 
-async def async_iter_append_model_and_provider(
-    response: AsyncChatCompletionResponseType,
-    last_model: str,
-    last_provider: ProviderType
-) -> AsyncChatCompletionResponseType:
-    try:
-        if isinstance(last_provider, BaseRetryProvider):
-            async for chunk in response:
-                yield chunk
-            return
-        async for chunk in response:
-            if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
-                if chunk.provider is None and last_provider is not None:
-                    chunk.model = getattr(last_provider, "last_model", last_model)
-                    chunk.provider = last_provider.__name__
-            yield chunk
-    finally:
-        await safe_aclose(response)
-
 class Client(BaseClient):
     def __init__(
         self,
@@ -350,9 +320,10 @@ class Completions:
             **kwargs
         )
 
+        provider_info = ProviderInfo(**provider.get_dict(), model=model)
+
         def fallback(response):
-            response = iter_response(response, stream, response_format, max_tokens, stop)
-            return iter_append_model_and_provider(response, model, provider)
+            return iter_response(response, stream, response_format, max_tokens, stop, provider_info)
 
         if raw:
             def raw_response(response):
@@ -689,8 +660,8 @@ class AsyncCompletions:
         )
 
         def fallback(response):
-            response = async_iter_response(response, stream, response_format, max_tokens, stop)
-            return async_iter_append_model_and_provider(response, model, provider)
+            provider_info = ProviderInfo(**provider.get_dict(), model=model)
+            return async_iter_response(response, stream, response_format, max_tokens, stop, provider_info)
 
         if raw:
             async def raw_response(response):
@@ -816,7 +787,7 @@ class ClientFactory:
     )
 
     # Create async client
-    async_client = ClientFactory.createAsyncClient("PollinationsAI")
+    async_client = ClientFactory.create_async_client("PollinationsAI")
     """
 
     # Registry of live/custom providers
@@ -850,7 +821,7 @@ class ClientFactory:
         elif provider.startswith("custom:"):
             if provider.startswith("custom:"):
                 serverId = provider[7:]
-                base_url = f"https://g4f.dev/custom/{serverId}"
+                base_url = f"https://api.gpt4free.workers.dev/custom/{serverId}"
             if not base_url:
                 raise ValueError("base_url is required for custom providers")
             provider = create_custom_provider(base_url, api_key, name=name, **kwargs)
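
On the client side, the two *_append_model_and_provider wrapper generators are gone: rather than re-annotating chunks with the last provider after the fact, the caller now builds a ProviderInfo up front and threads it through iter_response / async_iter_response. A simplified sketch of the pattern, using stand-in dataclasses rather than the real g4f types:

from dataclasses import dataclass
from typing import Iterator, Optional, Union

@dataclass
class ProviderInfo:      # simplified stand-in for g4f's ProviderInfo response type
    name: str
    model: Optional[str] = None

@dataclass
class Completion:        # simplified stand-in for ChatCompletion / ChatCompletionChunk
    content: str
    provider: Optional[str] = None
    model: Optional[str] = None

def iter_response(
    chunks: Iterator[Union[str, ProviderInfo]],
    provider_info: Optional[ProviderInfo] = None,  # 6.9.8: supplied by the caller
) -> Iterator[Completion]:
    for chunk in chunks:
        if isinstance(chunk, ProviderInfo):
            provider_info = chunk  # info emitted mid-stream still takes precedence
            continue
        completion = Completion(chunk)
        if provider_info is not None:  # tag in one place; no second wrapper generator
            completion.provider = provider_info.name
            completion.model = provider_info.model
        yield completion

# The caller builds the info once, mirroring
# ProviderInfo(**provider.get_dict(), model=model) in the diff:
info = ProviderInfo(name="PollinationsAI", model="openai")
for c in iter_response(iter(["Hello", " world"]), info):
    print(c)

Collapsing the two-generator pipeline into one pass removes a layer of buffering and drops the BaseRetryProvider special case from the client import list.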
g4f-6.9.7.dist-info/METADATA → g4f-6.9.8.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: g4f
-Version: 6.9.7
+Version: 6.9.8
 Summary: The official gpt4free repository | various collection of powerful language models
 Home-page: https://github.com/xtekky/gpt4free
 Author: Tekky
g4f-6.9.7.dist-info/RECORD → g4f-6.9.8.dist-info/RECORD CHANGED
@@ -23,7 +23,7 @@ g4f/Provider/Mintlify.py,sha256=3Bvy1oh6scg4xs9pw5RY4mmi8XEvQDelnNp664TefbU,7937
 g4f/Provider/OIVSCodeSer.py,sha256=clebON7Ssd9syewh3BWT59XOeB-WORXF6FPOwbzfRmo,1366
 g4f/Provider/OperaAria.py,sha256=sLnTOKzbW9BxTxNmHXJ-YDnhPU6Dj6MBdDfqDh-Zz-c,14662
 g4f/Provider/Perplexity.py,sha256=GUebbVin9hCg4FDei9RX2N6WboFcDDPm061WnnpPRaI,13236
-g4f/Provider/PollinationsAI.py,sha256=lgrwBxYrZOyKSUF7AJu9TA7S3YA1rdbAw08-W94IFwU,21506
+g4f/Provider/PollinationsAI.py,sha256=pbZSxqezkGma80OXno_bFecvrdD9EUCGQPkscPYI6ZU,21786
 g4f/Provider/PollinationsImage.py,sha256=wdGY9kbPGlqAkyeJcjXgWOG3HLVPU4QK-JENwg3gmwk,2297
 g4f/Provider/Qwen.py,sha256=XPJHRlobijqjdDGVqA-aVyRx2SeM28zSWf-NmzxJtgE,23876
 g4f/Provider/Startnest.py,sha256=OocXEAK3pKG-tC_D_chGE7GD22dZr67J1m7JBhLxniM,9526
@@ -148,7 +148,7 @@ g4f/api/stubs.py,sha256=9xoJDAmRyUtL_jlqhVI8Q8G-gF9zTse46iNBGEYvD9s,4252
 g4f/cli/__init__.py,sha256=GUr_JFjLQDmDHmEyafHPvjaPYCQ5oeJclSazoEfRcxA,9167
 g4f/cli/__main__.py,sha256=SYf3GG2ZCYygnpZ25muxdmT40Dfbx-J0ionEAnfBxuI,94
 g4f/cli/client.py,sha256=azUTVzdOtfwj8Z6twhODzBQsONiwBBGq90h61Wu_PyY,11859
-g4f/client/__init__.py,sha256=cnrZBjFoj2bYIDkvdauSn74UpFcj-RdgR_qmxiNM8yQ,36739
+g4f/client/__init__.py,sha256=dji7F8Xx5YdzsDVR9Yc3LKQQPooLxkh72RZkoyvkOok,35516
 g4f/client/helper.py,sha256=SwdS7I7CkWBG05INo04eMuukLRPUTkX4awgMHGPLpPI,1852
 g4f/client/models.py,sha256=H8wu1UMFu8oCw7jy6x5B46GjPJPUa35h42Pq87Dp6lI,2318
 g4f/client/service.py,sha256=lElAhe0y0ZzllkKjX2CBzy_kqgkwYw8P9fZWHivgWqw,5330
@@ -210,9 +210,9 @@ g4f/tools/files.py,sha256=bUTbeNp8ujTZQiW3GLTtFcgl3-1AnH3cDyIKHfh6Mjc,23397
 g4f/tools/media.py,sha256=AE9hGVRxQBVZzQ_Ylzeoo2TJUGXSBXO5RbLwj1I2ZTE,4701
 g4f/tools/run_tools.py,sha256=Yb0osWdDt4wUnkvwln4R6qcLapdddC3n2giZMPXWkzM,16662
 g4f/tools/web_search.py,sha256=vAZJD-qy15llsgAbkXzoEltqdFB6TlKLbqDJ1DS-6vs,2086
-g4f-6.9.7.dist-info/licenses/LICENSE,sha256=ixuiBLtpoK3iv89l7ylKkg9rs2GzF9ukPH7ynZYzK5s,35148
-g4f-6.9.7.dist-info/METADATA,sha256=hkmc6fZKRbRlrEGsojk2_hJBtaC3H6B5XT0J7nooFKE,23255
-g4f-6.9.7.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
-g4f-6.9.7.dist-info/entry_points.txt,sha256=J7Usl6dNjXJlvuzGAUEm6cuDXWpVdGq7SfK-tPoiZSI,67
-g4f-6.9.7.dist-info/top_level.txt,sha256=bMRlTupWYCcLWy80AnnKZkhpBsXsF8gI3BaMhSZSgRo,4
-g4f-6.9.7.dist-info/RECORD,,
+g4f-6.9.8.dist-info/licenses/LICENSE,sha256=ixuiBLtpoK3iv89l7ylKkg9rs2GzF9ukPH7ynZYzK5s,35148
+g4f-6.9.8.dist-info/METADATA,sha256=MeXnOddLJO79rAsc_cNZWoW0GgQMNdRqPXTp9sIqaIA,23255
+g4f-6.9.8.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
+g4f-6.9.8.dist-info/entry_points.txt,sha256=J7Usl6dNjXJlvuzGAUEm6cuDXWpVdGq7SfK-tPoiZSI,67
+g4f-6.9.8.dist-info/top_level.txt,sha256=bMRlTupWYCcLWy80AnnKZkhpBsXsF8gI3BaMhSZSgRo,4
+g4f-6.9.8.dist-info/RECORD,,