g4f 6.9.7__py3-none-any.whl → 6.9.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
g4f/Provider/PollinationsAI.py CHANGED
@@ -24,6 +24,7 @@ from ..providers.response import ImageResponse, Reasoning, VideoResponse, JsonRe
 from ..tools.media import render_messages
 from ..tools.run_tools import AuthManager
 from ..cookies import get_cookies_dir
+from ..tools.files import secure_filename
 from .template.OpenaiTemplate import read_response
 from .. import debug
 
@@ -31,7 +32,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Pollinations AI 🌸"
     url = "https://pollinations.ai"
     login_url = "https://enter.pollinations.ai"
-    api_key = "pk", "_B9YJX5SBohhm2ePq"
     active_by_default = True
     working = True
     supports_system_message = True
@@ -44,7 +44,9 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     gen_text_api_endpoint = "https://gen.pollinations.ai/v1/chat/completions"
     image_models_endpoint = "https://gen.pollinations.ai/image/models"
     text_models_endpoint = "https://gen.pollinations.ai/text/models"
-    BALANCE_ENDPOINT = "https://gen.pollinations.ai/account/balance"
+    balance_endpoint = "https://g4f.space/api/pollinations/account/balance"
+    worker_api_endpoint = "https://g4f.space/api/pollinations/chat/completions"
+    worker_models_endpoint = "https://g4f.space/api/pollinations/models"
 
     # Models configuration
     default_model = "openai"
@@ -56,8 +58,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     image_models = [default_image_model, "turbo", "kontext"]
     audio_models = {}
     vision_models = [default_vision_model]
-    _gen_models_loaded = False
-    _free_models_loaded = False
     model_aliases = {
         "gpt-4.1-nano": "openai-fast",
         "llama-4-scout": "llamascout",
@@ -74,12 +74,15 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     }
     swap_model_aliases = {v: k for k, v in model_aliases.items()}
     balance: Optional[float] = None
+    current_models_endpoint: Optional[str] = None
 
     @classmethod
     def get_balance(cls, api_key: str, timeout: Optional[float] = None) -> Optional[float]:
         try:
-            headers = {"authorization": f"Bearer {api_key}"}
-            response = requests.get(cls.BALANCE_ENDPOINT, headers=headers, timeout=timeout)
+            headers = None
+            if api_key:
+                headers = {"authorization": f"Bearer {api_key}"}
+            response = requests.get(cls.balance_endpoint, headers=headers, timeout=timeout)
             response.raise_for_status()
             data = response.json()
             cls.balance = float(data.get("balance", 0.0))
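The reworked `get_balance` no longer requires a key: with an empty `api_key`, `headers` stays `None` and the g4f.space proxy is queried anonymously. A minimal usage sketch; per the `Optional[float]` annotation we assume the method returns the parsed balance (or `None` on failure):

```python
from g4f.Provider import PollinationsAI

# Sketch only: no key means an unauthenticated request to cls.balance_endpoint.
balance = PollinationsAI.get_balance(api_key=None, timeout=10)
if balance and balance > 0:
    print(f"Shared worker balance: {balance}")
```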
@@ -103,17 +106,18 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
 
         if not api_key:
             api_key = AuthManager.load_api_key(cls)
-        if not api_key or api_key.startswith("g4f_") or api_key.startswith("gfs_"):
-            api_key = "".join(cls.api_key)
-
-        if cls.balance or cls.balance is None and cls.get_balance(api_key, timeout) and cls.balance > 0:
-            debug.log(f"Authenticated with Pollinations AI using API key.")
+        if (not api_key or api_key.startswith("g4f_") or api_key.startswith("gfs_")) and cls.balance or cls.balance is None and cls.get_balance(api_key, timeout) and cls.balance > 0:
+            debug.log(f"Authenticated with Pollinations AI using G4F API.")
+            models_url = cls.worker_models_endpoint
+        elif api_key:
+            debug.log(f"Using Pollinations AI with provided API key.")
+            models_url = cls.gen_text_api_endpoint
         else:
             debug.log(f"Using Pollinations AI without authentication.")
-            api_key = None
+            models_url = cls.text_models_endpoint
 
-        if not cls._free_models_loaded or api_key and not cls._gen_models_loaded:
-            path = Path(get_cookies_dir()) / "models" / datetime.today().strftime('%Y-%m-%d') / f"{cls.__name__}{'-auth' if api_key else ''}.json"
+        if cls.current_models_endpoint != models_url:
+            path = Path(get_cookies_dir()) / "models" / datetime.today().strftime('%Y-%m-%d') / f"{secure_filename(models_url)}.json"
             if path.exists():
                 try:
                     data = path.read_text()
@@ -180,10 +184,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 cls.swap_model_aliases = {v: k for k, v in cls.model_aliases.items()}
 
         finally:
-            if api_key:
-                cls._gen_models_loaded = True
-            else:
-                cls._free_models_loaded = True
+            cls.current_models_endpoint = models_url
         # Return unique models across all categories
         all_models = cls.text_models.copy()
         all_models.extend(cls.image_models)
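Model lists are now cached per endpoint rather than per auth flag: the cache file name is derived from the models URL via `secure_filename`, so the worker, authenticated, and free model lists cannot collide. A sketch of the resulting path construction:

```python
from datetime import datetime
from pathlib import Path

from g4f.cookies import get_cookies_dir
from g4f.tools.files import secure_filename

models_url = "https://g4f.space/api/pollinations/models"
# Same construction as in get_models above; one cache file per endpoint per day.
path = (Path(get_cookies_dir()) / "models"
        / datetime.today().strftime("%Y-%m-%d")
        / f"{secure_filename(models_url)}.json")
print(path)  # exact file name depends on how secure_filename sanitizes the URL
```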
@@ -262,7 +263,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                     has_audio = True
                     break
             model = "openai-audio" if has_audio else cls.default_model
-        if cls.get_models(api_key=api_key, timeout=kwargs.get("timeout")):
+        if cls.get_models(api_key=api_key, timeout=kwargs.get("timeout", 15)):
             if model in cls.model_aliases:
                 model = cls.model_aliases[model]
         debug.log(f"Using model: {model}")
@@ -480,17 +481,17 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 seed=None if "tools" in extra_body else seed,
                 **extra_body
             )
+            if (not api_key or api_key.startswith("g4f_") or api_key.startswith("gfs_")) and cls.balance and cls.balance > 0:
+                endpoint = cls.worker_api_endpoint
+            elif api_key:
+                endpoint = cls.gen_text_api_endpoint
+            else:
+                endpoint = cls.text_api_endpoint
             headers = None
-            if api_key and not api_key.startswith("g4f_") and not api_key.startswith("gfs_"):
+            if api_key:
                 headers = {"authorization": f"Bearer {api_key}"}
-            elif cls.balance and cls.balance > 0:
-                headers = {"authorization": f"Bearer {''.join(cls.api_key)}"}
             yield JsonRequest.from_dict(data)
-            if headers:
-                url = cls.gen_text_api_endpoint
-            else:
-                url = cls.text_api_endpoint
-            async with session.post(url, json=data, headers=headers) as response:
+            async with session.post(endpoint, json=data, headers=headers) as response:
                 if response.status in (400, 500):
                     debug.error(f"Error: {response.status} - Bad Request: {data}")
                 async for chunk in read_response(response, stream, format_media_prompt(messages), cls.get_dict(),
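Request routing now mirrors the models lookup above: one three-way branch picks the endpoint, and the Authorization header is attached whenever any key is present. Restated as a standalone sketch (URLs live in the class attributes shown earlier; `free_api` stands in for `cls.text_api_endpoint`, whose value this diff does not change):

```python
def select_endpoint(api_key: str, balance: float, worker_api: str, gen_api: str, free_api: str) -> str:
    # Keys issued by g4f itself ("g4f_"/"gfs_"), or no key at all, route to the
    # g4f.space worker while a shared balance remains.
    no_own_key = not api_key or api_key.startswith("g4f_") or api_key.startswith("gfs_")
    if no_own_key and balance and balance > 0:
        return worker_api
    if api_key:
        return gen_api   # user-supplied Pollinations key goes straight to gen.pollinations.ai
    return free_api      # anonymous fallback
```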
g4f/Provider/Yupp.py CHANGED
@@ -305,8 +305,8 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
             }
         )
         resp.raise_for_status()
-        data = resp.json()
-        return data[0]["result"]["data"]["json"]["signed_url"]
+        data = resp.json()[0]["result"]["data"]["json"]
+        return data.get("signed_url", data.get("signedURL"))
 
     @classmethod
     async def get_signed_image(cls, scraper: CloudScraper, image_id: str) -> str:
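The Yupp fix makes the signed-URL lookup tolerant of both key spellings the backend has returned; `dict.get` with a nested default checks them in order:

```python
# Hypothetical response bodies; either spelling now works.
old_style = {"signed_url": "https://cdn.example/one"}
new_style = {"signedURL": "https://cdn.example/two"}
for data in (old_style, new_style):
    print(data.get("signed_url", data.get("signedURL")))
```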
g4f/Provider/__init__.py CHANGED
@@ -49,9 +49,7 @@ from .OperaAria import OperaAria
 from .Perplexity import Perplexity
 from .PollinationsAI import PollinationsAI
 from .PollinationsImage import PollinationsImage
-from .Startnest import Startnest
 from .Qwen import Qwen
-from .StringableInference import StringableInference
 from .TeachAnything import TeachAnything
 from .WeWordle import WeWordle
 from .Yqcloud import Yqcloud
g4f/Provider/local/Ollama.py CHANGED
@@ -13,6 +13,7 @@ from ...typing import AsyncResult, Messages
 class Ollama(OpenaiTemplate):
     label = "Ollama 🦙"
     url = "https://ollama.com"
+    base_url = "https://g4f.space/api/ollama"
     login_url = "https://ollama.com/settings/keys"
     needs_auth = False
     working = True
@@ -34,7 +35,7 @@ class Ollama(OpenaiTemplate):
         cls.live += 1
         cls.models = [model["name"] for model in models]
         if base_url is None:
-            host = os.getenv("OLLAMA_HOST", "127.0.0.1")
+            host = os.getenv("OLLAMA_HOST", "localhost")
             port = os.getenv("OLLAMA_PORT", "11434")
             url = f"http://{host}:{port}/api/tags"
         else:
@@ -66,7 +67,7 @@ class Ollama(OpenaiTemplate):
         base_url: str = f"http://{host}:{port}/v1"
         if model in cls.local_models:
             async with StreamSession(headers={"Authorization": f"Bearer {api_key}"}, proxy=proxy) as session:
-                async with session.post(f"{base_url}/api/chat", json={
+                async with session.post(f"{base_url.replace('/v1', '')}/api/chat", json={
                     "model": model,
                     "messages": messages,
                 }) as response:
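Ollama serves its native chat API at the server root, while `base_url` carries the OpenAI-compatible `/v1` suffix; the new `.replace('/v1', '')` strips that suffix before posting. Note this is a plain string replace, so it assumes `/v1` occurs nowhere else in the URL:

```python
base_url = "http://localhost:11434/v1"
chat_url = f"{base_url.replace('/v1', '')}/api/chat"
assert chat_url == "http://localhost:11434/api/chat"
```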
g4f/Provider/needs_auth/Azure.py CHANGED
@@ -14,7 +14,7 @@ from ..helper import format_media_prompt
 class Azure(OpenaiTemplate):
     label = "Azure ☁️"
     url = "https://ai.azure.com"
-    base_url = "https://g4f.dev/api/azure"
+    base_url = "https://g4f.space/api/azure"
     working = True
     active_by_default = False
     login_url = "https://discord.gg/qXA4Wf4Fsm"
g4f/Provider/needs_auth/Claude.py CHANGED
@@ -9,7 +9,7 @@ from ..template import OpenaiTemplate
 class Claude(OpenaiTemplate):
     label = "Claude 💥"
     url = "https://claude.ai"
-    base_url = "https://g4f.dev/api/claude"
+    base_url = "https://g4f.space/api/claude"
     working = True
     active_by_default = True
     login_url = "https://discord.gg/qXA4Wf4Fsm"
g4f/Provider/needs_auth/GeminiPro.py CHANGED
@@ -7,7 +7,7 @@ class GeminiPro(OpenaiTemplate):
     url = "https://ai.google.dev"
     login_url = "https://aistudio.google.com/u/0/apikey"
     base_url = "https://generativelanguage.googleapis.com/v1beta/openai"
-    backup_url = "https://g4f.dev/custom/srv_mjnryskw9fe0567fa267"
+    backup_url = "https://g4f.space/api/gemini-v1beta"
     active_by_default = True
     working = True
     default_model = "models/gemini-2.5-flash"
g4f/Provider/needs_auth/Groq.py CHANGED
@@ -7,6 +7,7 @@ class Groq(OpenaiTemplate):
     url = "https://console.groq.com/playground"
     login_url = "https://console.groq.com/keys"
     base_url = "https://api.groq.com/openai/v1"
+    backup_url = "https://g4f.space/api/groq"
     working = True
     active_by_default = True
     default_model = DEFAULT_MODEL
g4f/Provider/needs_auth/Nvidia.py CHANGED
@@ -6,6 +6,7 @@ from ...config import DEFAULT_MODEL
 class Nvidia(OpenaiTemplate):
     label = "Nvidia"
     base_url = "https://integrate.api.nvidia.com/v1"
+    backup_url = "https://g4f.space/api/nvidia"
     login_url = "https://google.com"
     url = "https://build.nvidia.com"
     working = True
g4f/Provider/needs_auth/OpenRouter.py CHANGED
@@ -13,6 +13,7 @@ class OpenRouter(OpenaiTemplate):
 
 class OpenRouterFree(OpenRouter):
     label = "OpenRouter (free)"
+    base_url = "https://g4f.space/api/openrouter"
     max_tokens = 4096
     active_by_default = True
 
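The needs_auth diffs above all point providers at g4f.space mirrors: Azure and Claude move `base_url` off g4f.dev, GeminiPro swaps its `backup_url`, Groq and Nvidia gain a `backup_url`, and OpenRouterFree gets its own `base_url`. Since these are plain class attributes on OpenaiTemplate subclasses, redirecting one to a different mirror is a one-line override; a sketch with a hypothetical proxy URL:

```python
from g4f.Provider.needs_auth.Groq import Groq

class GroqViaMyProxy(Groq):
    # Hypothetical mirror; any OpenAI-compatible endpoint will do.
    base_url = "https://my-proxy.example/openai/v1"
```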
g4f/client/__init__.py CHANGED
@@ -15,7 +15,7 @@ from typing import Union, AsyncIterator, Iterator, Awaitable, Optional, List, Di
 
 from ..image.copy_images import copy_media, get_media_dir
 from ..typing import Messages, ImageType
-from ..providers.types import ProviderType, BaseRetryProvider, BaseProvider
+from ..providers.types import ProviderType, BaseProvider
 from ..providers.response import *
 from ..errors import NoMediaResponseError, ProviderNotFoundError
 from ..providers.retry_provider import IterListProvider
@@ -70,14 +70,15 @@ def iter_response(
     stream: bool,
     response_format: Optional[dict] = None,
     max_tokens: Optional[int] = None,
-    stop: Optional[list[str]] = None
+    stop: Optional[list[str]] = None,
+    provider_info: Optional[ProviderInfo] = None
 ) -> ChatCompletionResponseType:
     content = ""
     reasoning = []
     finish_reason = None
     tool_calls = None
     usage = None
-    provider: ProviderInfo = None
+    provider_info: ProviderInfo = None
     conversation: JsonConversation = None
     completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
     idx = 0
@@ -100,7 +101,7 @@ def iter_response(
             usage = chunk
             continue
         elif isinstance(chunk, ProviderInfo):
-            provider = chunk
+            provider_info = chunk
             continue
         elif isinstance(chunk, Reasoning):
             reasoning.append(chunk)
@@ -122,9 +123,9 @@ def iter_response(
 
         if stream:
             chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
-            if provider is not None:
-                chunk.provider = provider.name
-                chunk.model = provider.model
+            if provider_info is not None:
+                chunk.provider = provider_info.name
+                chunk.model = provider_info.model
             yield chunk
 
     if finish_reason is not None:
@@ -153,29 +154,18 @@ def iter_response(
         conversation=None if conversation is None else conversation.get_dict(),
         reasoning=reasoning if reasoning else None
     )
-    if provider is not None:
-        chat_completion.provider = provider.name
-        chat_completion.model = provider.model
+    if provider_info is not None:
+        chat_completion.provider = provider_info.name
+        chat_completion.model = provider_info.model
     yield chat_completion
 
-# Synchronous iter_append_model_and_provider function
-def iter_append_model_and_provider(response: ChatCompletionResponseType, last_model: str, last_provider: ProviderType) -> ChatCompletionResponseType:
-    if isinstance(last_provider, BaseRetryProvider):
-        yield from response
-        return
-    for chunk in response:
-        if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
-            if chunk.provider is None and last_provider is not None:
-                chunk.model = getattr(last_provider, "last_model", last_model)
-                chunk.provider = last_provider.__name__
-        yield chunk
-
 async def async_iter_response(
     response: AsyncIterator[Union[str, ResponseType]],
     stream: bool,
     response_format: Optional[dict] = None,
     max_tokens: Optional[int] = None,
-    stop: Optional[list[str]] = None
+    stop: Optional[list[str]] = None,
+    provider_info: Optional[ProviderInfo] = None
 ) -> AsyncChatCompletionResponseType:
     content = ""
     reasoning = []
@@ -184,7 +174,6 @@ async def async_iter_response(
     idx = 0
     tool_calls = None
     usage = None
-    provider: ProviderInfo = None
     conversation: JsonConversation = None
 
     try:
@@ -203,7 +192,7 @@ async def async_iter_response(
             usage = chunk
             continue
         elif isinstance(chunk, ProviderInfo):
-            provider = chunk
+            provider_info = chunk
             continue
         elif isinstance(chunk, Reasoning) and not stream:
             reasoning.append(chunk)
@@ -225,9 +214,9 @@ async def async_iter_response(
 
         if stream:
             chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
-            if provider is not None:
-                chunk.provider = provider.name
-                chunk.model = provider.model
+            if provider_info is not None:
+                chunk.provider = provider_info.name
+                chunk.model = provider_info.model
             yield chunk
 
     if finish_reason is not None:
@@ -256,32 +245,13 @@ async def async_iter_response(
             conversation=conversation,
             reasoning=reasoning if reasoning else None
         )
-        if provider is not None:
-            chat_completion.provider = provider.name
-            chat_completion.model = provider.model
+        if provider_info is not None:
+            chat_completion.provider = provider_info.name
+            chat_completion.model = provider_info.model
         yield chat_completion
     finally:
         await safe_aclose(response)
 
-async def async_iter_append_model_and_provider(
-    response: AsyncChatCompletionResponseType,
-    last_model: str,
-    last_provider: ProviderType
-) -> AsyncChatCompletionResponseType:
-    try:
-        if isinstance(last_provider, BaseRetryProvider):
-            async for chunk in response:
-                yield chunk
-            return
-        async for chunk in response:
-            if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
-                if chunk.provider is None and last_provider is not None:
-                    chunk.model = getattr(last_provider, "last_model", last_model)
-                    chunk.provider = last_provider.__name__
-            yield chunk
-    finally:
-        await safe_aclose(response)
-
 class Client(BaseClient):
     def __init__(
         self,
@@ -350,9 +320,10 @@ class Completions:
             **kwargs
         )
 
+        provider_info = ProviderInfo(**provider.get_dict(), model=model)
+
        def fallback(response):
-            response = iter_response(response, stream, response_format, max_tokens, stop)
-            return iter_append_model_and_provider(response, model, provider)
+            return iter_response(response, stream, response_format, max_tokens, stop, provider_info)
 
         if raw:
             def raw_response(response):
@@ -689,8 +660,8 @@ class AsyncCompletions:
         )
 
         def fallback(response):
-            response = async_iter_response(response, stream, response_format, max_tokens, stop)
-            return async_iter_append_model_and_provider(response, model, provider)
+            provider_info = ProviderInfo(**provider.get_dict(), model=model)
+            return async_iter_response(response, stream, response_format, max_tokens, stop, provider_info)
 
         if raw:
             async def raw_response(response):
@@ -816,7 +787,7 @@ class ClientFactory:
     )
 
     # Create async client
-    async_client = ClientFactory.createAsyncClient("PollinationsAI")
+    async_client = ClientFactory.create_async_client("PollinationsAI")
     """
 
     # Registry of live/custom providers
@@ -850,7 +821,7 @@ class ClientFactory:
        elif provider.startswith("custom:"):
            if provider.startswith("custom:"):
                serverId = provider[7:]
-                base_url = f"https://g4f.dev/custom/{serverId}"
+                base_url = f"https://g4f.space/custom/{serverId}"
            if not base_url:
                raise ValueError("base_url is required for custom providers")
            provider = create_custom_provider(base_url, api_key, name=name, **kwargs)
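With `iter_append_model_and_provider` and its async twin removed, the client now builds a single `ProviderInfo` from `provider.get_dict()` and passes it into the response iterators, which stamp `provider` and `model` onto each chunk themselves. A schematic condensation of the `fallback()` wiring above (async shown, where the parameter is consumed directly):

```python
provider_info = ProviderInfo(**provider.get_dict(), model=model)
async for chunk in async_iter_response(response, stream, response_format,
                                       max_tokens, stop, provider_info):
    # Chunks carry the provider name and model unless a ProviderInfo
    # chunk emitted by the provider overrides them mid-stream.
    print(chunk.provider, chunk.model)
```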
g4f/models.py CHANGED
@@ -19,7 +19,6 @@ from .Provider import (
     OIVSCodeSer0501,
     OperaAria,
     Perplexity,
-    Startnest,
     OpenAIFM,
     PollinationsAI,
     PollinationsImage,
@@ -157,7 +156,6 @@ default = Model(
         Copilot,
         DeepInfra,
         OperaAria,
-        Startnest,
         GLM,
         PollinationsAI,
         Qwen,
@@ -179,7 +177,6 @@ default_vision = VisionModel(
         OIVSCodeSer2,
         PollinationsAI,
         OperaAria,
-        Startnest,
         Together,
         HuggingSpace,
         GeminiPro,
@@ -207,7 +204,7 @@ gpt_4o = VisionModel(
 gpt_4o_mini = Model(
     name = 'gpt-4o-mini',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([Chatai, OIVSCodeSer2, Startnest, OpenaiChat])
+    best_provider = IterListProvider([Chatai, OIVSCodeSer2, OpenaiChat])
 )
 
 # gpt_4o_mini_audio = AudioModel(
g4f-6.9.7.dist-info/METADATA → g4f-6.9.9.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: g4f
-Version: 6.9.7
+Version: 6.9.9
 Summary: The official gpt4free repository | various collection of powerful language models
 Home-page: https://github.com/xtekky/gpt4free
 Author: Tekky
g4f-6.9.7.dist-info/RECORD → g4f-6.9.9.dist-info/RECORD
@@ -5,7 +5,7 @@ g4f/cookies.py,sha256=gx68OX5he2bYvsvvIQEESMUfwnVI18DOMJt9KvQrM8g,7915
 g4f/debug.py,sha256=Dh0cmlxUydMDfjJBiPWrO9dAuvv2sTElb3R036ax-34,1021
 g4f/errors.py,sha256=8Mp77UMW9_c8Flr1Z0Cvdrv0ixOF9CE0j1QX-cc5Zfc,2368
 g4f/files.py,sha256=JzlWe3V6Xc7FN4eag7Utc4P9dHN1s1sMKFvYSaM0NQg,907
-g4f/models.py,sha256=95ZV0HaQF7faPcEV7D_g9pjZc09tWU1oOF0ip78yKt8,24991
+g4f/models.py,sha256=QZV4UAis0QYaWGVO4Nv6n_3wMrLQvWHJ9V11Lo1EmBQ,24927
 g4f/typing.py,sha256=fBBb74HkxtLa358vjcd1U5yr3ThlKzP2DfE8wdx01lA,2388
 g4f/version.py,sha256=Lt6aOvuLBfDOHUSAtmncRMuNoDtisb1m8a447o7DkoM,4382
 g4f/Provider/ApiAirforce.py,sha256=gYaq0MjLDVtzfXpnkhGWdKAZqnfC2UK1dji5u_EvhsQ,1148
@@ -23,16 +23,14 @@ g4f/Provider/Mintlify.py,sha256=3Bvy1oh6scg4xs9pw5RY4mmi8XEvQDelnNp664TefbU,7937
 g4f/Provider/OIVSCodeSer.py,sha256=clebON7Ssd9syewh3BWT59XOeB-WORXF6FPOwbzfRmo,1366
 g4f/Provider/OperaAria.py,sha256=sLnTOKzbW9BxTxNmHXJ-YDnhPU6Dj6MBdDfqDh-Zz-c,14662
 g4f/Provider/Perplexity.py,sha256=GUebbVin9hCg4FDei9RX2N6WboFcDDPm061WnnpPRaI,13236
-g4f/Provider/PollinationsAI.py,sha256=lgrwBxYrZOyKSUF7AJu9TA7S3YA1rdbAw08-W94IFwU,21506
+g4f/Provider/PollinationsAI.py,sha256=Mhrb-Ym9N6RXTONWOlElBOW__g4KPiwpyeUa9L72waI,21736
 g4f/Provider/PollinationsImage.py,sha256=wdGY9kbPGlqAkyeJcjXgWOG3HLVPU4QK-JENwg3gmwk,2297
 g4f/Provider/Qwen.py,sha256=XPJHRlobijqjdDGVqA-aVyRx2SeM28zSWf-NmzxJtgE,23876
-g4f/Provider/Startnest.py,sha256=OocXEAK3pKG-tC_D_chGE7GD22dZr67J1m7JBhLxniM,9526
-g4f/Provider/StringableInference.py,sha256=ZohMZrVAn6G82zrYpLTvELkzfds4nxu09lx7wizFnbk,999
 g4f/Provider/TeachAnything.py,sha256=ST87YdOdxtIc5JMaKzGdVy9J9wlhdhYIchRtXBljIBU,2843
 g4f/Provider/WeWordle.py,sha256=ocKEfMYBKWPzv2WoDntn-WC-DkrSg5b_ipL9VYhTgsw,7007
 g4f/Provider/Yqcloud.py,sha256=xzX-G-Lv0pyOHTypSJdavmbm3GPxnL-wRoT3VmjzUIM,3213
-g4f/Provider/Yupp.py,sha256=flSbW_PDMqdnJU7jCyg1wWmHr-QV3DHIzdlEI5w6-TY,32365
-g4f/Provider/__init__.py,sha256=l6qxw1XlGgFqTTI2Q8YjlP-Yr17viOiPj1eaU8K9LEo,2782
+g4f/Provider/Yupp.py,sha256=StAyO9qFalvwOsj3RkkOJdkJMItvmBYwgwFx8GV0IT8,32392
+g4f/Provider/__init__.py,sha256=2azA2zjXZjkH3yDJSoWKeB3SCGB7SIa_0_DN44Lf39g,2684
 g4f/Provider/base_provider.py,sha256=lAd80-2hO1kzs9n9TUKv6fR4zHjSdZBqVErpm_gna9o,199
 g4f/Provider/helper.py,sha256=_4fO_b2U8BnpuF6kijUM4SaFbX2Pcz6mksxK9mVXhP4,111
 g4f/Provider/audio/EdgeTTS.py,sha256=VXcLCZDdf8DL0jIEO3tvduPK4NguUMEDJg9U5MkNvJk,2822
@@ -55,16 +53,16 @@ g4f/Provider/hf_space/StabilityAI_SD35Large.py,sha256=-VT4qa_K6MshG2TX6y7k01NMGH
 g4f/Provider/hf_space/__init__.py,sha256=licdlcuTs0yPwADmjBUA5uoN4ao-FnCUmlKiMWoHJ2g,3602
 g4f/Provider/hf_space/raise_for_status.py,sha256=xoVwrZSwJUvqQkSfeUAMUst8SyobpSKPux1iYu4SNH0,935
 g4f/Provider/local/Local.py,sha256=b6vSZcr5CfEXD5GMqNtwCfXNJtKyWKPg3jqBWJEZEwQ,1251
-g4f/Provider/local/Ollama.py,sha256=ukgvoCSDiXEqhPc6i-awylqiuSnFLpau0uMX9YfsJuA,3770
+g4f/Provider/local/Ollama.py,sha256=MtB36eUcn6iyJ9XZWfEHIrkCGeZ0Z6Hysm7AgHQUwE0,3835
 g4f/Provider/local/__init__.py,sha256=cEFsdfkq0bgVAIM__Sxc6WfIoGa3_Ymol26kQb6x14k,73
 g4f/Provider/needs_auth/AIBadgr.py,sha256=Oc05Cs-x40sF7tcrAyUqyoZ-TbtmV4wnMIm5aEjQ0xw,420
 g4f/Provider/needs_auth/Anthropic.py,sha256=-8hRSpSGaC3-AzhkuIsf1K1w8ZVKKb_T6Pwy3N8JPak,10200
-g4f/Provider/needs_auth/Azure.py,sha256=XPgC4ffl8AitnzQB4K77v2eyXm_rLznz1VUiHIeRqaA,6107
+g4f/Provider/needs_auth/Azure.py,sha256=q8Un1ouuG57-9WDyHnRdMfhTjfYM8ky0_8SIYOIkPck,6109
 g4f/Provider/needs_auth/BingCreateImages.py,sha256=_GrPx7KuttKrQ4IKX0fFwDPVsiCW9xTLycULNClZ6KQ,2066
 g4f/Provider/needs_auth/BlackboxPro.py,sha256=BA-eiaVGczyB7BHQ47jehq6dTkEslithw1nN2whceZo,270382
 g4f/Provider/needs_auth/CablyAI.py,sha256=113DsXPXAq3DO2QPHPJ0UJwMpXxomb7BM07RrcV6YQk,1253
 g4f/Provider/needs_auth/Cerebras.py,sha256=WcPgAVW9QRqoDJ9-KgWYDl08buEVRl1pWiwy7giAGlE,1753
-g4f/Provider/needs_auth/Claude.py,sha256=Mg9_g3S60WU-po4C1rYwLJuRPU9wq-9U0qp_hg8oufA,1398
+g4f/Provider/needs_auth/Claude.py,sha256=Xk6kXNhygZ62DbTuDNDo2K8qD_8CZMqc_IwCQBuaoR0,1400
 g4f/Provider/needs_auth/Cohere.py,sha256=NhHzZrYD2uLxFtEsBKR1FR6UywT3hhie5jIh2Ppvbrg,5851
 g4f/Provider/needs_auth/CopilotAccount.py,sha256=VCjfUzlPZxU-SduLy3V-YFrUN_KOLZoT2b-OZmZD4rw,394
 g4f/Provider/needs_auth/Custom.py,sha256=bD6ao9IkIG_oW7bYAHCSEZrMEnDX9u2X96q9LkCMbWc,347
@@ -73,19 +71,19 @@ g4f/Provider/needs_auth/DeepSeekAPI.py,sha256=UaN4W5b_1nXsyB5MN81NzHkQLz1V6DOzte
 g4f/Provider/needs_auth/FenayAI.py,sha256=2NkDnhdEVDMD1Xk13rp78nRt7oGwbgsX0jlonWt-Ym8,519
 g4f/Provider/needs_auth/Gemini.py,sha256=p0idmlvIbjg3kMEGsOv5N9b-j5b-blNT2yxO2xnAmiQ,22853
 g4f/Provider/needs_auth/GeminiCLI.py,sha256=BNQv_Hqzu3E0GtVRbZDZdcq--ZHpJDdLRF5ssG4yX9o,23554
-g4f/Provider/needs_auth/GeminiPro.py,sha256=zZBvjc7N313aqp08s8OyNb0kZDn_YTpVdFvSo2GuqD8,767
+g4f/Provider/needs_auth/GeminiPro.py,sha256=Q2G8tuZEZQOUSE-m3PoIs5NMyXuOO55784kXk8rFdsM,755
 g4f/Provider/needs_auth/GigaChat.py,sha256=2IqQlav_-g-4ZUMJ0tO8KGpvchg9_0ap6rzGP_5dLRI,6325
 g4f/Provider/needs_auth/GithubCopilot.py,sha256=AUoapqX6lwgC5WPIpKdKdhXcuXo4Stq7I0W5bVHMEbI,6093
 g4f/Provider/needs_auth/GithubCopilotAPI.py,sha256=O-8Bq6eWrVZTYFS5QufMDJ9SiSFsd0Pz91yejb6spOI,353
 g4f/Provider/needs_auth/GlhfChat.py,sha256=qSPKXnQ7igjF6_kiBnFhwq-YAqGmpZg_yu4OMRliSP4,1189
 g4f/Provider/needs_auth/Grok.py,sha256=3uwt0vjsSNHLZKS2DcUHTztiNqPIemwW82E2x0AQRTw,12884
-g4f/Provider/needs_auth/Groq.py,sha256=30XyhPJYnZG-_t_gZx8vkLmJK-sYyvUE03edEJjbPbE,548
+g4f/Provider/needs_auth/Groq.py,sha256=zErhEO8-oo4dOZY1u2uz0Xcxb3N76epEBhbuiOjYbx0,594
 g4f/Provider/needs_auth/LMArena.py,sha256=dKwHEgdvEY9ctjNZzi-COdV8RHqV_z7qEoKiLGGhxus,121399
 g4f/Provider/needs_auth/MetaAI.py,sha256=Bz9pvJUVH7RtCAP1Huvay-EgO057fL362mhx3GtVAqM,10653
 g4f/Provider/needs_auth/MetaAIAccount.py,sha256=D4LnhAt2MuKx1a4uSgX2lUbQrzAkeIYm8JCnZieZiak,672
 g4f/Provider/needs_auth/MicrosoftDesigner.py,sha256=4sJdjBPgiW9TEh4CeplCTNPXv6ZtZtFh0SYAiVfnrqk,7178
-g4f/Provider/needs_auth/Nvidia.py,sha256=URMUSrLxUAA9YpI_DsdgQ3QlaOJ0JYRSClmYbTwXWM4,391
-g4f/Provider/needs_auth/OpenRouter.py,sha256=KxALUf-mdoHHdYxEU53Rviyw7DRooFz39UMupDl-9V8,850
+g4f/Provider/needs_auth/Nvidia.py,sha256=RcVVSpTZM9VCMhLZ4z3g6j9yxHPQETeWQJb24SPfaII,439
+g4f/Provider/needs_auth/OpenRouter.py,sha256=KKWKKrzw2BGscd05m3BVw21TOHWRMkFETzOPQ1o5YNA,900
 g4f/Provider/needs_auth/OpenaiAPI.py,sha256=KpJ6qvUlFsFqpQbcwikNYLVsFx6K9h393Pu-yf_uS3g,362
 g4f/Provider/needs_auth/OpenaiAccount.py,sha256=vSe6Je-DoNbdGGEE-gNaW0Sa81rL8FPMaNyYr1U4PcI,209
 g4f/Provider/needs_auth/OpenaiChat.py,sha256=dVSm-HAzlvb3KZqZNf0XYFxvIBTfXajKQfe8-JuO-vM,67285
@@ -148,7 +146,7 @@ g4f/api/stubs.py,sha256=9xoJDAmRyUtL_jlqhVI8Q8G-gF9zTse46iNBGEYvD9s,4252
 g4f/cli/__init__.py,sha256=GUr_JFjLQDmDHmEyafHPvjaPYCQ5oeJclSazoEfRcxA,9167
 g4f/cli/__main__.py,sha256=SYf3GG2ZCYygnpZ25muxdmT40Dfbx-J0ionEAnfBxuI,94
 g4f/cli/client.py,sha256=azUTVzdOtfwj8Z6twhODzBQsONiwBBGq90h61Wu_PyY,11859
-g4f/client/__init__.py,sha256=cnrZBjFoj2bYIDkvdauSn74UpFcj-RdgR_qmxiNM8yQ,36739
+g4f/client/__init__.py,sha256=Cx6-yGyo4rkeIJq6Q5ECA0GINkvdbXhIkkr0cpUQk-4,35501
 g4f/client/helper.py,sha256=SwdS7I7CkWBG05INo04eMuukLRPUTkX4awgMHGPLpPI,1852
 g4f/client/models.py,sha256=H8wu1UMFu8oCw7jy6x5B46GjPJPUa35h42Pq87Dp6lI,2318
 g4f/client/service.py,sha256=lElAhe0y0ZzllkKjX2CBzy_kqgkwYw8P9fZWHivgWqw,5330
@@ -210,9 +208,9 @@ g4f/tools/files.py,sha256=bUTbeNp8ujTZQiW3GLTtFcgl3-1AnH3cDyIKHfh6Mjc,23397
 g4f/tools/media.py,sha256=AE9hGVRxQBVZzQ_Ylzeoo2TJUGXSBXO5RbLwj1I2ZTE,4701
 g4f/tools/run_tools.py,sha256=Yb0osWdDt4wUnkvwln4R6qcLapdddC3n2giZMPXWkzM,16662
 g4f/tools/web_search.py,sha256=vAZJD-qy15llsgAbkXzoEltqdFB6TlKLbqDJ1DS-6vs,2086
-g4f-6.9.7.dist-info/licenses/LICENSE,sha256=ixuiBLtpoK3iv89l7ylKkg9rs2GzF9ukPH7ynZYzK5s,35148
-g4f-6.9.7.dist-info/METADATA,sha256=hkmc6fZKRbRlrEGsojk2_hJBtaC3H6B5XT0J7nooFKE,23255
-g4f-6.9.7.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
-g4f-6.9.7.dist-info/entry_points.txt,sha256=J7Usl6dNjXJlvuzGAUEm6cuDXWpVdGq7SfK-tPoiZSI,67
-g4f-6.9.7.dist-info/top_level.txt,sha256=bMRlTupWYCcLWy80AnnKZkhpBsXsF8gI3BaMhSZSgRo,4
-g4f-6.9.7.dist-info/RECORD,,
+g4f-6.9.9.dist-info/licenses/LICENSE,sha256=ixuiBLtpoK3iv89l7ylKkg9rs2GzF9ukPH7ynZYzK5s,35148
+g4f-6.9.9.dist-info/METADATA,sha256=dpbXvC6fuScpC1DzPLZsVooh8dnODUlQdonjYOzYx2U,23255
+g4f-6.9.9.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
+g4f-6.9.9.dist-info/entry_points.txt,sha256=J7Usl6dNjXJlvuzGAUEm6cuDXWpVdGq7SfK-tPoiZSI,67
+g4f-6.9.9.dist-info/top_level.txt,sha256=bMRlTupWYCcLWy80AnnKZkhpBsXsF8gI3BaMhSZSgRo,4
+g4f-6.9.9.dist-info/RECORD,,
g4f/Provider/Startnest.py DELETED
@@ -1,215 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import json
-import time
-import hashlib
-
-from ..typing import AsyncResult, Messages, MediaListType
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-from ..tools.media import merge_media
-from ..image import to_data_uri
-from ..providers.response import FinishReason
-
-
-class Startnest(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "Startnest"
-    url = "https://play.google.com/store/apps/details?id=starnest.aitype.aikeyboard.chatbot.chatgpt"
-    api_endpoint = "https://api.startnest.uk/api/completions/stream"
-
-    working = False
-    needs_auth = False
-    supports_stream = True
-    supports_system_message = True
-    supports_message_history = True
-
-    default_model = 'gpt-4o-mini'
-    models = [default_model]
-    vision_models = models
-
-    @classmethod
-    def generate_signature(cls, timestamp: int) -> str:
-        """
-        Generate signature for authorization header
-        You may need to adjust this based on the actual signature algorithm
-        """
-        # This is a placeholder - the actual signature generation might involve:
-        # - A secret key
-        # - Specific string formatting
-        # - Different hash input
-
-        # Example implementation (adjust as needed):
-        kid = "36ccfe00-78fc-4cab-9c5b-5460b0c78513"
-        algorithm = "sha256"
-        validity = 90
-        user_id = ""
-
-        # The actual signature generation logic needs to be determined
-        # This is just a placeholder that creates a hash from timestamp
-        signature_input = f"{kid}{timestamp}{validity}".encode()
-        signature_value = hashlib.sha256(signature_input).hexdigest()
-
-        return f"Signature kid={kid}&algorithm={algorithm}&timestamp={timestamp}&validity={validity}&userId={user_id}&value={signature_value}"
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        media: MediaListType = None,
-        stream: bool = True,
-        max_tokens: int = None,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-
-        # Generate current timestamp
-        timestamp = int(time.time())
-
-        headers = {
-            "Accept-Encoding": "gzip",
-            "app_name": "AIKEYBOARD",
-            "Authorization": cls.generate_signature(timestamp),
-            "Connection": "Keep-Alive",
-            "Content-Type": "application/json; charset=UTF-8",
-            "Host": "api.startnest.uk",
-            "User-Agent": "okhttp/4.9.0",
-        }
-
-        async with ClientSession() as session:
-            # Merge media with messages
-            media = list(merge_media(media, messages))
-
-            # Convert messages to the required format
-            formatted_messages = []
-            for i, msg in enumerate(messages):
-                if isinstance(msg, dict):
-                    role = msg.get("role", "user")
-                    content = msg.get("content", "")
-
-                    # Create content array
-                    content_array = []
-
-                    # Add images if this is the last user message and media exists
-                    if media and role == "user" and i == len(messages) - 1:
-                        for image, _ in media:
-                            image_data_uri = to_data_uri(image)
-                            content_array.append({
-                                "image_url": {
-                                    "url": image_data_uri
-                                },
-                                "type": "image_url"
-                            })
-
-                    # Add text content
-                    if content:
-                        content_array.append({
-                            "text": content,
-                            "type": "text"
-                        })
-
-                    formatted_messages.append({
-                        "role": role,
-                        "content": content_array
-                    })
-
-            # If only one message and no media, use format_prompt as requested
-            if len(messages) == 1 and not media:
-                prompt_text = format_prompt(messages)
-                formatted_messages = [{
-                    "role": "user",
-                    "content": [{"text": prompt_text, "type": "text"}]
-                }]
-
-            data = {
-                "isVip": True,
-                "max_tokens": max_tokens,
-                "messages": formatted_messages,
-                "stream": stream
-            }
-
-            # Add advanceToolType if media is present
-            if media:
-                data["advanceToolType"] = "upload_and_ask"
-
-            async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
-                response.raise_for_status()
-
-                if stream:
-                    # Handle streaming response (SSE format)
-                    async for line in response.content:
-                        if line:
-                            line = line.decode('utf-8').strip()
-                            if line.startswith("data: "):
-                                data_str = line[6:]
-                                if data_str == "[DONE]":
-                                    break
-                                try:
-                                    json_data = json.loads(data_str)
-                                    if "choices" in json_data and len(json_data["choices"]) > 0:
-                                        choice = json_data["choices"][0]
-
-                                        # Handle content
-                                        delta = choice.get("delta", {})
-                                        content = delta.get("content", "")
-                                        if content:
-                                            yield content
-
-                                        # Handle finish_reason
-                                        if "finish_reason" in choice and choice["finish_reason"] is not None:
-                                            yield FinishReason(choice["finish_reason"])
-                                            break
-
-                                except json.JSONDecodeError:
-                                    continue
-                else:
-                    # Handle non-streaming response (regular JSON)
-                    response_text = await response.text()
-                    try:
-                        json_data = json.loads(response_text)
-                        if "choices" in json_data and len(json_data["choices"]) > 0:
-                            choice = json_data["choices"][0]
-                            if "message" in choice and "content" in choice["message"]:
-                                content = choice["message"]["content"]
-                                if content:
-                                    yield content.strip()
-
-                            # Handle finish_reason for non-streaming
-                            if "finish_reason" in choice and choice["finish_reason"] is not None:
-                                yield FinishReason(choice["finish_reason"])
-                        return
-
-                    except json.JSONDecodeError:
-                        # If it's still SSE format even when stream=False, handle it
-                        lines = response_text.strip().split('\n')
-                        full_content = []
-                        finish_reason_value = None
-
-                        for line in lines:
-                            if line.startswith("data: "):
-                                data_str = line[6:]
-                                if data_str == "[DONE]":
-                                    break
-                                try:
-                                    json_data = json.loads(data_str)
-                                    if "choices" in json_data and len(json_data["choices"]) > 0:
-                                        choice = json_data["choices"][0]
-                                        delta = choice.get("delta", {})
-                                        content = delta.get("content", "")
-                                        if content:
-                                            full_content.append(content)
-
-                                        # Store finish_reason
-                                        if "finish_reason" in choice and choice["finish_reason"] is not None:
-                                            finish_reason_value = choice["finish_reason"]
-
-                                except json.JSONDecodeError:
-                                    continue
-
-                        if full_content:
-                            yield ''.join(full_content)
-
-                        if finish_reason_value:
-                            yield FinishReason(finish_reason_value)
g4f/Provider/StringableInference.py DELETED
@@ -1,31 +0,0 @@
-from __future__ import annotations
-
-import secrets
-import string
-
-from .template import OpenaiTemplate
-
-class StringableInference(OpenaiTemplate):
-    label = "Stringable Inference"
-    url = "https://stringable-inference.onrender.com"
-    base_url = "https://stringableinf.com/api"
-    api_endpoint = "https://stringableinf.com/api/v1/chat/completions"
-
-    working = False
-    active_by_default = True
-    default_model = "deepseek-v3.2"
-    default_vision_model = "gpt-oss-120b"
-
-    @classmethod
-    def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
-        return {
-            "Accept": "text/event-stream" if stream else "application/json",
-            "Content-Type": "application/json",
-            "HTTP-Referer": "https://g4f.dev/",
-            "X-Title": "G4F Python",
-            **(
-                {"Authorization": f"Bearer {api_key}"}
-                if api_key else {}
-            ),
-            **({} if headers is None else headers)
-        }