webscout-8.2.6-py3-none-any.whl → webscout-8.2.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (150)
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -239
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  5. webscout/Extra/YTToolkit/README.md +375 -0
  6. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  7. webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
  8. webscout/Extra/autocoder/autocoder.py +309 -114
  9. webscout/Extra/autocoder/autocoder_utiles.py +15 -15
  10. webscout/Extra/gguf.md +430 -0
  11. webscout/Extra/tempmail/README.md +488 -0
  12. webscout/Extra/weather.md +281 -0
  13. webscout/Litlogger/Readme.md +175 -0
  14. webscout/Provider/AISEARCH/DeepFind.py +41 -37
  15. webscout/Provider/AISEARCH/README.md +279 -0
  16. webscout/Provider/AISEARCH/__init__.py +0 -1
  17. webscout/Provider/AISEARCH/genspark_search.py +228 -86
  18. webscout/Provider/AISEARCH/hika_search.py +11 -11
  19. webscout/Provider/AISEARCH/scira_search.py +324 -322
  20. webscout/Provider/AllenAI.py +7 -14
  21. webscout/Provider/Blackboxai.py +518 -74
  22. webscout/Provider/Cloudflare.py +0 -1
  23. webscout/Provider/Deepinfra.py +23 -21
  24. webscout/Provider/Flowith.py +217 -0
  25. webscout/Provider/FreeGemini.py +250 -0
  26. webscout/Provider/GizAI.py +15 -5
  27. webscout/Provider/Glider.py +11 -8
  28. webscout/Provider/HeckAI.py +80 -52
  29. webscout/Provider/Koboldai.py +7 -4
  30. webscout/Provider/LambdaChat.py +2 -2
  31. webscout/Provider/Marcus.py +10 -18
  32. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  33. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  35. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  36. webscout/Provider/OPENAI/README.md +1253 -0
  37. webscout/Provider/OPENAI/__init__.py +8 -0
  38. webscout/Provider/OPENAI/ai4chat.py +293 -286
  39. webscout/Provider/OPENAI/api.py +810 -0
  40. webscout/Provider/OPENAI/base.py +217 -14
  41. webscout/Provider/OPENAI/c4ai.py +373 -367
  42. webscout/Provider/OPENAI/chatgpt.py +7 -0
  43. webscout/Provider/OPENAI/chatgptclone.py +7 -0
  44. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  45. webscout/Provider/OPENAI/deepinfra.py +30 -20
  46. webscout/Provider/OPENAI/e2b.py +6 -0
  47. webscout/Provider/OPENAI/exaai.py +7 -0
  48. webscout/Provider/OPENAI/exachat.py +6 -0
  49. webscout/Provider/OPENAI/flowith.py +162 -0
  50. webscout/Provider/OPENAI/freeaichat.py +359 -352
  51. webscout/Provider/OPENAI/glider.py +323 -316
  52. webscout/Provider/OPENAI/groq.py +361 -354
  53. webscout/Provider/OPENAI/heckai.py +30 -64
  54. webscout/Provider/OPENAI/llmchatco.py +8 -0
  55. webscout/Provider/OPENAI/mcpcore.py +7 -0
  56. webscout/Provider/OPENAI/multichat.py +8 -0
  57. webscout/Provider/OPENAI/netwrck.py +356 -350
  58. webscout/Provider/OPENAI/opkfc.py +8 -0
  59. webscout/Provider/OPENAI/scirachat.py +471 -462
  60. webscout/Provider/OPENAI/sonus.py +9 -0
  61. webscout/Provider/OPENAI/standardinput.py +9 -1
  62. webscout/Provider/OPENAI/textpollinations.py +339 -329
  63. webscout/Provider/OPENAI/toolbaz.py +7 -0
  64. webscout/Provider/OPENAI/typefully.py +355 -0
  65. webscout/Provider/OPENAI/typegpt.py +358 -346
  66. webscout/Provider/OPENAI/uncovrAI.py +7 -0
  67. webscout/Provider/OPENAI/utils.py +103 -7
  68. webscout/Provider/OPENAI/venice.py +12 -0
  69. webscout/Provider/OPENAI/wisecat.py +19 -19
  70. webscout/Provider/OPENAI/writecream.py +7 -0
  71. webscout/Provider/OPENAI/x0gpt.py +7 -0
  72. webscout/Provider/OPENAI/yep.py +50 -21
  73. webscout/Provider/OpenGPT.py +1 -1
  74. webscout/Provider/TTI/AiForce/README.md +159 -0
  75. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  76. webscout/Provider/TTI/ImgSys/README.md +174 -0
  77. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  78. webscout/Provider/TTI/Nexra/README.md +155 -0
  79. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  80. webscout/Provider/TTI/README.md +128 -0
  81. webscout/Provider/TTI/aiarta/README.md +134 -0
  82. webscout/Provider/TTI/artbit/README.md +100 -0
  83. webscout/Provider/TTI/fastflux/README.md +129 -0
  84. webscout/Provider/TTI/huggingface/README.md +114 -0
  85. webscout/Provider/TTI/piclumen/README.md +161 -0
  86. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  87. webscout/Provider/TTI/talkai/README.md +139 -0
  88. webscout/Provider/TTS/README.md +192 -0
  89. webscout/Provider/TTS/__init__.py +2 -1
  90. webscout/Provider/TTS/speechma.py +500 -100
  91. webscout/Provider/TTS/sthir.py +94 -0
  92. webscout/Provider/TeachAnything.py +3 -7
  93. webscout/Provider/TextPollinationsAI.py +4 -2
  94. webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
  95. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  96. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  97. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  98. webscout/Provider/Writecream.py +11 -2
  99. webscout/Provider/__init__.py +8 -14
  100. webscout/Provider/ai4chat.py +4 -58
  101. webscout/Provider/asksteve.py +17 -9
  102. webscout/Provider/cerebras.py +3 -1
  103. webscout/Provider/koala.py +170 -268
  104. webscout/Provider/llmchat.py +3 -0
  105. webscout/Provider/lmarena.py +198 -0
  106. webscout/Provider/meta.py +7 -4
  107. webscout/Provider/samurai.py +223 -0
  108. webscout/Provider/scira_chat.py +4 -2
  109. webscout/Provider/typefully.py +23 -151
  110. webscout/__init__.py +4 -2
  111. webscout/cli.py +3 -28
  112. webscout/conversation.py +35 -35
  113. webscout/litagent/Readme.md +276 -0
  114. webscout/scout/README.md +402 -0
  115. webscout/swiftcli/Readme.md +323 -0
  116. webscout/version.py +1 -1
  117. webscout/webscout_search.py +2 -182
  118. webscout/webscout_search_async.py +1 -179
  119. webscout/zeroart/README.md +89 -0
  120. webscout/zeroart/__init__.py +134 -54
  121. webscout/zeroart/base.py +19 -13
  122. webscout/zeroart/effects.py +101 -99
  123. webscout/zeroart/fonts.py +1239 -816
  124. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
  125. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
  126. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  127. webscout-8.2.8.dist-info/entry_points.txt +3 -0
  128. webscout-8.2.8.dist-info/top_level.txt +1 -0
  129. webscout/Provider/AISEARCH/ISou.py +0 -256
  130. webscout/Provider/ElectronHub.py +0 -773
  131. webscout/Provider/Free2GPT.py +0 -241
  132. webscout/Provider/GPTWeb.py +0 -249
  133. webscout/Provider/bagoodex.py +0 -145
  134. webscout/Provider/geminiprorealtime.py +0 -160
  135. webscout/scout/core.py +0 -881
  136. webscout-8.2.6.dist-info/entry_points.txt +0 -3
  137. webscout-8.2.6.dist-info/top_level.txt +0 -2
  138. webstoken/__init__.py +0 -30
  139. webstoken/classifier.py +0 -189
  140. webstoken/keywords.py +0 -216
  141. webstoken/language.py +0 -128
  142. webstoken/ner.py +0 -164
  143. webstoken/normalizer.py +0 -35
  144. webstoken/processor.py +0 -77
  145. webstoken/sentiment.py +0 -206
  146. webstoken/stemmer.py +0 -73
  147. webstoken/tagger.py +0 -60
  148. webstoken/tokenizer.py +0 -158
  149. /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
  150. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
@@ -33,7 +33,6 @@ class Cloudflare(Provider):
         "@cf/meta/llama-2-7b-chat-int8",
         "@cf/meta/llama-3-8b-instruct",
         "@cf/meta/llama-3-8b-instruct-awq",
-        "@cf/meta/llama-3.1-8b-instruct",
         "@cf/meta/llama-3.1-8b-instruct-awq",
         "@cf/meta/llama-3.1-8b-instruct-fp8",
         "@cf/meta/llama-3.2-11b-vision-instruct",
@@ -24,12 +24,32 @@ class DeepInfra(Provider):
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
         "deepseek-ai/DeepSeek-R1-Turbo",
         "deepseek-ai/DeepSeek-V3",
-
+        "deepseek-ai/DeepSeek-Prover-V2-671B",
         "google/gemma-2-27b-it",
         "google/gemma-2-9b-it",
-        "google/gemma-3-27b-it",
         "google/gemma-3-12b-it",
+        "google/gemma-3-27b-it",
         "google/gemma-3-4b-it",
+        "meta-llama/Llama-3.3-70B-Instruct",
+        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        "meta-llama/Llama-Guard-4-12B",
+        "meta-llama/Meta-Llama-3.1-8B-Instruct",
+        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+        "microsoft/Phi-4-multimodal-instruct",
+        "microsoft/WizardLM-2-8x22B",
+        "microsoft/phi-4",
+        "microsoft/phi-4-reasoning-plus",
+        "mistralai/Mistral-Small-24B-Instruct-2501",
+        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
+        "Qwen/QwQ-32B",
+        "Qwen/Qwen2.5-72B-Instruct",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Qwen/Qwen3-14B",
+        "Qwen/Qwen3-30B-A3B",
+        "Qwen/Qwen3-32B",
+        "Qwen/Qwen3-235B-A22B",
         # "google/gemini-1.5-flash", # >>>> NOT WORKING
         # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
         # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
@@ -38,37 +58,19 @@ class DeepInfra(Provider):
 
         # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
         # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
-        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
         # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
         # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
-        "meta-llama/Llama-3.3-70B-Instruct",
-        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
         # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
         # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
         # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
         # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
-        "meta-llama/Meta-Llama-3.1-8B-Instruct",
-        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
         # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
-
-        "microsoft/phi-4",
-        "microsoft/Phi-4-multimodal-instruct",
-        "microsoft/WizardLM-2-8x22B",
         # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
         # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
         # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
-        "mistralai/Mistral-Small-24B-Instruct-2501",
-        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
         # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
         # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
-        "Qwen/QwQ-32B",
         # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
-        "Qwen/Qwen2.5-72B-Instruct",
-        "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Qwen/Qwen3-14B",
-        "Qwen/Qwen3-30B-A3B",
-        "Qwen/Qwen3-32B",
         # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
         # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
     ]
@@ -335,4 +337,4 @@ if __name__ == "__main__":
                 display_text = "Empty or invalid response"
             print(f"\r{model:<50} {status:<10} {display_text}")
         except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
@@ -0,0 +1,217 @@
+import requests # Use requests for compatibility with zstd streaming
+from requests import Session
+import zstandard as zstd
+from typing import Any, Dict, Generator, Union
+import uuid
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class Flowith(Provider):
+    """
+    A provider class for interacting with the Flowith API.
+    """
+    AVAILABLE_MODELS = ["gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku", "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gpt-4.1-mini",
+        system_prompt: str = "You are a helpful assistant.",
+        browser: str = "chrome"
+    ):
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://edge.flowith.net/ai/chat?mode=general"
+        self.session = Session()
+        self.agent = LitAgent()
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "origin": "https://flowith.io",
+            "referer": "https://edge.flowith.net/",
+            "user-agent": self.fingerprint["user_agent"],
+            "dnt": "1",
+            "sec-gpc": "1"
+        }
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.node_id = str(uuid.uuid4())
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        payload = {
+            "model": self.model,
+            "messages": [
+                {"content": self.system_prompt, "role": "system"},
+                {"content": conversation_prompt, "role": "user"}
+            ],
+            "stream": stream,
+            "nodeId": self.node_id
+        }
+
+        def for_stream():
+            try:
+                response = requests.post(
+                    self.url,
+                    headers=self.headers,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout
+                )
+                encoding = response.headers.get('Content-Encoding', '').lower()
+                streaming_text = ""
+                if encoding == 'zstd':
+                    dctx = zstd.ZstdDecompressor()
+                    with dctx.stream_reader(response.raw) as reader:
+                        while True:
+                            chunk = reader.read(4096)
+                            if not chunk:
+                                break
+                            text = chunk.decode('utf-8', errors='replace')
+                            streaming_text += text
+                            yield text if raw else dict(text=text)
+                else:
+                    for chunk in response.iter_content(chunk_size=4096):
+                        if not chunk:
+                            break
+                        text = chunk.decode('utf-8', errors='replace')
+                        streaming_text += text
+                        yield text if raw else dict(text=text)
+                self.last_response.update(dict(text=streaming_text))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            try:
+                response = requests.post(
+                    self.url,
+                    headers=self.headers,
+                    json=payload,
+                    timeout=self.timeout
+                )
+                encoding = response.headers.get('Content-Encoding', '').lower()
+                if encoding == 'zstd':
+                    dctx = zstd.ZstdDecompressor()
+                    with dctx.stream_reader(response.raw) as reader:
+                        decompressed = reader.read()
+                        text = decompressed.decode('utf-8', errors='replace')
+                else:
+                    text = response.text
+                self.last_response.update(dict(text=text))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+                return self.last_response
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        # Always return a non-empty string for the assistant's message
+        if not isinstance(response, dict):
+            return ""
+        text = response.get("text", None)
+        if text is None or not isinstance(text, str) or not text.strip():
+            # Fallback: return a placeholder to avoid Conversation error
+            return "[No response generated]"
+        return text
+
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'Model':<20} {'Status':<10} {'Response'}")
+    print("-" * 80)
+    for model in Flowith.AVAILABLE_MODELS:
+        try:
+            ai = Flowith(model=model, timeout=60)
+            response = ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+            status = '✓' if response_text.strip() else '✗'
+            display_text = response_text.strip()[:100]
+            print(f"{model:<20} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<20} {'✗':<10} {str(e)}")
@@ -0,0 +1,250 @@
+#!/usr/bin/env python3
+"""
+FreeGemini API client for the free-gemini.vercel.app service.
+Supports streaming responses from Gemini 2.0 Flash model.
+"""
+
+import json
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+from typing import Dict, Generator, Any, Union, Optional
+
+from webscout import exceptions
+from webscout.AIutel import Optimizers, AwesomePrompts, sanitize_stream
+from webscout.conversation import Conversation
+from webscout.litagent import LitAgent
+from webscout.AIbase import Provider
+
+
+class FreeGemini(Provider):
+    """
+    A class to interact with the free-gemini.vercel.app API,
+    which provides access to Gemini models.
+    """
+    AVAILABLE_MODELS = ["gemini-2.0-flash"]
+
+    @staticmethod
+    def _gemini_extractor(data: Dict) -> Optional[str]:
+        """Extract text content from Gemini API response."""
+        try:
+            if "candidates" in data and data["candidates"]:
+                candidate = data["candidates"][0]
+                if "content" in candidate and "parts" in candidate["content"]:
+                    parts = candidate["content"]["parts"]
+                    if parts and "text" in parts[0]:
+                        return parts[0]["text"]
+        except (KeyError, IndexError, TypeError):
+            pass
+        return None
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 4000,
+        temperature: float = 0.5,
+        top_p: float = 1.0,
+        timeout: int = 120, # Default timeout for this specific API
+        proxies: dict = {}, # Standard proxy support
+        filepath: str = None,
+        update_file: bool = True,
+        history_offset: int = 10250,
+        intro: str = None,
+        act: str = None,
+        model: str = "gemini-2.0-flash",
+        system_prompt: str = "You are a helpful assistant.", # For consistency, though not directly used in payload
+    ):
+        """Initialize the FreeGemini client.
+
+        Args:
+            is_conversation (bool): Enable conversation history. Defaults to True.
+            max_tokens (int): Maximum tokens to sample. Defaults to 4000.
+            temperature (float): Sampling temperature. Defaults to 0.5.
+            top_p (float): Nucleus sampling parameter. Defaults to 1.0.
+            timeout (int): Request timeout in seconds. Defaults to 120.
+            proxies (dict): HTTP proxies. Defaults to {}.
+            filepath (str, optional): Path to save conversation history. Defaults to None.
+            update_file (bool): Update conversation history file. Defaults to True.
+            history_offset (int): Limit conversation history. Defaults to 10250.
+            intro (str, optional): Introduction for the conversation.
+            act (str, optional): Act for AwesomePrompts.
+            model (str): Model to use. Defaults to "gemini-2.0-flash".
+            system_prompt (str): System prompt (primarily for API consistency).
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.session = Session()
+        self.model = model
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens # Consistent naming
+        self.temperature = temperature
+        self.top_p = top_p
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt # Stored for consistency
+
+        self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse"
+
+        self.agent = LitAgent()
+        self.headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json, text/event-stream",
+            "User-Agent": self.agent.random(),
+            "Origin": "https://free-gemini.vercel.app",
+            "Referer": "https://free-gemini.vercel.app/",
+        }
+
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
+        self.__available_optimizers = (
+            method for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
+            if act else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False, # Default to False for consistency
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        """Sends a prompt to the FreeGemini API and returns the response.
+
+        Args:
+            prompt (str): The prompt to send to the model.
+            stream (bool): Whether to stream the response. Defaults to False.
+            raw (bool): Return raw response instead of parsed text. Defaults to False.
+            optimizer (str, optional): Optimizer to use for the prompt.
+            conversationally (bool, optional): Whether to apply optimizer conversationally.
+
+        Returns:
+            Union[Dict[str, Any], Generator[Dict[str, Any], None, None]]:
+                The generated response as a dictionary or generator.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        payload = {
+            "contents": [{"role": "user", "parts": [{"text": conversation_prompt}]}],
+            "generationConfig": {
+                "temperature": self.temperature,
+                "maxOutputTokens": self.max_tokens_to_sample,
+                "topP": self.top_p
+            },
+            "safetySettings": [
+                # Default safety settings from original class
+                {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"},
+                {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_ONLY_HIGH"},
+                {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_ONLY_HIGH"},
+                {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_ONLY_HIGH"}
+            ]
+        }
+
+        # Internal generator for handling API call and history update
+        def _generate_content_and_update_history():
+            streaming_text_accumulator = ""
+            try:
+                response = self.session.post(
+                    self.api_endpoint,
+                    json=payload,
+                    stream=True, # API always streams
+                    timeout=self.timeout,
+                    impersonate="chrome120"
+                )
+                response.raise_for_status()
+
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value="data:",
+                    to_json=True,
+                    content_extractor=self._gemini_extractor,
+                    yield_raw_on_error=False
+                )
+
+                for content_chunk_str in processed_stream: # yields string
+                    if content_chunk_str and isinstance(content_chunk_str, str):
+                        streaming_text_accumulator += content_chunk_str
+                        yield content_chunk_str # Yield the raw text chunk
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
+            finally:
+                if streaming_text_accumulator:
+                    self.last_response = {"text": streaming_text_accumulator}
+                    self.conversation.update_chat_history(prompt, streaming_text_accumulator)
+
+        if stream:
+            def stream_wrapper():
+                for text_chunk in _generate_content_and_update_history():
+                    yield {"text": text_chunk} if not raw else text_chunk
+            return stream_wrapper()
+        else: # Not streaming from the perspective of the caller of `ask`
+            full_text_response = ""
+            for text_chunk in _generate_content_and_update_history():
+                full_text_response += text_chunk
+
+            # self.last_response and history are updated by the generator's `finally`
+            return {"text": full_text_response} if not raw else full_text_response
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """Generates a response from the FreeGemini API.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            Union[str, Generator[str, None, None]]: The API response.
+        """
+        def for_stream_chat():
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)
+
+        def for_non_stream_chat():
+            response_data = self.ask(
+                prompt, stream=False, raw=False, # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_data)
+
+        return for_stream_chat() if stream else for_non_stream_chat()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text", "")
+
+if __name__ == "__main__":
+    # Example usage
+    free_gemini = FreeGemini()
+    response = free_gemini.chat("What is the capital of France?", stream=False)
+    print(response) # Should print the response from the API
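FreeGemini always streams SSE from the upstream endpoint; `ask(stream=False)` simply drains the internal generator, and chat history is committed in that generator's `finally` block either way. A streaming counterpart to the `__main__` example above (the import path is an assumption taken from the file list):

```python
# Streaming variant of the file's own example usage.
from webscout.Provider.FreeGemini import FreeGemini  # path assumed from the file list

bot = FreeGemini()
for chunk in bot.chat("What is the capital of France?", stream=True):
    print(chunk, end="", flush=True)
```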
@@ -2,7 +2,7 @@ import os
 import base64
 import random
 import json
-from typing import Union, Dict, Any, Optional
+from typing import Union, Dict, Any, Optional, Generator
 from urllib import response
 
 from curl_cffi import CurlError
@@ -240,7 +240,7 @@ class GizAI(Provider):
         stream: bool = False, # Parameter kept for compatibility but not used
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
+    ) -> 'Generator[str, None, None]':
         """
         Generates a response from the GizAI API.
 
@@ -251,7 +251,7 @@ class GizAI(Provider):
             conversationally (bool): Whether to generate the prompt conversationally.
 
         Returns:
-            str: The API response text.
+            Generator[str, None, None]: The API response text as a generator.
 
         Examples:
             >>> ai = GizAI()
@@ -262,7 +262,11 @@ class GizAI(Provider):
             prompt, stream=False, raw=False,
             optimizer=optimizer, conversationally=conversationally
         )
-        return self.get_message(response_data)
+        result = self.get_message(response_data)
+        if stream:
+            yield result
+        else:
+            return result
 
     def get_message(self, response: Union[dict, str]) -> str:
         """
@@ -282,4 +286,10 @@ class GizAI(Provider):
         if isinstance(response, str):
             return response
         assert isinstance(response, dict), "Response should be either dict or str"
-        return response.get("text", "")
+        return response.get("text", "")
+
+if __name__ == "__main__":
+    ai = GizAI()
+    response = ai.chat("Hello, how are you?", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
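One caveat with the GizAI change: because the new body contains `yield result`, Python treats the whole `chat` method as a generator function, so the `return result` branch terminates the generator rather than handing a string to non-streaming callers, who now have to iterate or call `next()`. The other providers in this release keep both behaviors by dispatching between two inner functions; a sketch of that pattern (this is the Flowith/FreeGemini structure, not the shipped GizAI code):

```python
# Dispatch pattern used by Flowith and FreeGemini above: the outer method is
# a plain function, so stream=False still returns a str to the caller.
def chat(self, prompt, stream=False, optimizer=None, conversationally=False):
    def for_stream():
        # GizAI's ask() is non-streaming, so a single chunk is yielded.
        yield self.get_message(
            self.ask(prompt, stream=False, optimizer=optimizer,
                     conversationally=conversationally)
        )

    def for_non_stream():
        return self.get_message(
            self.ask(prompt, stream=False, optimizer=optimizer,
                     conversationally=conversationally)
        )

    return for_stream() if stream else for_non_stream()
```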
@@ -1,8 +1,10 @@
-from curl_cffi import CurlError
-from curl_cffi.requests import Session
+import cloudscraper
+# from curl_cffi.requests import Session
 import json
 from typing import Union, Any, Dict, Generator, Optional, List
 
+from curl_cffi import CurlError
+
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
@@ -40,7 +42,7 @@ class GliderAI(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")
 
-        self.session = Session() # Use curl_cffi Session
+        self.session = cloudscraper.create_scraper() # Use cloudscraper Session
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://glider.so/api/chat"
@@ -52,10 +54,11 @@ class GliderAI(Provider):
         self.headers = {
             "accept": "*/*",
             "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/json",
+            "content-type": "text/plain;charset=UTF-8",
             "origin": "https://glider.so",
             "referer": "https://glider.so/",
             "user-agent": Lit().random(),
+            "cookie": "_vcrcs=1.1746977094.3600.NDlmNmM5YWFmNzMxZWUyNzE4ZjBhOTJlZGZlZDU3MGU=.850a77f5f36f60ae5da2f51b55231a54",
         }
         self.session.headers.update(self.headers)
         self.session.proxies = proxies # Assign proxies directly
@@ -115,8 +118,8 @@ class GliderAI(Provider):
 
         payload = {
             "messages": [
-                {"role": "user", "content": conversation_prompt},
-                {"role": "system", "content": self.system_prompt}
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
             ],
             "model": self.model,
         }
@@ -124,9 +127,9 @@ class GliderAI(Provider):
         def for_stream():
             streaming_text = ""
             try:
+                import json
                 response = self.session.post(
-                    self.api_endpoint, json=payload, stream=True, timeout=self.timeout,
-                    impersonate="chrome120" # Add impersonate
+                    self.api_endpoint, data=json.dumps(payload), stream=True, timeout=self.timeout
                 )
                 response.raise_for_status()
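The GliderAI hunks replace `curl_cffi`'s impersonating session with `cloudscraper`, reorder the messages so the system prompt comes first, post the payload as a pre-serialized string under `text/plain;charset=UTF-8`, and pin a hard-coded `_vcrcs` challenge cookie that will presumably expire. Since `cloudscraper.create_scraper()` returns a `requests.Session` subclass that solves Cloudflare's JS challenge, the surrounding calls keep their shape; a sketch of the new transport in isolation (payload contents abbreviated):

```python
# cloudscraper behaves like requests.Session, so stream=True and
# iter_content keep working; only curl_cffi's impersonate= goes away.
import json
import cloudscraper

scraper = cloudscraper.create_scraper()
resp = scraper.post(
    "https://glider.so/api/chat",  # endpoint from the diff above
    data=json.dumps({"messages": [], "model": "..."}),  # shape per the diff; contents elided
    headers={"content-type": "text/plain;charset=UTF-8"},
    stream=True,
    timeout=30,
)
resp.raise_for_status()
```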