webscout-7.0-py3-none-any.whl → webscout-7.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (147)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +20 -0
  24. webscout/Litlogger/core/logger.py +123 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +50 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +174 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +231 -0
  31. webscout/Litlogger/styles/formats.py +377 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/Blackboxai.py +136 -137
  38. webscout/Provider/ChatGPTGratis.py +226 -0
  39. webscout/Provider/Cloudflare.py +91 -78
  40. webscout/Provider/DeepSeek.py +218 -0
  41. webscout/Provider/Deepinfra.py +59 -35
  42. webscout/Provider/Free2GPT.py +131 -124
  43. webscout/Provider/Gemini.py +100 -115
  44. webscout/Provider/Glider.py +74 -59
  45. webscout/Provider/Groq.py +30 -18
  46. webscout/Provider/Jadve.py +108 -77
  47. webscout/Provider/Llama3.py +117 -94
  48. webscout/Provider/Marcus.py +191 -137
  49. webscout/Provider/Netwrck.py +62 -50
  50. webscout/Provider/PI.py +79 -124
  51. webscout/Provider/PizzaGPT.py +129 -83
  52. webscout/Provider/QwenLM.py +311 -0
  53. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  54. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  55. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  56. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  57. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  58. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  59. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  60. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  61. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  62. webscout/Provider/TTI/artbit/__init__.py +22 -22
  63. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  64. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  65. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  66. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  67. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  68. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  69. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  70. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  71. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  72. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  73. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  74. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  75. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  76. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  77. webscout/Provider/TTI/talkai/__init__.py +4 -4
  78. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  79. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  80. webscout/Provider/TTS/deepgram.py +182 -182
  81. webscout/Provider/TTS/elevenlabs.py +136 -136
  82. webscout/Provider/TTS/gesserit.py +150 -150
  83. webscout/Provider/TTS/murfai.py +138 -138
  84. webscout/Provider/TTS/parler.py +133 -134
  85. webscout/Provider/TTS/streamElements.py +360 -360
  86. webscout/Provider/TTS/utils.py +280 -280
  87. webscout/Provider/TTS/voicepod.py +116 -116
  88. webscout/Provider/TextPollinationsAI.py +74 -47
  89. webscout/Provider/WiseCat.py +193 -0
  90. webscout/Provider/__init__.py +144 -136
  91. webscout/Provider/cerebras.py +242 -227
  92. webscout/Provider/chatglm.py +204 -204
  93. webscout/Provider/dgaf.py +67 -39
  94. webscout/Provider/gaurish.py +105 -66
  95. webscout/Provider/geminiapi.py +208 -208
  96. webscout/Provider/granite.py +223 -0
  97. webscout/Provider/hermes.py +218 -218
  98. webscout/Provider/llama3mitril.py +179 -179
  99. webscout/Provider/llamatutor.py +72 -62
  100. webscout/Provider/llmchat.py +60 -35
  101. webscout/Provider/meta.py +794 -794
  102. webscout/Provider/multichat.py +331 -230
  103. webscout/Provider/typegpt.py +359 -356
  104. webscout/Provider/yep.py +5 -5
  105. webscout/__main__.py +5 -5
  106. webscout/cli.py +319 -319
  107. webscout/conversation.py +241 -242
  108. webscout/exceptions.py +328 -328
  109. webscout/litagent/__init__.py +28 -28
  110. webscout/litagent/agent.py +2 -3
  111. webscout/litprinter/__init__.py +0 -58
  112. webscout/scout/__init__.py +8 -8
  113. webscout/scout/core.py +884 -884
  114. webscout/scout/element.py +459 -459
  115. webscout/scout/parsers/__init__.py +69 -69
  116. webscout/scout/parsers/html5lib_parser.py +172 -172
  117. webscout/scout/parsers/html_parser.py +236 -236
  118. webscout/scout/parsers/lxml_parser.py +178 -178
  119. webscout/scout/utils.py +38 -38
  120. webscout/swiftcli/__init__.py +811 -811
  121. webscout/update_checker.py +2 -12
  122. webscout/version.py +1 -1
  123. webscout/webscout_search.py +1142 -1140
  124. webscout/webscout_search_async.py +635 -635
  125. webscout/zeroart/__init__.py +54 -54
  126. webscout/zeroart/base.py +60 -60
  127. webscout/zeroart/effects.py +99 -99
  128. webscout/zeroart/fonts.py +816 -816
  129. {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
  130. webscout-7.2.dist-info/RECORD +217 -0
  131. webstoken/__init__.py +30 -30
  132. webstoken/classifier.py +189 -189
  133. webstoken/keywords.py +216 -216
  134. webstoken/language.py +128 -128
  135. webstoken/ner.py +164 -164
  136. webstoken/normalizer.py +35 -35
  137. webstoken/processor.py +77 -77
  138. webstoken/sentiment.py +206 -206
  139. webstoken/stemmer.py +73 -73
  140. webstoken/tagger.py +60 -60
  141. webstoken/tokenizer.py +158 -158
  142. webscout/Provider/RUBIKSAI.py +0 -272
  143. webscout-7.0.dist-info/RECORD +0 -199
  144. {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
  145. {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
  146. {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
  147. {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
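
Beyond the provider updates, the largest structural change in this list is to Litlogger (entries 21-35): the monolithic `__init__.py` (+67 -681) is split into `core`, `handlers`, `styles`, and `utils` submodules, while the package root keeps re-exporting the public names. A minimal sketch of that surface as the 7.2 providers below actually use it; only `Logger`, `LogFormat`, and the `info`/`debug`/`error` methods are visible in this diff, anything beyond that is assumption:

    from webscout.Litlogger import Logger, LogFormat

    # Only the constructor arguments and methods exercised by the provider
    # code in this diff; handler and level configuration presumably lives in
    # the new core/handlers submodules and is not shown here.
    log = Logger(name="demo", format=LogFormat.MODERN_EMOJI)
    log.info("starting up")
    log.debug("request payload built")
    log.error("request failed")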
webscout/Provider/Blackboxai.py

@@ -1,16 +1,35 @@
 import requests
-import re
 import json
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider, AsyncProvider
+from typing import Any, Dict, Optional, Union, Generator, List
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
 from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict
-import httpx
+from webscout.Litlogger import Logger, LogFormat
+
+class BLACKBOXAI(Provider):
+    """
+    BlackboxAI provider for interacting with the Blackbox API.
+    Supports synchronous operations with multiple models.
+    """
+    url = "https://api.blackbox.ai"
+    api_endpoint = "https://api.blackbox.ai/api/chat"
+
+
+
+    AVAILABLE_MODELS = {
+        "deepseek-v3": "deepseek-ai/DeepSeek-V3",
+        "deepseek-r1": "deepseek-ai/DeepSeek-R1",
+        "deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat",
+        "mixtral-small-28b": "mistralai/Mistral-Small-24B-Instruct-2501",
+        "dbrx-instruct": "databricks/dbrx-instruct",
+        "qwq-32b": "Qwen/QwQ-32B-Preview",
+        "hermes-2-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "claude-3.5-sonnet": "claude-sonnet-3.5",
+        "gemini-1.5-flash": "gemini-1.5-flash",
+        "gemini-1.5-pro": "gemini-pro",
+        "gemini-2.0-flash": "Gemini-Flash-2.0",
+    }
 
-#------------------------------------------------------BLACKBOXAI--------------------------------------------------------
-class BLACKBOXAI:
     def __init__(
         self,
         is_conversation: bool = True,
@@ -22,51 +41,38 @@ class BLACKBOXAI:
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = None,
+        model: str = "deepseek-ai/DeepSeek-V3",
+        logging: bool = False,
+        system_message: str = "You are a helpful AI assistant."
     ):
-        """Instantiates BLACKBOXAI
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): Model name. Defaults to "Phind Model".
-        """
+        """Initialize BlackboxAI with enhanced configuration options."""
+        self.logger = Logger(
+            name="BlackboxAI",
+            format=LogFormat.MODERN_EMOJI,
+
+        ) if logging else None
+
         self.session = requests.Session()
         self.max_tokens_to_sample = max_tokens
         self.is_conversation = is_conversation
-        self.chat_endpoint = "https://api.blackbox.ai/api/chat"
-        self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
-        self.model = model
-        self.previewToken: str = None
-        self.userId: str = ""
-        self.codeModelMode: bool = True
-        self.id: str = ""
-        self.agentMode: dict = {}
-        self.trendingAgentMode: dict = {}
-        self.isMicMode: bool = False
+        self.model = self.get_model(model)
+        self.system_message = system_message
 
         self.headers = {
             "Content-Type": "application/json",
-            "User-Agent": "",
             "Accept": "*/*",
-            "Accept-Encoding": "Identity",
         }
 
+        if self.logger:
+            self.logger.info(f"Initializing BlackboxAI with model: {self.model}")
+
         self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
+            method for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
-        self.session.headers.update(self.headers)
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -74,12 +80,62 @@ class BLACKBOXAI:
             if act
             else intro or Conversation.intro
         )
+
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
 
+    @classmethod
+    def get_model(self, model: str) -> str:
+        """Resolve model name from alias"""
+        if model in self.AVAILABLE_MODELS:
+            return self.AVAILABLE_MODELS[model]
+        raise ValueError(f"Unknown model: {model}. Available models: {', '.join(self.AVAILABLE_MODELS)}")
+
+    def _make_request(
+        self,
+        messages: List[Dict[str, str]],
+        stream: bool = False
+    ) -> Generator[str, None, None]:
+        """Make synchronous request to BlackboxAI API."""
+        if self.logger:
+            self.logger.debug(f"Making request with {len(messages)} messages")
+
+        data = {
+            "messages": messages,
+            "model": self.model,
+            "max_tokens": self.max_tokens_to_sample
+        }
+
+        try:
+            response = self.session.post(
+                self.api_endpoint,
+                json=data,
+                headers=self.headers,
+                stream=stream,
+                timeout=self.timeout
+            )
+
+            if not response.ok:
+                error_msg = f"API request failed: {response.status_code} - {response.text}"
+                if self.logger:
+                    self.logger.error(error_msg)
+                raise exceptions.FailedToGenerateResponseError(error_msg)
+
+            if stream:
+                for line in response.iter_lines(decode_unicode=True):
+                    if line:
+                        yield line
+            else:
+                yield response.text
+
+        except requests.exceptions.RequestException as e:
+            if self.logger:
+                self.logger.error(f"Request failed: {str(e)}")
+            raise exceptions.ProviderConnectionError(f"Connection error: {str(e)}")
+
     def ask(
         self,
         prompt: str,
@@ -87,83 +143,36 @@ class BLACKBOXAI:
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "print('How may I help you today?')"
-        }
-        ```
-        """
+    ) -> Union[Dict[str, str], Generator[Dict[str, str], None, None]]:
+        """Send a prompt to BlackboxAI API and return the response."""
+        if self.logger:
+            self.logger.debug(f"Processing request [stream={stream}]")
+
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer: {optimizer}")
+                raise ValueError(f"Optimizer is not one of {self.__available_optimizers}")
 
-        self.session.headers.update(self.headers)
-        payload = {
-            "messages": [
-                # json.loads(prev_messages),
-                {"content": conversation_prompt, "role": "user"}
-            ],
-            "id": self.id,
-            "previewToken": self.previewToken,
-            "userId": self.userId,
-            "codeModelMode": self.codeModelMode,
-            "agentMode": self.agentMode,
-            "trendingAgentMode": self.trendingAgentMode,
-            "isMicMode": self.isMicMode,
-        }
+        messages = [
+            {"role": "system", "content": self.system_message},
+            {"role": "user", "content": conversation_prompt}
+        ]
 
         def for_stream():
-            response = self.session.post(
-                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-            )
-            if (
-                not response.ok
-                or not response.headers.get("Content-Type")
-                == "text/plain; charset=utf-8"
-            ):
-                raise Exception(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-            streaming_text = ""
-            for value in response.iter_lines(
-                decode_unicode=True,
-                chunk_size=self.stream_chunk_size,
-
-            ):
-                try:
-                    if bool(value):
-                        streaming_text += value + ("\n" if stream else "")
-
-                        resp = dict(text=streaming_text)
-                        self.last_response.update(resp)
-                        yield value if raw else resp
-                except json.decoder.JSONDecodeError:
-                    pass
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+            for text in self._make_request(messages, stream=True):
+                yield {"text": text}
 
         def for_non_stream():
-            for _ in for_stream():
-                pass
+            response_text = next(self._make_request(messages, stream=False))
+            self.last_response = {"text": response_text}
             return self.last_response
 
         return for_stream() if stream else for_non_stream()
@@ -174,20 +183,17 @@ class BLACKBOXAI:
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
+    ) -> Union[str, Generator[str, None, None]]:
+        """Generate response as string."""
+        if self.logger:
+            self.logger.debug(f"Chat request initiated [stream={stream}]")
 
         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt,
+                stream=True,
+                optimizer=optimizer,
+                conversationally=conversationally
             ):
                 yield self.get_message(response)
 
@@ -195,7 +201,7 @@ class BLACKBOXAI:
             return self.get_message(
                 self.ask(
                     prompt,
-                    False,
+                    stream=False,
                     optimizer=optimizer,
                     conversationally=conversationally,
                 )
@@ -203,28 +209,21 @@ class BLACKBOXAI:
 
         return for_stream() if stream else for_non_stream()
 
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
+    def get_message(self, response: Dict[str, Any]) -> str:
+        """Extract message from response dictionary."""
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
 
-# Function to clean the response text
-def clean_response(response_text: str) -> str:
-    # Remove web search results
-    cleaned_response = re.sub(r'\$~~~\$.*?\$~~~\$', '', response_text, flags=re.DOTALL)
-    # Remove any remaining special characters or markers
-    cleaned_response = re.sub(r'\$~~~', '', cleaned_response)
-    return cleaned_response.strip()
-if __name__ == '__main__':
+if __name__ == "__main__":
     from rich import print
-    ai = BLACKBOXAI()
-    response = ai.chat("tell me about india")
-    for chunk in response:
-        print(chunk, end="", flush=True)
+
+    # Example usage
+    ai = BLACKBOXAI(model="deepseek-v3", logging=True)
+
+    try:
+        print("Non-streaming response:")
+        response = ai.chat("What is quantum computing?")
+        print(response)
+
+    except Exception as e:
+        print(f"Error: {str(e)}")
webscout/Provider/ChatGPTGratis.py

@@ -0,0 +1,226 @@
+from typing import Any, Dict, Generator, Optional
+import requests
+import json
+
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.Litlogger import Logger, LogFormat
+from webscout import LitAgent as Lit
+
+
+class ChatGPTGratis(Provider):
+    """
+    A class to interact with the chatgptgratis.eu backend API with logging and real-time streaming.
+    """
+    AVAILABLE_MODELS = [
+        "Meta-Llama-3.2-1B-Instruct",
+        "Meta-Llama-3.2-3B-Instruct",
+        "Meta-Llama-3.1-8B-Instruct",
+        "Meta-Llama-3.1-70B-Instruct",
+        "Meta-Llama-3.1-405B-Instruct",
+        "gpt4o"
+
+    ]
+
+    def __init__(
+        self,
+        model: str = "gpt4o",
+        timeout: int = 30,
+        logging: bool = False,
+        proxies: Optional[Dict[str, str]] = None,
+        intro: Optional[str] = None,
+        filepath: Optional[str] = None,
+        update_file: bool = True,
+        history_offset: int = 10250,
+        act: Optional[str] = None,
+    ) -> None:
+        """
+        Initializes the ChatGPTGratis.
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.logger = Logger(
+            name="ChatGPTGratis",
+            format=LogFormat.MODERN_EMOJI,
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"Initializing ChatGPTGratis with model: {model}")
+
+        self.session = requests.Session()
+        self.timeout = timeout
+        self.api_endpoint = "https://chatgptgratis.eu/backend/chat.php"
+        self.model = model
+
+        # Set up headers similar to a browser request with dynamic User-Agent
+        self.headers = {
+            "Accept": "*/*",
+            "Content-Type": "application/json",
+            "Origin": "https://chatgptgratis.eu",
+            "Referer": "https://chatgptgratis.eu/chat.html",
+            "User-Agent": Lit().random(),
+        }
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies or {}
+
+        # Set up conversation history and prompts
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            True, 8096, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        if self.logger:
+            self.logger.info("ChatGPTGratis initialized successfully.")
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: Optional[str] = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+        """
+        Sends a request to the API and returns the response.
+        If stream is True, yields response chunks as they are received.
+        """
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            available_opts = (
+                method for method in dir(Optimizers)
+                if callable(getattr(Optimizers, method)) and not method.startswith("__")
+            )
+            if optimizer in available_opts:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
+            else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {list(available_opts)}")
+
+        payload = {
+            "message": conversation_prompt,
+            "model": self.model,
+
+        }
+
+        def for_stream() -> Generator[Dict[str, Any], None, None]:
+            if self.logger:
+                self.logger.debug("Initiating streaming request to API")
+            response = self.session.post(
+                self.api_endpoint,
+                json=payload,
+                stream=True,
+                timeout=self.timeout
+            )
+            if not response.ok:
+                if self.logger:
+                    self.logger.error(
+                        f"API request failed. Status: {response.status_code}, Reason: {response.reason}"
+                    )
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            if self.logger:
+                self.logger.info(f"API connection established. Status: {response.status_code}")
+
+            full_response = ""
+            for line in response.iter_lines():
+                if line:
+                    line_decoded = line.decode('utf-8').strip()
+                    if line_decoded == "data: [DONE]":
+                        if self.logger:
+                            self.logger.debug("Stream completed.")
+                        break
+                    if line_decoded.startswith("data: "):
+                        try:
+                            json_data = json.loads(line_decoded[6:])
+                            choices = json_data.get("choices", [])
+                            if choices and "delta" in choices[0]:
+                                content = choices[0]["delta"].get("content", "")
+                            else:
+                                content = ""
+                            full_response += content
+                            yield content if raw else {"text": content}
+                        except json.JSONDecodeError as e:
+                            if self.logger:
+                                self.logger.error(f"JSON parsing error: {str(e)}")
+                            continue
+            # Update last response and conversation history.
+            self.conversation.update_chat_history(prompt, self.get_message({"text": full_response}))
+            if self.logger:
+                self.logger.debug("Response processing completed.")
+
+        def for_non_stream() -> Dict[str, Any]:
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
+            collected = ""
+            for chunk in for_stream():
+                collected += chunk["text"] if isinstance(chunk, dict) else chunk
+            return {"text": collected}
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: Optional[str] = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+        """
+        Returns the response as a string.
+        For streaming requests, yields each response chunk as a string.
+        """
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
+
+        def stream_response() -> Generator[str, None, None]:
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def non_stream_response() -> str:
+            return self.get_message(self.ask(
+                prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+            ))
+
+        return stream_response() if stream else non_stream_response()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts and returns the text message from the response dictionary.
+        """
+        assert isinstance(response, dict), "Response must be a dictionary."
+        return response.get("text", "")
+
+
+if __name__ == "__main__":
+    from rich import print
+
+    # Create an instance of the ChatGPTGratis with logging enabled for testing.
+    client = ChatGPTGratis(
+        model="Meta-Llama-3.2-1B-Instruct",
+        logging=False
+    )
+    prompt_input = input(">>> ")
+    response = client.chat(prompt_input, stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
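
The module's own `__main__` block above exercises the streaming path; for completeness, a sketch of the non-streaming call, which collects the stream via `for_non_stream` and returns one string. The import path is inferred from the file's location in this wheel:

    from webscout.Provider.ChatGPTGratis import ChatGPTGratis

    client = ChatGPTGratis(model="gpt4o")       # default model; unknown names raise ValueError
    reply = client.chat("Say hello in Dutch.")  # stream=False returns the collected text
    print(reply)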