webscout 8.2.3__py3-none-any.whl → 8.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (87) hide show
  1. inferno/lol.py +589 -0
  2. webscout/AIutel.py +226 -14
  3. webscout/Bard.py +579 -206
  4. webscout/DWEBS.py +78 -35
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AllenAI.py +163 -126
  8. webscout/Provider/ChatGPTClone.py +96 -84
  9. webscout/Provider/Deepinfra.py +95 -67
  10. webscout/Provider/ElectronHub.py +55 -0
  11. webscout/Provider/GPTWeb.py +96 -46
  12. webscout/Provider/Groq.py +194 -91
  13. webscout/Provider/HeckAI.py +89 -47
  14. webscout/Provider/HuggingFaceChat.py +113 -106
  15. webscout/Provider/Hunyuan.py +94 -83
  16. webscout/Provider/Jadve.py +107 -75
  17. webscout/Provider/LambdaChat.py +106 -64
  18. webscout/Provider/Llama3.py +94 -39
  19. webscout/Provider/MCPCore.py +318 -0
  20. webscout/Provider/Marcus.py +85 -36
  21. webscout/Provider/Netwrck.py +76 -43
  22. webscout/Provider/OPENAI/__init__.py +4 -1
  23. webscout/Provider/OPENAI/ai4chat.py +286 -0
  24. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  25. webscout/Provider/OPENAI/deepinfra.py +37 -0
  26. webscout/Provider/OPENAI/groq.py +354 -0
  27. webscout/Provider/OPENAI/heckai.py +6 -2
  28. webscout/Provider/OPENAI/mcpcore.py +376 -0
  29. webscout/Provider/OPENAI/multichat.py +368 -0
  30. webscout/Provider/OPENAI/netwrck.py +3 -1
  31. webscout/Provider/OpenGPT.py +48 -38
  32. webscout/Provider/PI.py +168 -92
  33. webscout/Provider/PizzaGPT.py +66 -36
  34. webscout/Provider/TeachAnything.py +85 -51
  35. webscout/Provider/TextPollinationsAI.py +109 -51
  36. webscout/Provider/TwoAI.py +109 -60
  37. webscout/Provider/Venice.py +93 -56
  38. webscout/Provider/VercelAI.py +2 -2
  39. webscout/Provider/WiseCat.py +65 -28
  40. webscout/Provider/Writecream.py +37 -11
  41. webscout/Provider/WritingMate.py +135 -63
  42. webscout/Provider/__init__.py +3 -21
  43. webscout/Provider/ai4chat.py +6 -7
  44. webscout/Provider/copilot.py +0 -3
  45. webscout/Provider/elmo.py +101 -58
  46. webscout/Provider/granite.py +91 -46
  47. webscout/Provider/hermes.py +87 -47
  48. webscout/Provider/koala.py +1 -1
  49. webscout/Provider/learnfastai.py +104 -50
  50. webscout/Provider/llama3mitril.py +86 -51
  51. webscout/Provider/llmchat.py +88 -46
  52. webscout/Provider/llmchatco.py +74 -49
  53. webscout/Provider/meta.py +41 -37
  54. webscout/Provider/multichat.py +54 -25
  55. webscout/Provider/scnet.py +93 -43
  56. webscout/Provider/searchchat.py +82 -75
  57. webscout/Provider/sonus.py +103 -51
  58. webscout/Provider/toolbaz.py +132 -77
  59. webscout/Provider/turboseek.py +92 -41
  60. webscout/Provider/tutorai.py +82 -64
  61. webscout/Provider/typefully.py +75 -33
  62. webscout/Provider/typegpt.py +96 -35
  63. webscout/Provider/uncovr.py +112 -62
  64. webscout/Provider/x0gpt.py +69 -26
  65. webscout/Provider/yep.py +79 -66
  66. webscout/conversation.py +35 -21
  67. webscout/exceptions.py +20 -0
  68. webscout/prompt_manager.py +56 -42
  69. webscout/version.py +1 -1
  70. webscout/webscout_search.py +65 -47
  71. webscout/webscout_search_async.py +81 -126
  72. webscout/yep_search.py +93 -43
  73. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
  74. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
  75. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
  76. webscout/Provider/C4ai.py +0 -432
  77. webscout/Provider/ChatGPTES.py +0 -237
  78. webscout/Provider/DeepSeek.py +0 -196
  79. webscout/Provider/Llama.py +0 -200
  80. webscout/Provider/Phind.py +0 -535
  81. webscout/Provider/WebSim.py +0 -228
  82. webscout/Provider/labyrinth.py +0 -340
  83. webscout/Provider/lepton.py +0 -194
  84. webscout/Provider/llamatutor.py +0 -192
  85. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
  86. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
  87. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
webscout/Provider/C4ai.py DELETED
@@ -1,432 +0,0 @@
1
- import requests
2
- import uuid
3
- import json
4
- import time
5
- import random
6
- import re
7
- from typing import Any, Dict, List, Optional, Union, Generator
8
-
9
- from webscout.AIutel import Conversation
10
- from webscout.AIbase import Provider
11
- from webscout import exceptions
12
- from webscout.litagent import LitAgent
13
-
14
class C4ai(Provider):
    """
    Provider for the CohereForAI "C4AI Command" Hugging Face Space chat API.

    Talks to ``https://cohereforai-c4ai-command.hf.space``. The Space keeps
    conversation state server-side; this client caches one conversation id
    (and latest message id) per model in ``self._conversation_data``.
    """
    # Default available models. Used as a fallback; refreshed from the live
    # page in __init__ via update_available_models().
    AVAILABLE_MODELS = [
        'command-a-03-2025',
        'command-r-plus-08-2024',
        'command-r-08-2024',
        'command-r-plus',
        'command-r',
        'command-r7b-12-2024',
        'command-r7b-arabic-02-2025'
    ] # Placeholder for available models, It will be updated in the constructor

    def __repr__(self) -> str:
        return f"C4ai({self.model})"

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2000,
        timeout: int = 60,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        model: str = "command-a-03-2025",
        system_prompt: str = "You are a helpful assistant.",
    ):
        """Initialize the C4ai client.

        Args:
            is_conversation (bool): Keep conversational context (forwarded to
                the Conversation helper).
            max_tokens (int): Sampling limit, stored as
                ``max_tokens_to_sample`` and forwarded to Conversation.
            timeout (int): Per-request timeout in seconds.
            filepath (str): Optional path for persisting chat history.
            update_file (bool): Whether history updates are written to file.
            proxies (dict): Proxy mapping merged into the requests session.
                NOTE(review): mutable default argument — shared between
                instances that rely on the default; confirm intended.
            model (str): Model id. Not validated here against
                AVAILABLE_MODELS.
            system_prompt (str): Preprompt sent when creating a conversation.
        """
        self.url = "https://cohereforai-c4ai-command.hf.space"
        self.session = requests.Session()
        self.session.proxies.update(proxies)

        # Set up headers for all requests (browser-like fingerprint; the
        # User-Agent is randomized per instance by LitAgent).
        self.headers = {
            "Content-Type": "application/json",
            "User-Agent": LitAgent().random(),
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": "en-US,en;q=0.9",
            "Origin": "https://cohereforai-c4ai-command.hf.space",
            "Referer": "https://cohereforai-c4ai-command.hf.space/",
            "Sec-Ch-Ua": "\"Chromium\";v=\"120\"",
            "Sec-Ch-Ua-Mobile": "?0",
            "Sec-Ch-Ua-Platform": "\"Windows\"",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "DNT": "1",
            "Priority": "u=1, i"
        }

        # Update available models (network call; silently keeps the
        # class-level defaults on failure).
        self.update_available_models()

        # Set default model if none provided
        self.model = model
        # Provider settings
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}

        # Initialize a simplified conversation history for file saving only
        # (the Space tracks the actual dialogue state server-side).
        self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)

        # Store conversation data for different models:
        # {model: {"conversationId": str, "messageId": str}}
        self._conversation_data = {}
        self.preprompt = system_prompt

    def update_available_models(self):
        """Refresh AVAILABLE_MODELS from the live Space page.

        On success this shadows the class attribute with an instance
        attribute; on any failure the defaults are kept untouched.
        """
        try:
            models = self.get_models()
            if models and len(models) > 0:
                self.AVAILABLE_MODELS = models
        except Exception:
            # Fallback to default models list if fetching fails
            pass

    @classmethod
    def get_models(cls):
        """Fetch available model ids by scraping the Space's landing page.

        The page embeds a JS object literal (``models:[...]``); this method
        massages it into valid JSON (strips ``parameters`` sub-objects,
        rewrites ``void 0`` and quotes bare keys) before parsing.

        Returns:
            list[str]: Model ids, or the current AVAILABLE_MODELS on any
            failure.
        """
        try:
            response = requests.get("https://cohereforai-c4ai-command.hf.space/")
            text = response.text
            models_match = re.search(r'models:(\[.+?\]),oldModels:', text)

            if not models_match:
                return cls.AVAILABLE_MODELS

            models_text = models_match.group(1)
            # Drop nested parameters objects that would break key-quoting.
            models_text = re.sub(r',parameters:{[^}]+?}', '', models_text)
            models_text = models_text.replace('void 0', 'null')

            def add_quotation_mark(match):
                # Quote a bare JS object key so json.loads accepts it.
                return f'{match.group(1)}"{match.group(2)}":'

            models_text = re.sub(r'([{,])([A-Za-z0-9_]+?):', add_quotation_mark, models_text)

            models_data = json.loads(models_text)
            # print([model["id"] for model in models_data])
            return [model["id"] for model in models_data]
        except Exception:
            return cls.AVAILABLE_MODELS

    def create_conversation(self, model: str):
        """Create a new server-side conversation for ``model``.

        Also caches the new conversation id (with a fresh placeholder
        message id) in ``self._conversation_data``.

        Returns:
            str | None: The conversation id, or None on non-200 responses
            or request errors.

        Raises:
            exceptions.AuthenticationError: On HTTP 401.
        """
        url = "https://cohereforai-c4ai-command.hf.space/conversation"
        payload = {"model": model, "preprompt": self.preprompt,}

        # Update referer for this specific request
        headers = self.headers.copy()
        headers["Referer"] = f"https://cohereforai-c4ai-command.hf.space/"

        try:
            response = self.session.post(url, json=payload, headers=headers)

            if response.status_code == 401:
                raise exceptions.AuthenticationError("Authentication failed.")

            # Handle other error codes
            if response.status_code != 200:
                return None

            data = response.json()
            conversation_id = data.get("conversationId")

            # Store conversation data
            if model not in self._conversation_data:
                self._conversation_data[model] = {
                    "conversationId": conversation_id,
                    "messageId": str(uuid.uuid4()) # Initial message ID
                }

            return conversation_id
        except requests.exceptions.RequestException:
            return None

    def fetch_message_id(self, conversation_id: str) -> str:
        """Fetch the latest message id for a conversation.

        Parses the SvelteKit ``__data.json`` payload line by line looking
        for the node graph. Falls back to a random UUID whenever the
        payload cannot be parsed or reports an error node.
        """
        try:
            url = f"https://cohereforai-c4ai-command.hf.space/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
            response = self.session.get(url, headers=self.headers)
            response.raise_for_status()

            # Parse the JSON data from the response
            json_data = None
            for line in response.text.split('\n'):
                if line.strip():
                    try:
                        parsed = json.loads(line)
                        if isinstance(parsed, dict) and "nodes" in parsed:
                            json_data = parsed
                            break
                    except json.JSONDecodeError:
                        continue

            if not json_data:
                # Fall back to a UUID if we can't parse the response
                return str(uuid.uuid4())

            # Extract message ID using the same pattern as in the example
            if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
                return str(uuid.uuid4())

            # SvelteKit flattens the payload into an index-referenced array;
            # follow the indices: data[0]["messages"] -> key list -> last
            # message -> its "id" slot.
            data = json_data["nodes"][1]["data"]
            keys = data[data[0]["messages"]]
            message_keys = data[keys[-1]]
            message_id = data[message_keys["id"]]

            return message_id

        except Exception:
            # Fall back to a UUID if there's an error
            return str(uuid.uuid4())

    def generate_boundary(self):
        """Generate a random boundary for multipart/form-data requests."""
        boundary_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        boundary = "----WebKitFormBoundary"
        boundary += "".join(random.choice(boundary_chars) for _ in range(16))
        return boundary

    def process_response(self, response, prompt: str):
        """Yield text chunks from a streaming (JSON-lines) response.

        Yields dicts of the form ``{"text": ...}``; reasoning tokens are
        buffered and emitted once, wrapped in ``<think>`` tags with
        ``is_reasoning=True``, before the first answer text. Also updates
        ``last_response`` and the on-disk history when a history file is
        configured. The final ``return full_text`` only sets the
        generator's StopIteration value; callers consume the yields.
        """
        full_text = ""
        sources = None
        reasoning_text = ""
        has_reasoning = False

        for line in response.iter_lines(decode_unicode=True):
            if not line:
                continue

            try:
                # Parse each line as JSON
                data = json.loads(line)

                # Handle different response types
                if "type" not in data:
                    continue

                if data["type"] == "stream" and "token" in data:
                    # Strip NULs the backend occasionally embeds in tokens.
                    token = data["token"].replace("\u0000", "")
                    full_text += token
                    resp = {"text": token}
                    yield resp
                elif data["type"] == "finalAnswer":
                    # Only emit the final answer if nothing streamed before.
                    final_text = data.get("text", "")
                    if final_text and not full_text:
                        full_text = final_text
                        resp = {"text": final_text}
                        yield resp
                elif data["type"] == "webSearch" and "sources" in data:
                    # Collected but currently unused downstream.
                    sources = data["sources"]
                elif data["type"] == "reasoning":
                    has_reasoning = True
                    if data.get("subtype") == "stream" and "token" in data:
                        reasoning_text += data["token"]
                    # elif data.get("subtype") == "status":
                    #     # For status updates in reasoning, we can just append them as a comment
                    #     if data.get("status"):
                    #         reasoning_text += f"\n# {data['status']}"

                # If we have reasoning, prepend it to the next text output
                if reasoning_text and not full_text:
                    resp = {"text": f"<think>\n{reasoning_text}\n</think>\n", "is_reasoning": True}
                    yield resp

            except json.JSONDecodeError:
                continue

        # Update conversation history only for saving to file if needed
        if full_text and self.conversation.file:
            if has_reasoning:
                full_text_with_reasoning = f"<think>\n{reasoning_text}\n</think>\n{full_text}"
                self.last_response = {"text": full_text_with_reasoning}
                self.conversation.update_chat_history(prompt, full_text_with_reasoning)
            else:
                self.last_response = {"text": full_text}
                self.conversation.update_chat_history(prompt, full_text)

        return full_text

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        web_search: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """Send a message to the Space's chat endpoint.

        Args:
            prompt: Text sent as-is (the Space keeps conversation state).
            stream: If True return a generator of ``{"text": ...}`` dicts,
                otherwise a single accumulated dict.
            raw, optimizer, conversationally: Accepted for provider
                interface parity; not applied to the prompt here (only
                forwarded on the retry path).
            web_search: Forwarded to the backend request.

        Raises:
            exceptions.FailedToGenerateResponseError: When conversation
                creation or the request fails (after the model-fallback
                retry is exhausted).
            exceptions.AuthenticationError: On HTTP 401.
        """
        model = self.model

        # Check if we have a conversation for this model
        if model not in self._conversation_data:
            conversation_id = self.create_conversation(model)
            if not conversation_id:
                raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation with model {model}")
        else:
            conversation_id = self._conversation_data[model]["conversationId"]
            # Refresh message ID
            self._conversation_data[model]["messageId"] = self.fetch_message_id(conversation_id)

        url = f"https://cohereforai-c4ai-command.hf.space/conversation/{conversation_id}"
        message_id = self._conversation_data[model]["messageId"]

        # Data to send - use the prompt directly without generating a complete prompt
        # since HuggingFace maintains conversation state internally
        request_data = {
            "inputs": prompt,
            "id": message_id,
            "is_retry": False,
            "is_continue": False,
            "web_search": web_search,
            "tools": ["66e85bb396d054c5771bc6cb", "00000000000000000000000a"]
        }

        # Update headers for this specific request
        headers = self.headers.copy()
        headers["Referer"] = f"https://cohereforai-c4ai-command.hf.space/conversation/{conversation_id}"

        # Create multipart form data
        boundary = self.generate_boundary()
        multipart_headers = headers.copy()
        multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"

        # Serialize the data to JSON
        data_json = json.dumps(request_data, separators=(',', ':'))

        # Create the multipart form data body (single "data" part).
        body = f"--{boundary}\r\n"
        body += f'Content-Disposition: form-data; name="data"\r\n'
        body += f"Content-Type: application/json\r\n\r\n"
        body += f"{data_json}\r\n"
        body += f"--{boundary}--\r\n"

        multipart_headers["Content-Length"] = str(len(body))

        def for_stream():
            try:
                # Try with multipart/form-data first
                response = None
                try:
                    response = self.session.post(
                        url,
                        data=body,
                        headers=multipart_headers,
                        stream=True,
                        timeout=self.timeout
                    )
                except requests.exceptions.RequestException:
                    pass

                # If multipart fails or returns error, try with regular JSON
                if not response or response.status_code != 200:
                    response = self.session.post(
                        url,
                        json=request_data,
                        headers=headers,
                        stream=True,
                        timeout=self.timeout
                    )

                # If both methods fail, raise exception
                if response.status_code != 200:
                    raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")

                # Process the streaming response
                yield from self.process_response(response, prompt)

            except Exception as e:
                if isinstance(e, requests.exceptions.RequestException):
                    if hasattr(e, 'response') and e.response is not None:
                        status_code = e.response.status_code
                        if status_code == 401:
                            raise exceptions.AuthenticationError("Authentication failed.")

                # Try another model if current one fails.
                # NOTE(review): this recurses into self.ask() with the next
                # model; if every model keeps failing at conversation
                # creation the loop terminates, but repeated transient
                # failures can recurse once per available model.
                if len(self.AVAILABLE_MODELS) > 1:
                    current_model_index = self.AVAILABLE_MODELS.index(self.model) if self.model in self.AVAILABLE_MODELS else 0
                    next_model_index = (current_model_index + 1) % len(self.AVAILABLE_MODELS)
                    self.model = self.AVAILABLE_MODELS[next_model_index]

                    # Create new conversation with the alternate model
                    conversation_id = self.create_conversation(self.model)
                    if conversation_id:
                        # Try again with the new model
                        yield from self.ask(prompt, stream=True, raw=raw, optimizer=optimizer,
                                            conversationally=conversationally, web_search=web_search)
                        return

                # If we get here, all models failed
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")

        def for_non_stream():
            # Drain the stream and accumulate into a single response dict.
            response_text = ""
            for response in for_stream():
                if "text" in response:
                    response_text += response["text"]
            self.last_response = {"text": response_text}
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        web_search: bool = False
    ) -> Union[str, Generator]:
        """Generate a response to a prompt.

        Returns a string (stream=False) or a generator of text chunks
        (stream=True); thin wrapper over :meth:`ask` + :meth:`get_message`.
        """
        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally, web_search=web_search
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt, False, optimizer=optimizer, conversationally=conversationally, web_search=web_search
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract message text from a response dict (empty string if absent)."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text", "")
412
if __name__ == "__main__":
    # Smoke-test every advertised model and print a fixed-width report row
    # per model: name, pass/fail marker, and a truncated reply preview.
    rule = "-" * 80
    print(rule)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print(rule)

    for candidate in C4ai.AVAILABLE_MODELS:
        try:
            client = C4ai(model=candidate, timeout=60)
            reply = client.chat("Say 'Hello' in one word")
            trimmed = reply.strip() if reply else ""

            if trimmed:
                marker = "✓"
                # Truncate long replies so the table stays readable.
                shown = trimmed[:50] + "..." if len(trimmed) > 50 else trimmed
            else:
                marker = "✗"
                shown = "Empty or invalid response"
            print(f"{candidate:<50} {marker:<10} {shown}")
        except Exception as err:
            print(f"{candidate:<50} {'✗':<10} {str(err)}")
@@ -1,237 +0,0 @@
1
- import requests
2
- import re
3
- import json
4
- import os
5
- from typing import Union, List, Dict
6
- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
7
- from webscout.AIbase import Provider
8
- from webscout import exceptions
9
- from webscout.litagent import LitAgent
10
class ChatGPTES(Provider):
    """
    Provider for the ChatGPT.es WordPress AJAX chat endpoint.

    Scrapes a nonce and post id from ``https://chatgpt.es/`` and then posts
    messages to the ``wpaicg_chat_shortcode_message`` admin-ajax action.
    The endpoint does not stream; the full reply arrives in one JSON
    payload, so the ``stream`` arguments below are accepted for interface
    parity but have no effect.
    """

    # Model ids accepted by the backend; validated in __init__.
    SUPPORTED_MODELS = {
        'gpt-4o', 'gpt-4o-mini', 'chatgpt-4o-latest'
    }

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "chatgpt-4o-latest",  # Default model
        system_prompt: str = "You are a helpful assistant.",
    ):
        """
        Initializes the ChatGPT.es API client.

        Args:
            is_conversation (bool): Keep conversational context.
            max_tokens (int): Sampling limit stored as max_tokens_to_sample.
            timeout (int): Per-request timeout in seconds.
            intro (str): Optional conversation intro prompt.
            filepath (str): Optional path for persisting chat history.
            update_file (bool): Whether history updates are written to file.
            proxies (dict): Proxy mapping for the requests session.
                Defaults to no proxies. (None sentinel avoids the shared
                mutable-default pitfall of the previous ``{}`` default.)
            history_offset (int): History truncation offset.
            act (str): Optional AwesomePrompts persona key.
            model (str): Model id; must be in SUPPORTED_MODELS.
            system_prompt (str): Stored system prompt (not sent to the
                endpoint by ask(); kept for interface parity).

        Raises:
            ValueError: If ``model`` is not supported.
        """
        if model not in self.SUPPORTED_MODELS:
            raise ValueError(f"Unsupported model: {model}. Choose from: {self.SUPPORTED_MODELS}")

        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = 'https://chatgpt.es/wp-admin/admin-ajax.php'
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt
        self.model = model
        # Browser-like headers for the initial nonce-scraping GET.
        self.initial_headers = {
            'User-Agent': LitAgent().random(),
            'Referer': 'https://www.google.com/',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,'
                      'image/avif,image/webp,image/apng,*/*;q=0.8,'
                      'application/signed-exchange;v=b3;q=0.7',
        }
        # Headers for the admin-ajax POST; reuses the same User-Agent.
        self.post_headers = {
            'User-Agent': self.initial_headers['User-Agent'],
            'Referer': 'https://chatgpt.es/',
            'Origin': 'https://chatgpt.es',
            'Accept': '*/*',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        }
        # Lazily populated by get_nonce_and_post_id() on first ask().
        self.nonce = None
        self.post_id = None

        # BUGFIX: materialize the optimizer names into a tuple. The previous
        # generator expression was exhausted after a single membership test,
        # so a second ask(..., optimizer=...) call wrongly rejected valid
        # optimizers and the error message rendered an empty list.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        # Conversation setup
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        # Normalize the None sentinel back to the empty mapping requests expects.
        self.session.proxies = proxies if proxies is not None else {}

    def get_nonce_and_post_id(self):
        """
        Retrieves the nonce and post ID from the ChatGPT.es website.

        Raises:
            ConnectionError: If the page cannot be fetched.
            ValueError: If nonce/post id cannot be parsed from the HTML.
        """
        try:
            response = self.session.get('https://chatgpt.es/', headers=self.initial_headers, timeout=self.timeout)
            response.raise_for_status()
        except requests.RequestException as e:
            raise ConnectionError(f"Failed to retrieve nonce and post_id: {e}")

        nonce_match = re.search(r'data-nonce="(.+?)"', response.text)
        post_id_match = re.search(r'data-post-id="(.+?)"', response.text)

        if not nonce_match or not post_id_match:
            raise ValueError("Failed to parse nonce or post_id from the response.")

        self.nonce = nonce_match.group(1)
        self.post_id = post_id_match.group(1)

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
        """
        Chat with ChatGPT.es.

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Accepted for interface parity; the
                endpoint does not stream. Defaults to False.
            raw (bool, optional): Accepted for interface parity; unused.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.

        Returns:
            dict: Response dictionary of the form ``{"text": ...}``.

        Raises:
            ValueError: Unknown optimizer, or invalid JSON response.
            ConnectionError: Request to the endpoint failed.
            TypeError: Response JSON is not an object.
            KeyError: Response lacks a string 'data' field.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                optimizer_func = getattr(Optimizers, optimizer)
                conversation_prompt = optimizer_func(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise ValueError(f"Optimizer '{optimizer}' is not supported. "
                                 f"Available optimizers: {list(self.__available_optimizers)}")

        # Retrieve nonce and post_id if they are not set
        if not self.nonce or not self.post_id:
            self.get_nonce_and_post_id()

        messages = [
            {"role": "user", "content": conversation_prompt},
        ]

        # Prepare conversation history in the "Human:/AI:" format the
        # endpoint expects, with a language-pinning preamble.
        conversation = ["Human: strictly respond in the same language as my prompt, preferably English"]
        for msg in messages:
            role = "Human" if msg['role'] == "user" else "AI"
            conversation.append(f"{role}: {msg['content']}")

        payload = {
            '_wpnonce': self.nonce,
            'post_id': self.post_id,
            'url': 'https://chatgpt.es',
            'action': 'wpaicg_chat_shortcode_message',
            'message': messages[-1]['content'],
            'bot_id': '0',
            'chatbot_identity': 'shortcode',
            # Random per-request client id, as the web widget would send.
            'wpaicg_chat_client_id': os.urandom(5).hex(),
            'wpaicg_chat_history': json.dumps(conversation)
        }

        try:
            response = self.session.post(
                self.api_endpoint,
                headers=self.post_headers,
                data=payload,
                timeout=self.timeout
            )
            response.raise_for_status()
        except requests.RequestException as e:
            raise ConnectionError(f"Failed to send request to ChatGPT.es: {e}")

        try:
            response_data = response.json()
        except json.JSONDecodeError:
            raise ValueError(f"Invalid JSON response: {response.text}")

        if not isinstance(response_data, dict):
            raise TypeError(f"Expected response_data to be a dict, got {type(response_data)}")

        # Extract the message directly from the 'data' key
        message = response_data.get('data')
        if not isinstance(message, str):
            raise KeyError("Missing 'data' key in response or 'data' is not a string")

        self.last_response.update(dict(text=message))
        self.conversation.update_chat_history(
            prompt, self.get_message(self.last_response)
        )
        return self.last_response

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """
        Generate response as a string.

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Accepted for interface parity; the
                endpoint does not stream. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.

        Returns:
            str: Response generated.
        """
        response = self.ask(
            prompt,
            stream=stream,
            optimizer=optimizer,
            conversationally=conversationally,
        )
        return self.get_message(response)

    def get_message(self, response: dict) -> str:
        """
        Retrieves message only from response.

        Args:
            response (dict): Response generated by `self.ask`.

        Returns:
            str: Message extracted.
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
232
if __name__ == "__main__":
    # Minimal interactive smoke test: one prompt in, reply printed out.
    bot = ChatGPTES()
    reply = bot.chat(input(">>> "))
    # chat() returns a plain string; iterating it emits one character at
    # a time, flushed immediately to mimic streaming output.
    for piece in reply:
        print(piece, end="", flush=True)
-