webscout 7.6__py3-none-any.whl → 7.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (124) hide show
  1. webscout/AIutel.py +2 -1
  2. webscout/Bard.py +14 -11
  3. webscout/DWEBS.py +431 -415
  4. webscout/Extra/autocoder/autocoder_utiles.py +183 -47
  5. webscout/Extra/autocoder/rawdog.py +848 -649
  6. webscout/Extra/gguf.py +682 -652
  7. webscout/Provider/AI21.py +1 -1
  8. webscout/Provider/AISEARCH/DeepFind.py +2 -2
  9. webscout/Provider/AISEARCH/ISou.py +2 -23
  10. webscout/Provider/AISEARCH/felo_search.py +6 -6
  11. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  12. webscout/Provider/Aitopia.py +292 -0
  13. webscout/Provider/AllenAI.py +5 -22
  14. webscout/Provider/Andi.py +3 -3
  15. webscout/Provider/C4ai.py +1 -1
  16. webscout/Provider/ChatGPTClone.py +226 -0
  17. webscout/Provider/ChatGPTES.py +3 -5
  18. webscout/Provider/ChatGPTGratis.py +4 -4
  19. webscout/Provider/Chatify.py +2 -2
  20. webscout/Provider/Cloudflare.py +3 -2
  21. webscout/Provider/DARKAI.py +3 -2
  22. webscout/Provider/DeepSeek.py +2 -2
  23. webscout/Provider/Deepinfra.py +1 -1
  24. webscout/Provider/EDITEE.py +1 -1
  25. webscout/Provider/ElectronHub.py +178 -96
  26. webscout/Provider/ExaChat.py +310 -0
  27. webscout/Provider/Free2GPT.py +2 -2
  28. webscout/Provider/Gemini.py +5 -19
  29. webscout/Provider/GithubChat.py +1 -1
  30. webscout/Provider/Glider.py +12 -8
  31. webscout/Provider/Groq.py +3 -3
  32. webscout/Provider/HF_space/qwen_qwen2.py +1 -1
  33. webscout/Provider/HeckAI.py +1 -1
  34. webscout/Provider/HuggingFaceChat.py +1 -1
  35. webscout/Provider/Hunyuan.py +272 -0
  36. webscout/Provider/Jadve.py +3 -3
  37. webscout/Provider/Koboldai.py +3 -3
  38. webscout/Provider/LambdaChat.py +391 -0
  39. webscout/Provider/Llama.py +3 -5
  40. webscout/Provider/Llama3.py +4 -12
  41. webscout/Provider/Marcus.py +3 -3
  42. webscout/Provider/OLLAMA.py +260 -36
  43. webscout/Provider/Openai.py +7 -3
  44. webscout/Provider/PI.py +1 -1
  45. webscout/Provider/Perplexitylabs.py +1 -1
  46. webscout/Provider/Phind.py +1 -1
  47. webscout/Provider/PizzaGPT.py +1 -1
  48. webscout/Provider/QwenLM.py +4 -7
  49. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +21 -46
  50. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +37 -49
  51. webscout/Provider/TTI/ImgSys/__init__.py +23 -0
  52. webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
  53. webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
  54. webscout/Provider/TTI/__init__.py +3 -1
  55. webscout/Provider/TTI/artbit/async_artbit.py +4 -33
  56. webscout/Provider/TTI/artbit/sync_artbit.py +4 -32
  57. webscout/Provider/TTI/fastflux/async_fastflux.py +6 -2
  58. webscout/Provider/TTI/fastflux/sync_fastflux.py +7 -2
  59. webscout/Provider/TTI/huggingface/async_huggingface.py +1 -1
  60. webscout/Provider/TTI/huggingface/sync_huggingface.py +1 -1
  61. webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
  62. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
  63. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
  64. webscout/Provider/TTI/talkai/sync_talkai.py +1 -1
  65. webscout/Provider/TTS/utils.py +1 -1
  66. webscout/Provider/TeachAnything.py +1 -1
  67. webscout/Provider/TextPollinationsAI.py +4 -4
  68. webscout/Provider/TwoAI.py +1 -2
  69. webscout/Provider/Venice.py +4 -2
  70. webscout/Provider/VercelAI.py +234 -0
  71. webscout/Provider/WebSim.py +228 -0
  72. webscout/Provider/WiseCat.py +10 -12
  73. webscout/Provider/Youchat.py +1 -1
  74. webscout/Provider/__init__.py +22 -1
  75. webscout/Provider/ai4chat.py +1 -1
  76. webscout/Provider/aimathgpt.py +2 -6
  77. webscout/Provider/akashgpt.py +1 -1
  78. webscout/Provider/askmyai.py +4 -4
  79. webscout/Provider/asksteve.py +203 -0
  80. webscout/Provider/bagoodex.py +2 -2
  81. webscout/Provider/cerebras.py +1 -1
  82. webscout/Provider/chatglm.py +4 -4
  83. webscout/Provider/cleeai.py +1 -0
  84. webscout/Provider/copilot.py +427 -415
  85. webscout/Provider/elmo.py +1 -1
  86. webscout/Provider/flowith.py +14 -3
  87. webscout/Provider/freeaichat.py +57 -31
  88. webscout/Provider/gaurish.py +3 -5
  89. webscout/Provider/geminiprorealtime.py +1 -1
  90. webscout/Provider/granite.py +4 -4
  91. webscout/Provider/hermes.py +5 -5
  92. webscout/Provider/julius.py +1 -1
  93. webscout/Provider/koala.py +1 -1
  94. webscout/Provider/labyrinth.py +239 -0
  95. webscout/Provider/learnfastai.py +28 -15
  96. webscout/Provider/lepton.py +1 -1
  97. webscout/Provider/llama3mitril.py +4 -4
  98. webscout/Provider/llamatutor.py +1 -1
  99. webscout/Provider/llmchat.py +3 -3
  100. webscout/Provider/meta.py +1 -1
  101. webscout/Provider/multichat.py +10 -10
  102. webscout/Provider/promptrefine.py +1 -1
  103. webscout/Provider/searchchat.py +293 -0
  104. webscout/Provider/sonus.py +208 -0
  105. webscout/Provider/talkai.py +2 -2
  106. webscout/Provider/turboseek.py +1 -1
  107. webscout/Provider/tutorai.py +1 -1
  108. webscout/Provider/typegpt.py +6 -43
  109. webscout/Provider/uncovr.py +299 -0
  110. webscout/Provider/x0gpt.py +1 -1
  111. webscout/__init__.py +36 -36
  112. webscout/cli.py +293 -283
  113. webscout/litagent/agent.py +14 -9
  114. webscout/tempid.py +11 -11
  115. webscout/utils.py +2 -2
  116. webscout/version.py +1 -1
  117. webscout/webscout_search.py +1282 -1223
  118. webscout/webscout_search_async.py +813 -692
  119. {webscout-7.6.dist-info → webscout-7.8.dist-info}/METADATA +76 -44
  120. {webscout-7.6.dist-info → webscout-7.8.dist-info}/RECORD +124 -106
  121. {webscout-7.6.dist-info → webscout-7.8.dist-info}/LICENSE.md +0 -0
  122. {webscout-7.6.dist-info → webscout-7.8.dist-info}/WHEEL +0 -0
  123. {webscout-7.6.dist-info → webscout-7.8.dist-info}/entry_points.txt +0 -0
  124. {webscout-7.6.dist-info → webscout-7.8.dist-info}/top_level.txt +0 -0
webscout/Provider/elmo.py CHANGED
@@ -3,7 +3,7 @@ from webscout.AIutel import Optimizers
3
3
  from webscout.AIutel import Conversation
4
4
  from webscout.AIutel import AwesomePrompts
5
5
  from webscout.AIbase import Provider
6
- from webscout import LitAgent
6
+ from webscout.litagent import LitAgent
7
7
 
8
8
  class Elmo(Provider):
9
9
  """
@@ -10,7 +10,7 @@ from webscout.AIutel import Conversation
10
10
  from webscout.AIutel import AwesomePrompts, sanitize_stream
11
11
  from webscout.AIbase import Provider, AsyncProvider
12
12
  from webscout import exceptions
13
- from webscout import LitAgent
13
+ from webscout.litagent import LitAgent
14
14
 
15
15
  class Flowith(Provider):
16
16
  """
@@ -109,6 +109,17 @@ class Flowith(Provider):
109
109
 
110
110
  return text.strip()
111
111
 
112
def decode_response(self, content):
    """Decode raw response bytes, trying several encodings in order.

    Args:
        content (bytes): Raw HTTP response body.

    Returns:
        str: Decoded text. UTF-8 is attempted first, then cp1252;
        latin-1 is the final fallback because it maps every possible
        byte value and therefore can never fail.
    """
    # NOTE: the previous implementation listed latin-1 *before* cp1252
    # ('utf-8', 'latin1', 'iso-8859-1', 'cp1252').  Since latin-1 decodes
    # ANY byte sequence, 'iso-8859-1' (an alias of latin-1) and 'cp1252'
    # were unreachable dead code.  Try the stricter codecs first and keep
    # latin-1 as the guaranteed last resort.
    for encoding in ('utf-8', 'cp1252'):
        try:
            return content.decode(encoding)
        except UnicodeDecodeError:
            continue
    return content.decode('latin1')
122
+
112
123
  def ask(
113
124
  self,
114
125
  prompt: str,
@@ -143,8 +154,8 @@ class Flowith(Provider):
143
154
  f"Request failed with status code {response.status_code}"
144
155
  )
145
156
 
146
- # Get the response text directly
147
- response_text = response.text.strip()
157
+ # Get the response text using our multi-encoding decoder
158
+ response_text = self.decode_response(response.content).strip()
148
159
 
149
160
  # Clean the response
150
161
  cleaned_text = self.clean_response(response_text)
@@ -8,39 +8,61 @@ from webscout.AIutel import Conversation
8
8
  from webscout.AIutel import AwesomePrompts, sanitize_stream
9
9
  from webscout.AIbase import Provider, AsyncProvider
10
10
  from webscout import exceptions
11
- from webscout import LitAgent
11
+ from webscout.litagent import LitAgent
12
12
 
13
13
  class FreeAIChat(Provider):
14
14
  """
15
- A class to interact with the FreeAIChat API with LitAgent user-agent.
15
+ A class to interact with the FreeAIChat API
16
16
  """
17
17
 
18
18
  AVAILABLE_MODELS = [
19
- "mistral-nemo",
20
- "mistral-large",
21
- "gemini-2.0-flash",
22
- "gemini-1.5-pro",
23
- "gemini-1.5-flash",
24
- "gemini-2.0-pro-exp-02-05",
25
- # "deepseek-r1", >>>> NOT WORKING
26
- "deepseek-v3",
27
- # "Deepseek r1 14B", >>>> NOT WORKING
28
- # "Deepseek r1 32B", >>>> NOT WORKING
29
- "o3-mini-high",
30
- "o3-mini-medium",
31
- "o3-mini-low",
32
- "o3-mini",
33
- "GPT-4o-mini",
34
- "o1",
35
- "o1-mini",
36
- "GPT-4o",
37
- "Qwen coder",
38
- # "Qwen 2.5 72B", >>>> NOT WORKING
39
- "Llama 3.1 405B",
40
- # "llama3.1-70b-fast", >>>> NOT WORKING
41
- # "Llama 3.3 70B", >>>> NOT WORKING
42
- "claude 3.5 haiku",
19
+ # OpenAI Models
20
+ "GPT 4o",
21
+ "GPT 4o Latest",
22
+ "GPT 4o mini",
23
+ "GPT 4o Search Preview",
24
+ "O1",
25
+ "O1 Mini",
26
+ "O3 Mini",
27
+ "O3 Mini High",
28
+ "O3 Mini Low",
29
+
30
+ # Anthropic Models
43
31
  "claude 3.5 sonnet",
32
+ "Claude 3.7 Sonnet",
33
+ "Claude 3.7 Sonnet (Thinking)",
34
+
35
+ # Deepseek Models
36
+ "Deepseek R1",
37
+ "Deepseek R1 Fast",
38
+ "Deepseek V3",
39
+ "Deepseek v3 0324",
40
+
41
+ # Google Models
42
+ "Gemini 1.5 Flash",
43
+ "Gemini 1.5 Pro",
44
+ "Gemini 2.0 Pro",
45
+ "Gemini 2.0 Flash",
46
+ "Gemini 2.5 Pro",
47
+
48
+ # Llama Models
49
+ "Llama 3.1 405B",
50
+ "Llama 3.1 70B Fast",
51
+ "Llama 3.3 70B",
52
+
53
+ # Mistral Models
54
+ "Mistral Large",
55
+ "Mistral Nemo",
56
+
57
+ # Qwen Models
58
+ "Qwen Max",
59
+ "Qwen Plus",
60
+ "Qwen Turbo",
61
+ "QwQ 32B",
62
+ "QwQ Plus",
63
+
64
+ # XAI Models
65
+ "Grok 2"
44
66
  ]
45
67
 
46
68
  def __init__(
@@ -119,12 +141,16 @@ class FreeAIChat(Provider):
119
141
 
120
142
  messages = [
121
143
  {
122
- "role": "system",
123
- "content": self.system_prompt
124
- },
125
- {
144
+ "id": str(int(time.time() * 1000)),
126
145
  "role": "user",
127
- "content": conversation_prompt
146
+ "content": conversation_prompt,
147
+ "model": {
148
+ # "id": "14",
149
+ "name": self.model,
150
+ # "icon": "https://cdn-avatars.huggingface.co/v1/production/uploads/1620805164087-5ec0135ded25d76864d553f1.png",
151
+ # "provider": "openAI",
152
+ # "contextWindow": 63920
153
+ }
128
154
  }
129
155
  ]
130
156
 
@@ -1,14 +1,12 @@
1
1
  import requests
2
2
  import json
3
- from typing import Any, Dict, Generator, Union
4
- import uuid
3
+ from typing import Dict, Generator, Union
5
4
 
6
5
  from webscout.AIutel import Optimizers
7
6
  from webscout.AIutel import Conversation
8
- from webscout.AIutel import AwesomePrompts, sanitize_stream
9
- from webscout.AIbase import Provider, AsyncProvider
7
+ from webscout.AIutel import AwesomePrompts
8
+ from webscout.AIbase import Provider
10
9
  from webscout import exceptions
11
- from webscout import LitAgent
12
10
  from webscout.Litlogger import Logger, LogFormat
13
11
 
14
12
  class GaurishCerebras(Provider):
@@ -9,7 +9,7 @@ from webscout.AIutel import Conversation
9
9
  from webscout.AIutel import AwesomePrompts, sanitize_stream
10
10
  from webscout.AIbase import Provider, AsyncProvider
11
11
  from webscout import exceptions
12
- from webscout import LitAgent
12
+ from webscout.litagent import LitAgent
13
13
 
14
14
  class GeminiPro(Provider):
15
15
  """
@@ -1,11 +1,11 @@
1
1
  import requests
2
2
  import json
3
- from typing import Any, Dict, Generator
3
+ from typing import Union, Any, Dict, Generator
4
4
 
5
5
  from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
6
6
  from webscout.AIbase import Provider
7
7
  from webscout import exceptions
8
- from webscout import LitAgent as Lit
8
+ from webscout.litagent import LitAgent as Lit
9
9
 
10
10
  class IBMGranite(Provider):
11
11
  """
@@ -81,7 +81,7 @@ class IBMGranite(Provider):
81
81
  raw: bool = False,
82
82
  optimizer: str = None,
83
83
  conversationally: bool = False,
84
- ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
84
+ ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
85
85
  """Chat with AI
86
86
  Args:
87
87
  prompt (str): Prompt to be sent.
@@ -157,7 +157,7 @@ class IBMGranite(Provider):
157
157
  stream: bool = False,
158
158
  optimizer: str = None,
159
159
  conversationally: bool = False,
160
- ) -> str | Generator[str, None, None]:
160
+ ) -> Union[str, Generator[str, None, None]]:
161
161
  """Generate response as a string using chat method"""
162
162
  def for_stream():
163
163
  for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
@@ -1,13 +1,13 @@
1
1
  import requests
2
2
  import json
3
- from typing import Any, Dict, Generator, Optional
3
+ from typing import Union, Any, Dict, Generator, Optional
4
4
 
5
5
  from webscout.AIutel import Optimizers
6
6
  from webscout.AIutel import Conversation
7
7
  from webscout.AIutel import AwesomePrompts
8
8
  from webscout.AIbase import Provider
9
9
  from webscout import exceptions
10
-
10
+ from webscout.litagent import LitAgent
11
11
  class NousHermes(Provider):
12
12
  """
13
13
  A class to interact with the Hermes API.
@@ -56,7 +56,7 @@ class NousHermes(Provider):
56
56
  'content-type': 'application/json',
57
57
  'origin': 'https://hermes.nousresearch.com',
58
58
  'referer': 'https://hermes.nousresearch.com/',
59
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36',
59
+ 'user-agent': LitAgent().random(),
60
60
  'cookie': self.cookies
61
61
  }
62
62
 
@@ -99,7 +99,7 @@ class NousHermes(Provider):
99
99
  raw: bool = False,
100
100
  optimizer: str = None,
101
101
  conversationally: bool = False,
102
- ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
102
+ ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
103
103
  """Chat with AI
104
104
  Args:
105
105
  prompt (str): Prompt to be send.
@@ -169,7 +169,7 @@ class NousHermes(Provider):
169
169
  stream: bool = False,
170
170
  optimizer: str = None,
171
171
  conversationally: bool = False,
172
- ) -> str | Generator[str, None, None]:
172
+ ) -> Union[str, Generator[str, None, None]]:
173
173
  """Generate response `str`
174
174
  Args:
175
175
  prompt (str): Prompt to be send.
@@ -8,7 +8,7 @@ from webscout.AIutel import Conversation
8
8
  from webscout.AIutel import AwesomePrompts, sanitize_stream
9
9
  from webscout.AIbase import Provider, AsyncProvider
10
10
  from webscout import exceptions
11
- from typing import Any, AsyncGenerator, Dict
11
+ from typing import Union, Any, AsyncGenerator, Dict
12
12
 
13
13
 
14
14
  class Julius(Provider):
@@ -1,6 +1,6 @@
1
1
  import requests
2
2
  import json
3
- from typing import Any, Dict, Optional
3
+ from typing import Union, Any, Dict, Optional
4
4
  from webscout.AIutel import Optimizers
5
5
  from webscout.AIutel import Conversation
6
6
  from webscout.AIutel import AwesomePrompts, sanitize_stream
@@ -0,0 +1,239 @@
1
+ import requests
2
+ import json
3
+ import uuid
4
+ from typing import Any, Dict, Optional, Generator, Union
5
+ from webscout.AIutel import Optimizers
6
+ from webscout.AIutel import Conversation
7
+ from webscout.AIutel import AwesomePrompts
8
+ from webscout.AIbase import Provider
9
+ from webscout import exceptions
10
+ from webscout.litagent import LitAgent
11
+
12
class LabyrinthAI(Provider):
    """
    A class to interact with the Labyrinth AI chat API.

    Sends chat prompts to the Labyrinth Vercel endpoint and parses its
    line-based streaming protocol: lines prefixed with ``0:`` carry text
    chunks.  A LitAgent-generated browser fingerprint is used for headers
    and can be refreshed on 403/429 responses.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        browser: str = "chrome"
    ):
        """Initializes the Labyrinth AI API client.

        Args:
            is_conversation (bool): Maintain chat history across calls.
            max_tokens (int): Token budget forwarded to Conversation.
            timeout (int): Per-request timeout in seconds.
            intro (str): Optional intro/system text for the conversation.
            filepath (str): Optional path for persisting chat history.
            update_file (bool): Whether to write history back to filepath.
            proxies (dict): Optional requests-style proxy mapping.
            history_offset (int): History truncation offset.
            act (str): Optional AwesomePrompts persona key.
            browser (str): Browser family used for fingerprint generation.
        """
        self.url = "https://labyrinth-ebon.vercel.app/api/chat"

        # LitAgent provides a coherent browser identity (UA + client hints)
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Headers are derived from the fingerprint; the Cookie value is a
        # hard-coded session captured from the web app (may expire).
        self.headers = {
            "Accept": self.fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": self.fingerprint["accept_language"],
            "Content-Type": "application/json",
            "Origin": "https://labyrinth-ebon.vercel.app",
            "Cookie": "stock-mode=false; __Host-next-auth.csrf-token=68aa6224f2ff7bbf2c4480a90c49b7b95aaac01a63ed90f3d20a69292c16a366%7C1f6672653c6e304ea971373fecdc3fe491568d014c68cdf3b26ead42f1c6ac62; __Secure-next-auth.callback-url=https%3A%2F%2Flabyrinth-ebon.vercel.app%2F; selectedModel={\"id\":\"gemini-2.0-flash\",\"name\":\"Gemini 2.0 Flash\",\"provider\":\"Google Generative AI\",\"providerId\":\"google\",\"enabled\":true,\"toolCallType\":\"native\",\"searchMode\":true}; __Secure-next-auth.session-token=eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..Z5-1j_rsCWRHY17B.s0lMkhWr0S7a3-4h2p-ce0NJHeNyh8nDyOcsrzFU8AZtBbygGcHKbJ8PzLLQBNL7NwrUwET3fKGbtnAphaVjuSJQfXA0tu69zKJELPw-A3x0Ev6aHJMTG3l9_SweByHyfCSCnGB7tvjwEFsW4c5xs_HzMdPmoRTYyYzlZPuDGhHtQX7WyeUiARc36NfwV-KJYpzXV5-g0VkpsxFEawcfdk6D_S7JtOMmjMTTYuw2BbNYvtlvM-n_XivIctQmQ5Fp65JEE73nr5hWVReyYrkyfUGt4Q.TP8Woa-7Ao05yVCjbbGDug",
            "Referer": "https://labyrinth-ebon.vercel.app/",
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Sec-GPC": "1"
        }

        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies)

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def refresh_identity(self, browser: str = None):
        """
        Refreshes the browser identity fingerprint.

        Args:
            browser: Specific browser to use for the new fingerprint.

        Returns:
            dict: The newly generated fingerprint.
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update headers with the new fingerprint; keep the previous
        # Sec-CH-UA if the new fingerprint does not supply one.
        self.headers.update({
            "Accept": self.fingerprint["accept"],
            "Accept-Language": self.fingerprint["accept_language"],
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        # Propagate to the live session
        for header, value in self.headers.items():
            self.session.headers[header] = value

        return self.fingerprint

    @staticmethod
    def _parse_chunk(line: bytes):
        """Extract the text payload from one protocol line, or None.

        Lines starting with ``0:`` carry quoted text chunks; everything
        else (metadata frames, undecodable bytes) is ignored.
        """
        try:
            decoded = line.decode('utf-8')
        except UnicodeDecodeError:
            return None
        if decoded.startswith('0:'):
            return decoded[2:].strip('"')
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """Send *prompt* to the API.

        Args:
            prompt (str): Prompt to be sent.
            stream (bool): Yield chunks as they arrive instead of one dict.
            raw (bool): When streaming, yield plain text chunks instead of
                ``{"text": ...}`` dicts.
            optimizer (str): Optional Optimizers method name.
            conversationally (bool): Apply optimizer to the full
                conversation prompt rather than the bare prompt.

        Returns:
            Union[Dict[str, Any], Generator]: Response dict, or a generator
            of chunks when streaming.

        Raises:
            exceptions.FailedToGenerateResponseError: On HTTP failure.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Prepare the request payload
        payload = {
            "id": str(uuid.uuid4()),
            "messages": [
                {
                    "role": "user",
                    "content": conversation_prompt,
                    "parts": [{"type": "text", "text": conversation_prompt}]
                }
            ],
            "stockMode": False
        }

        def _post(stream_mode):
            """POST the payload; on 403/429 refresh identity and retry once.

            The retry response is NOT wrapped in a ``with`` block: the
            original code assigned ``response = retry_response`` inside a
            nested context manager, which closed the streamed response
            before ``iter_lines()`` could consume it.
            """
            response = self.session.post(
                self.url, json=payload, stream=stream_mode, timeout=self.timeout
            )
            if response.status_code != 200:
                if response.status_code in [403, 429]:
                    self.refresh_identity()
                    response = self.session.post(
                        self.url, json=payload, stream=stream_mode, timeout=self.timeout
                    )
                    if not response.ok:
                        raise exceptions.FailedToGenerateResponseError(
                            f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
                        )
                else:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code}"
                    )
            return response

        def for_stream():
            try:
                response = _post(stream_mode=True)
                streaming_text = ""
                try:
                    for line in response.iter_lines():
                        if not line:
                            continue
                        content = self._parse_chunk(line)
                        if content is None:
                            continue
                        streaming_text += content
                        # Honor the raw flag (previously dead: both
                        # branches yielded the same dict).
                        yield content if raw else dict(text=content)
                finally:
                    response.close()

                self.last_response = {"text": streaming_text}
                self.conversation.update_chat_history(prompt, streaming_text)

            except requests.RequestException as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")

        def for_non_stream():
            try:
                response = _post(stream_mode=False)

                full_response = ""
                for line in response.iter_lines():
                    if not line:
                        continue
                    content = self._parse_chunk(line)
                    if content is not None:
                        full_response += content

                self.last_response = {"text": full_response}
                self.conversation.update_chat_history(prompt, full_response)
                return {"text": full_response}
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Generate a response as plain text (or a text generator)."""
        def for_stream():
            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                yield self.get_message(response)
        def for_non_stream():
            return self.get_message(
                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
            )
        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract the text from a response dict, un-escaping literal \\n.

        The original chained a second ``.replace('\\\\n\\\\n', ...)`` which
        was a no-op: after the first replace no ``\\n`` escape remains.
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"].replace('\\n', '\n')
@@ -1,6 +1,6 @@
1
1
  import os
2
2
  import json
3
- from typing import Optional
3
+ from typing import Optional, Union, Generator
4
4
  import uuid
5
5
  import requests
6
6
  import cloudscraper
@@ -118,7 +118,9 @@ class LearnFast(Provider):
118
118
  """
119
119
  payload = {
120
120
  "prompt": conversation_prompt,
121
+ "firstQuestionFlag": True,
121
122
  "sessionId": session_id,
123
+ "attachments": []
122
124
  }
123
125
  if image_url:
124
126
  payload["attachments"] = [
@@ -138,7 +140,7 @@ class LearnFast(Provider):
138
140
  optimizer: str = None,
139
141
  conversationally: bool = False,
140
142
  image_path: Optional[str] = None,
141
- ) -> dict:
143
+ ) -> Union[dict, Generator[dict, None, None]]:
142
144
  """Chat with LearnFast
143
145
 
144
146
  Args:
@@ -151,7 +153,7 @@ class LearnFast(Provider):
151
153
  Defaults to None.
152
154
 
153
155
  Returns:
154
- dict : {}
156
+ Union[dict, Generator[dict, None, None]]: Response generated
155
157
  """
156
158
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
157
159
  if optimizer:
@@ -194,20 +196,24 @@ class LearnFast(Provider):
194
196
  full_response = ""
195
197
  for line in response.iter_lines(decode_unicode=True):
196
198
  if line:
197
- if line.strip() == "[DONE]":
199
+ line = line.strip()
200
+ if line == "[DONE]":
198
201
  break
199
202
  try:
200
203
  json_response = json.loads(line)
201
- message = json_response.get('data', {}).get('message', '')
202
- if message:
203
- full_response += message
204
- # print(message, end='', flush=True)
204
+ if json_response.get('code') == 200 and json_response.get('data'):
205
+ message = json_response['data'].get('message', '')
206
+ if message:
207
+ full_response += message
208
+ if stream:
209
+ yield {"text": message}
205
210
  except json.JSONDecodeError:
206
- print(f"\nFailed to parse JSON: {line}")
211
+ pass
207
212
  self.last_response.update({"text": full_response})
208
213
  self.conversation.update_chat_history(prompt, full_response)
209
214
 
210
- return self.last_response
215
+ if not stream:
216
+ return self.last_response
211
217
  except requests.exceptions.RequestException as e:
212
218
  raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")
213
219
 
@@ -218,7 +224,7 @@ class LearnFast(Provider):
218
224
  optimizer: str = None,
219
225
  conversationally: bool = False,
220
226
  image_path: Optional[str] = None,
221
- ) -> str:
227
+ ) -> Union[str, Generator[str, None, None]]:
222
228
  """Generate response `str`
223
229
  Args:
224
230
  prompt (str): Prompt to be send.
@@ -228,10 +234,17 @@ class LearnFast(Provider):
228
234
  image_path (Optional[str], optional): Path to the image to be uploaded.
229
235
  Defaults to None.
230
236
  Returns:
231
- str: Response generated
237
+ Union[str, Generator[str, None, None]]: Response generated
232
238
  """
233
- response = self.ask(prompt, stream, optimizer=optimizer, conversationally=conversationally, image_path=image_path)
234
- return self.get_message(response)
239
+ try:
240
+ response = self.ask(prompt, stream, optimizer=optimizer, conversationally=conversationally, image_path=image_path)
241
+ if stream:
242
+ for chunk in response:
243
+ yield chunk["text"]
244
+ else:
245
+ return str(response)
246
+ except Exception as e:
247
+ return f"Error: {str(e)}"
235
248
 
236
249
  def get_message(self, response: dict) -> str:
237
250
  """Retrieves message only from response
@@ -248,6 +261,6 @@ class LearnFast(Provider):
248
261
  if __name__ == "__main__":
249
262
  from rich import print
250
263
  ai = LearnFast()
251
- response = ai.chat(input(">>> "), image_path=None)
264
+ response = ai.chat(input(">>> "), stream=True)
252
265
  for chunk in response:
253
266
  print(chunk, end="", flush=True)
@@ -6,7 +6,7 @@ from webscout.AIutel import Optimizers
6
6
  from webscout.AIutel import Conversation
7
7
  from webscout.AIutel import AwesomePrompts
8
8
  from webscout.AIbase import Provider
9
- from webscout import LitAgent as Lit
9
+ from webscout.litagent import LitAgent as Lit
10
10
  class Lepton(Provider):
11
11
  """
12
12
  A class to interact with the Lepton.run API.
@@ -1,13 +1,13 @@
1
1
  import requests
2
2
  import json
3
3
  import re
4
- from typing import Any, Dict, Optional, Generator
4
+ from typing import Union, Any, Dict, Optional, Generator
5
5
  from webscout.AIutel import Optimizers
6
6
  from webscout.AIutel import Conversation
7
7
  from webscout.AIutel import AwesomePrompts
8
8
  from webscout.AIbase import Provider
9
9
  from webscout import exceptions
10
- from webscout import LitAgent as Lit
10
+ from webscout.litagent import LitAgent as Lit
11
11
 
12
12
  class Llama3Mitril(Provider):
13
13
  """
@@ -77,7 +77,7 @@ class Llama3Mitril(Provider):
77
77
  raw: bool = False,
78
78
  optimizer: str = None,
79
79
  conversationally: bool = False,
80
- ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
80
+ ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
81
81
  """Sends a prompt to the Llama3 Mitril API and returns the response."""
82
82
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
83
83
  if optimizer:
@@ -143,7 +143,7 @@ class Llama3Mitril(Provider):
143
143
  stream: bool = True,
144
144
  optimizer: str = None,
145
145
  conversationally: bool = False,
146
- ) -> str | Generator[str, None, None]:
146
+ ) -> Union[str, Generator[str, None, None]]:
147
147
  """Generates a response from the Llama3 Mitril API."""
148
148
 
149
149
  def for_stream():