webscout 7.4__py3-none-any.whl → 7.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. See the registry's advisory page for more details.

Files changed (137)
  1. webscout/AIauto.py +5 -53
  2. webscout/AIutel.py +8 -318
  3. webscout/DWEBS.py +460 -489
  4. webscout/Extra/YTToolkit/YTdownloader.py +14 -53
  5. webscout/Extra/YTToolkit/transcriber.py +12 -13
  6. webscout/Extra/YTToolkit/ytapi/video.py +0 -1
  7. webscout/Extra/__init__.py +0 -1
  8. webscout/Extra/autocoder/autocoder_utiles.py +0 -4
  9. webscout/Extra/autocoder/rawdog.py +13 -41
  10. webscout/Extra/gguf.py +652 -428
  11. webscout/Extra/weather.py +178 -156
  12. webscout/Extra/weather_ascii.py +70 -17
  13. webscout/Litlogger/core/logger.py +1 -2
  14. webscout/Litlogger/handlers/file.py +1 -1
  15. webscout/Litlogger/styles/formats.py +0 -2
  16. webscout/Litlogger/utils/detectors.py +0 -1
  17. webscout/Provider/AISEARCH/DeepFind.py +0 -1
  18. webscout/Provider/AISEARCH/ISou.py +1 -1
  19. webscout/Provider/AISEARCH/felo_search.py +0 -1
  20. webscout/Provider/AllenAI.py +24 -9
  21. webscout/Provider/C4ai.py +432 -0
  22. webscout/Provider/ChatGPTGratis.py +24 -56
  23. webscout/Provider/Cloudflare.py +18 -21
  24. webscout/Provider/DeepSeek.py +27 -48
  25. webscout/Provider/Deepinfra.py +129 -53
  26. webscout/Provider/Gemini.py +1 -1
  27. webscout/Provider/GithubChat.py +362 -0
  28. webscout/Provider/Glider.py +25 -8
  29. webscout/Provider/HF_space/qwen_qwen2.py +2 -2
  30. webscout/Provider/HeckAI.py +38 -5
  31. webscout/Provider/HuggingFaceChat.py +462 -0
  32. webscout/Provider/Jadve.py +20 -5
  33. webscout/Provider/Marcus.py +7 -50
  34. webscout/Provider/Netwrck.py +43 -67
  35. webscout/Provider/PI.py +4 -2
  36. webscout/Provider/Perplexitylabs.py +26 -6
  37. webscout/Provider/Phind.py +29 -3
  38. webscout/Provider/PizzaGPT.py +10 -51
  39. webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
  40. webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
  41. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
  42. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -206
  43. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -192
  44. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  45. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  46. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  47. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
  48. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
  49. webscout/Provider/TTI/__init__.py +2 -3
  50. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  51. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  52. webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
  53. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  54. webscout/Provider/TTI/fastflux/async_fastflux.py +257 -0
  55. webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0
  56. webscout/Provider/TTS/__init__.py +2 -2
  57. webscout/Provider/TTS/deepgram.py +12 -39
  58. webscout/Provider/TTS/elevenlabs.py +14 -40
  59. webscout/Provider/TTS/gesserit.py +11 -35
  60. webscout/Provider/TTS/murfai.py +13 -39
  61. webscout/Provider/TTS/parler.py +17 -40
  62. webscout/Provider/TTS/speechma.py +180 -0
  63. webscout/Provider/TTS/streamElements.py +17 -44
  64. webscout/Provider/TextPollinationsAI.py +39 -59
  65. webscout/Provider/Venice.py +217 -200
  66. webscout/Provider/WiseCat.py +27 -5
  67. webscout/Provider/Youchat.py +63 -36
  68. webscout/Provider/__init__.py +13 -8
  69. webscout/Provider/akashgpt.py +28 -10
  70. webscout/Provider/copilot.py +416 -0
  71. webscout/Provider/flowith.py +196 -0
  72. webscout/Provider/freeaichat.py +32 -45
  73. webscout/Provider/granite.py +17 -53
  74. webscout/Provider/koala.py +20 -5
  75. webscout/Provider/llamatutor.py +7 -47
  76. webscout/Provider/llmchat.py +36 -53
  77. webscout/Provider/multichat.py +92 -98
  78. webscout/Provider/talkai.py +1 -0
  79. webscout/Provider/turboseek.py +3 -0
  80. webscout/Provider/tutorai.py +2 -0
  81. webscout/Provider/typegpt.py +154 -64
  82. webscout/Provider/x0gpt.py +3 -1
  83. webscout/Provider/yep.py +102 -20
  84. webscout/__init__.py +3 -0
  85. webscout/cli.py +4 -40
  86. webscout/conversation.py +1 -10
  87. webscout/exceptions.py +19 -9
  88. webscout/litagent/__init__.py +2 -2
  89. webscout/litagent/agent.py +351 -20
  90. webscout/litagent/constants.py +34 -5
  91. webscout/litprinter/__init__.py +0 -3
  92. webscout/models.py +181 -0
  93. webscout/optimizers.py +1 -1
  94. webscout/prompt_manager.py +2 -8
  95. webscout/scout/core/scout.py +1 -4
  96. webscout/scout/core/search_result.py +1 -1
  97. webscout/scout/core/text_utils.py +1 -1
  98. webscout/scout/core.py +2 -5
  99. webscout/scout/element.py +1 -1
  100. webscout/scout/parsers/html_parser.py +1 -1
  101. webscout/scout/utils.py +0 -1
  102. webscout/swiftcli/__init__.py +1 -3
  103. webscout/tempid.py +1 -1
  104. webscout/update_checker.py +55 -95
  105. webscout/version.py +1 -1
  106. webscout/webscout_search_async.py +1 -2
  107. webscout/yep_search.py +297 -297
  108. webscout-7.6.dist-info/LICENSE.md +146 -0
  109. {webscout-7.4.dist-info → webscout-7.6.dist-info}/METADATA +104 -514
  110. {webscout-7.4.dist-info → webscout-7.6.dist-info}/RECORD +113 -120
  111. webscout/Extra/autollama.py +0 -231
  112. webscout/Local/__init__.py +0 -10
  113. webscout/Local/_version.py +0 -3
  114. webscout/Local/formats.py +0 -747
  115. webscout/Local/model.py +0 -1368
  116. webscout/Local/samplers.py +0 -125
  117. webscout/Local/thread.py +0 -539
  118. webscout/Local/ui.py +0 -401
  119. webscout/Local/utils.py +0 -388
  120. webscout/Provider/Amigo.py +0 -274
  121. webscout/Provider/Bing.py +0 -243
  122. webscout/Provider/DiscordRocks.py +0 -253
  123. webscout/Provider/TTI/blackbox/__init__.py +0 -4
  124. webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
  125. webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
  126. webscout/Provider/TTI/deepinfra/__init__.py +0 -4
  127. webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
  128. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
  129. webscout/Provider/TTI/imgninza/__init__.py +0 -4
  130. webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
  131. webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
  132. webscout/Provider/TTS/voicepod.py +0 -117
  133. webscout/Provider/dgaf.py +0 -214
  134. webscout-7.4.dist-info/LICENSE.md +0 -211
  135. {webscout-7.4.dist-info → webscout-7.6.dist-info}/WHEEL +0 -0
  136. {webscout-7.4.dist-info → webscout-7.6.dist-info}/entry_points.txt +0 -0
  137. {webscout-7.4.dist-info → webscout-7.6.dist-info}/top_level.txt +0 -0
@@ -13,6 +13,8 @@ class TurboSeek(Provider):
13
13
  """
14
14
  This class provides methods for interacting with the TurboSeek API.
15
15
  """
16
+ AVAILABLE_MODELS = ["Llama 3.1 70B"]
17
+
16
18
  def __init__(
17
19
  self,
18
20
  is_conversation: bool = True,
@@ -24,6 +26,7 @@ class TurboSeek(Provider):
24
26
  proxies: dict = {},
25
27
  history_offset: int = 10250,
26
28
  act: str = None,
29
+ model: str = "Llama 3.1 70B"
27
30
  ):
28
31
  """Instantiates TurboSeek
29
32
 
@@ -15,6 +15,7 @@ class TutorAI(Provider):
15
15
  """
16
16
  A class to interact with the TutorAI.me API.
17
17
  """
18
+ AVAILABLE_MODELS = ["gpt-4o"]
18
19
 
19
20
  def __init__(
20
21
  self,
@@ -28,6 +29,7 @@ class TutorAI(Provider):
28
29
  history_offset: int = 10250,
29
30
  act: str = None,
30
31
  system_prompt: str = "You are a helpful AI assistant.",
32
+ model: str = "gpt-4o"
31
33
  ):
32
34
  """
33
35
  Initializes the TutorAI.me API with given parameters.
@@ -15,43 +15,64 @@ class TypeGPT(Provider):
15
15
  A class to interact with the TypeGPT.net API. Improved to match webscout standards.
16
16
  """
17
17
  url = "https://chat.typegpt.net"
18
- working = True
19
- supports_message_history = True
20
18
 
21
- models = [
19
+ AVAILABLE_MODELS = [
22
20
  # OpenAI Models
23
21
  "gpt-3.5-turbo",
24
- "chatgpt-4o-latest",
25
22
  "gpt-3.5-turbo-202201",
26
23
  "gpt-4o",
27
24
  "gpt-4o-2024-05-13",
25
+ "gpt-4o-2024-11-20",
26
+ "gpt-4o-mini",
27
+ "gpt-4o-mini-2024-07-18",
28
+ # "gpt-4o-mini-ddg", >>>> NOT WORKING
29
+ "o1",
30
+ # "o1-mini-2024-09-12", >>>> NOT WORKING
28
31
  "o1-preview",
32
+ "o3-mini",
33
+ "chatgpt-4o-latest",
29
34
 
30
35
  # Claude Models
31
- "claude",
36
+ # "claude", >>>> NOT WORKING
32
37
  "claude-3-5-sonnet",
33
- "claude-sonnet-3.5",
34
38
  "claude-3-5-sonnet-20240620",
39
+ "claude-3-5-sonnet-x",
40
+ # "claude-3-haiku-ddg", >>>> NOT WORKING
41
+ "claude-hybridspace",
42
+ "claude-sonnet-3.5",
43
+ "Claude-sonnet-3.7",
44
+ "anthropic/claude-3.5-sonnet",
45
+ "anthropic/claude-3.7-sonnet",
35
46
 
36
47
  # Meta/LLaMA Models
37
48
  "@cf/meta/llama-2-7b-chat-fp16",
38
49
  "@cf/meta/llama-2-7b-chat-int8",
39
50
  "@cf/meta/llama-3-8b-instruct",
40
51
  "@cf/meta/llama-3.1-8b-instruct",
41
- "@cf/meta-llama/llama-2-7b-chat-hf-lora",
52
+ "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
53
+ # "@cf/meta-llama/llama-2-7b-chat-hf-lora", >>>> NOT WORKING
42
54
  "llama-3.1-405b",
43
55
  "llama-3.1-70b",
56
+ # "llama-3.1-70b-ddg", >>>> NOT WORKING
44
57
  "llama-3.1-8b",
45
- "meta-llama/Llama-2-7b-chat-hf",
46
- "meta-llama/Llama-3.1-70B-Instruct",
47
- "meta-llama/Llama-3.1-8B-Instruct",
58
+ # "llama-scaleway", >>>> NOT WORKING
59
+ "llama3.1-8b", # >>>> NOT WORKING
60
+ "llama3.3-70b",
61
+ # "llamalight", >>>> NOT WORKING
62
+ "Meta-Llama-3.1-405B-Instruct-Turbo",
63
+ "Meta-Llama-3.3-70B-Instruct-Turbo",
64
+ # "meta-llama/Llama-2-7b-chat-hf", >>>> NOT WORKING
65
+ # "meta-llama/Llama-3.1-70B-Instruct", >>>> NOT WORKING
66
+ # "meta-llama/Llama-3.1-8B-Instruct", >>>> NOT WORKING
48
67
  "meta-llama/Llama-3.2-11B-Vision-Instruct",
49
- "meta-llama/Llama-3.2-1B-Instruct",
50
- "meta-llama/Llama-3.2-3B-Instruct",
68
+ # "meta-llama/Llama-3.2-1B-Instruct", >>>> NOT WORKING
69
+ # "meta-llama/Llama-3.2-3B-Instruct", >>>> NOT WORKING
51
70
  "meta-llama/Llama-3.2-90B-Vision-Instruct",
52
- "meta-llama/Llama-Guard-3-8B",
53
- "meta-llama/Meta-Llama-3-70B-Instruct",
54
- "meta-llama/Meta-Llama-3-8B-Instruct",
71
+ "meta-llama/Llama-3.3-70B-Instruct",
72
+ "meta-llama/Llama-3.3-70B-Instruct-Turbo",
73
+ # "meta-llama/Llama-Guard-3-8B", >>>> NOT WORKING
74
+ # "meta-llama/Meta-Llama-3-70B-Instruct", >>>> NOT WORKING
75
+ # "meta-llama/Meta-Llama-3-8B-Instruct", >>>> NOT WORKING
55
76
  "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
56
77
  "meta-llama/Meta-Llama-3.1-8B-Instruct",
57
78
  "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
@@ -60,25 +81,34 @@ class TypeGPT(Provider):
60
81
  "mistral",
61
82
  "mistral-large",
62
83
  "@cf/mistral/mistral-7b-instruct-v0.1",
63
- "@cf/mistral/mistral-7b-instruct-v0.2-lora",
84
+ # "@cf/mistral/mistral-7b-instruct-v0.2-lora", >>>> NOT WORKING
64
85
  "@hf/mistralai/mistral-7b-instruct-v0.2",
65
86
  "mistralai/Mistral-7B-Instruct-v0.2",
66
87
  "mistralai/Mistral-7B-Instruct-v0.3",
67
88
  "mistralai/Mixtral-8x22B-Instruct-v0.1",
68
89
  "mistralai/Mixtral-8x7B-Instruct-v0.1",
90
+ # "mixtral-8x7b-ddg", >>>> NOT WORKING
91
+ "Mistral-7B-Instruct-v0.2",
69
92
 
70
93
  # Qwen Models
71
94
  "@cf/qwen/qwen1.5-0.5b-chat",
72
95
  "@cf/qwen/qwen1.5-1.8b-chat",
73
- "@cf/qwen/qwen1.5-7b-chat-awq",
74
96
  "@cf/qwen/qwen1.5-14b-chat-awq",
97
+ "@cf/qwen/qwen1.5-7b-chat-awq",
75
98
  "Qwen/Qwen2.5-3B-Instruct",
76
99
  "Qwen/Qwen2.5-72B-Instruct",
77
100
  "Qwen/Qwen2.5-Coder-32B-Instruct",
101
+ "Qwen/Qwen2-72B-Instruct",
102
+ "Qwen/QwQ-32B",
103
+ "Qwen/QwQ-32B-Preview",
104
+ "Qwen2.5-72B-Instruct",
105
+ "qwen",
106
+ "qwen-coder",
107
+ # "Qwen-QwQ-32B-Preview", >>>> NOT WORKING
78
108
 
79
109
  # Google/Gemini Models
80
- "@cf/google/gemma-2b-it-lora",
81
- "@cf/google/gemma-7b-it-lora",
110
+ # "@cf/google/gemma-2b-it-lora", >>>> NOT WORKING
111
+ # "@cf/google/gemma-7b-it-lora", >>>> NOT WORKING
82
112
  "@hf/google/gemma-7b-it",
83
113
  "google/gemma-1.1-2b-it",
84
114
  "google/gemma-1.1-7b-it",
@@ -86,22 +116,8 @@ class TypeGPT(Provider):
86
116
  "gemini-1.5-pro",
87
117
  "gemini-1.5-pro-latest",
88
118
  "gemini-1.5-flash",
89
-
90
- # Cohere Models
91
- "c4ai-aya-23-35b",
92
- "c4ai-aya-23-8b",
93
- "command",
94
- "command-light",
95
- "command-light-nightly",
96
- "command-nightly",
97
- "command-r",
98
- "command-r-08-2024",
99
- "command-r-plus",
100
- "command-r-plus-08-2024",
101
- "rerank-english-v2.0",
102
- "rerank-english-v3.0",
103
- "rerank-multilingual-v2.0",
104
- "rerank-multilingual-v3.0",
119
+ "gemini-flash-2.0",
120
+ "gemini-thinking",
105
121
 
106
122
  # Microsoft Models
107
123
  "@cf/microsoft/phi-2",
@@ -109,29 +125,45 @@ class TypeGPT(Provider):
109
125
  "microsoft/Phi-3-medium-4k-instruct",
110
126
  "microsoft/Phi-3-mini-4k-instruct",
111
127
  "microsoft/Phi-3.5-mini-instruct",
128
+ "microsoft/phi-4",
112
129
  "microsoft/WizardLM-2-8x22B",
113
130
 
114
131
  # Yi Models
115
132
  "01-ai/Yi-1.5-34B-Chat",
116
- "01-ai/Yi-34B-Chat",
133
+ # "01-ai/Yi-34B-Chat", >>>> NOT WORKING
117
134
 
118
- # Specialized Models and Tools
135
+ # DeepSeek Models
119
136
  "@cf/deepseek-ai/deepseek-math-7b-base",
120
137
  "@cf/deepseek-ai/deepseek-math-7b-instruct",
138
+ "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
139
+ "deepseek",
140
+ "deepseek-ai/DeepSeek-R1",
141
+ "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
142
+ # "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", >>>> NOT WORKING
143
+ # "deepseek-ai/DeepSeek-V2.5", >>>> NOT WORKING
144
+ "deepseek-llm-67b-chat",
145
+ "deepseek-r1",
146
+ "deepseek-r1-distill-llama-70b",
147
+ # "deepseek-reasoner", >>>> NOT WORKING
148
+ "deepseek-v3",
149
+
150
+ # Specialized Models and Tools
121
151
  "@cf/defog/sqlcoder-7b-2",
122
- "@cf/openchat/openchat-3.5-0106",
123
152
  "@cf/thebloke/discolm-german-7b-v1-awq",
124
153
  "@cf/tiiuae/falcon-7b-instruct",
125
- "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
126
- "@hf/nexusflow/starling-lm-7b-beta",
127
- "@hf/nousresearch/hermes-2-pro-mistral-7b",
128
- "@hf/thebloke/deepseek-coder-6.7b-base-awq",
129
- "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
130
- "@hf/thebloke/llama-2-13b-chat-awq",
131
- "@hf/thebloke/llamaguard-7b-awq",
132
- "@hf/thebloke/neural-chat-7b-v3-1-awq",
133
- "@hf/thebloke/openhermes-2.5-mistral-7b-awq",
134
- "@hf/thebloke/zephyr-7b-beta-awq",
154
+ # "@cf/tinyllama/tinyllama-1.1b-chat-v1.0", >>>> NOT WORKING
155
+ # "@hf/nexusflow/starling-lm-7b-beta", >>>> NOT WORKING
156
+ # "@hf/nousresearch/hermes-2-pro-mistral-7b", >>>> NOT WORKING
157
+ # "@hf/thebloke/deepseek-coder-6.7b-base-awq", >>>> NOT WORKING
158
+ # "@hf/thebloke/deepseek-coder-6.7b-instruct-awq", >>>> NOT WORKING
159
+ # "@hf/thebloke/llama-2-13b-chat-awq", >>>> NOT WORKING
160
+ # "@hf/thebloke/llamaguard-7b-awq", >>>> NOT WORKING
161
+ # "@hf/thebloke/mistral-7b-instruct-v0.1-awq", >>>> NOT WORKING
162
+ # "@hf/thebloke/neural-chat-7b-v3-1-awq", >>>> NOT WORKING
163
+ # "@hf/thebloke/openhermes-2.5-mistral-7b-awq", >>>> NOT WORKING
164
+ # "@hf/thebloke/zephyr-7b-beta-awq", >>>> NOT WORKING
165
+
166
+ # Development Agents
135
167
  "AndroidDeveloper",
136
168
  "AngularJSAgent",
137
169
  "AzureAgent",
@@ -150,8 +182,6 @@ class TypeGPT(Provider):
150
182
  "GodotAgent",
151
183
  "GoogleCloudAgent",
152
184
  "HTMLAgent",
153
- "HerokuAgent",
154
- "ImageGeneration",
155
185
  "JavaAgent",
156
186
  "JavaScriptAgent",
157
187
  "MongoDBAgent",
@@ -162,18 +192,54 @@ class TypeGPT(Provider):
162
192
  "RepoMap",
163
193
  "SwiftDeveloper",
164
194
  "XcodeAgent",
165
- "YoutubeAgent",
195
+ # "YoutubeAgent", >>>> NOT WORKING
196
+
197
+ # Other Models
166
198
  "blackboxai",
167
199
  "blackboxai-pro",
168
200
  "builderAgent",
169
- "dify",
201
+ # "Cipher-20b", >>>> NOT WORKING
202
+ # "dify", >>>> NOT WORKING
170
203
  "flux",
171
- "openchat/openchat-3.6-8b",
172
- "rtist",
173
- "searchgpt",
174
- "sur",
175
- "sur-mistral",
176
- "unity"
204
+ # "flux-1-schnell", >>>> NOT WORKING
205
+ # "HelpingAI-15B", >>>> NOT WORKING
206
+ # "HelpingAI2-3b", >>>> NOT WORKING
207
+ # "HelpingAI2-6B", >>>> NOT WORKING
208
+ # "HelpingAI2-9B", >>>> NOT WORKING
209
+ # "HelpingAI2.5-10B", >>>> NOT WORKING
210
+ # "Helpingai2.5-10b-1m", >>>> NOT WORKING
211
+ # "HelpingAI2.5-2B", >>>> NOT WORKING
212
+ # "HELVETE", >>>> NOT WORKING
213
+ # "HELVETE-X", >>>> NOT WORKING
214
+ # "evil", >>>> NOT WORKING
215
+ # "Image-Generator", >>>> NOT WORKING
216
+ # "Image-Generator-NSFW", >>>> NOT WORKING
217
+ # "midijourney", >>>> NOT WORKING
218
+ # "Niansuh", >>>> NOT WORKING
219
+ # "niansuh-t1", >>>> NOT WORKING
220
+ # "Nous-Hermes-2-Mixtral-8x7B-DPO", >>>> NOT WORKING
221
+ # "NousResearch/Hermes-3-Llama-3.1-8B", >>>> NOT WORKING
222
+ # "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", >>>> NOT WORKING
223
+ # "nvidia/Llama-3.1-Nemotron-70B-Instruct", >>>> NOT WORKING
224
+ # "openai", >>>> NOT WORKING
225
+ # "openai-audio", >>>> NOT WORKING
226
+ # "openai-large", >>>> NOT WORKING
227
+ # "openai-reasoning", >>>> NOT WORKING
228
+ # "openai/whisper-large-v3", >>>> NOT WORKING
229
+ # "openai/whisper-large-v3-turbo", >>>> NOT WORKING
230
+ # "openbmb/MiniCPM-Llama3-V-2_5", >>>> NOT WORKING
231
+ # "openchat/openchat-3.6-8b", >>>> NOT WORKING
232
+ # "p1", >>>> NOT WORKING
233
+ # "phi", >>>> NOT WORKING
234
+ # "Phi-4-multilmodal-instruct", >>>> NOT WORKING
235
+ # "Priya-3B", >>>> NOT WORKING
236
+ # "rtist", >>>> NOT WORKING
237
+ # "searchgpt", >>>> NOT WORKING
238
+ # "sur", >>>> NOT WORKING
239
+ # "sur-mistral", >>>> NOT WORKING
240
+ # "tiiuae/falcon-7b-instruct", >>>> NOT WORKING
241
+ # "TirexAi", >>>> NOT WORKING
242
+ # "unity", >>>> NOT WORKING
177
243
  ]
178
244
 
179
245
  def __init__(
@@ -195,8 +261,8 @@ class TypeGPT(Provider):
195
261
  top_p: float = 1,
196
262
  ):
197
263
  """Initializes the TypeGPT API client."""
198
- if model not in self.models:
199
- raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.models)}")
264
+ if model not in self.AVAILABLE_MODELS:
265
+ raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")
200
266
 
201
267
  self.session = requests.Session()
202
268
  self.is_conversation = is_conversation
@@ -354,7 +420,31 @@ class TypeGPT(Provider):
354
420
  raise TypeError("Invalid response type. Expected str or dict.")
355
421
 
356
422
  if __name__ == "__main__":
357
- ai = TypeGPT(model="chatgpt-4o-latest")
358
- response = ai.chat("hi", stream=True)
359
- for chunks in response:
360
- print(chunks, end="", flush=True)
423
+ print("-" * 80)
424
+ print(f"{'Model':<50} {'Status':<10} {'Response'}")
425
+ print("-" * 80)
426
+
427
+ # Test all available models
428
+ working = 0
429
+ total = len(TypeGPT.AVAILABLE_MODELS)
430
+
431
+ for model in TypeGPT.AVAILABLE_MODELS:
432
+ try:
433
+ test_ai = TypeGPT(model=model, timeout=60)
434
+ response = test_ai.chat("Say 'Hello' in one word", stream=True)
435
+ response_text = ""
436
+ for chunk in response:
437
+ response_text += chunk
438
+ print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
439
+
440
+ if response_text and len(response_text.strip()) > 0:
441
+ status = "✓"
442
+ # Truncate response if too long
443
+ display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
444
+ else:
445
+ status = "✗"
446
+ display_text = "Empty or invalid response"
447
+ print(f"\r{model:<50} {status:<10} {display_text}")
448
+ except Exception as e:
449
+ print(f"\r{model:<50} {'✗':<10} {str(e)}")
450
+
@@ -24,6 +24,7 @@ class X0GPT(Provider):
24
24
  >>> print(response)
25
25
  'The weather today is sunny with a high of 75°F.'
26
26
  """
27
+ AVAILABLE_MODELS = ["UNKNOWN"]
27
28
 
28
29
  def __init__(
29
30
  self,
@@ -36,7 +37,8 @@ class X0GPT(Provider):
36
37
  proxies: dict = {},
37
38
  history_offset: int = 10250,
38
39
  act: str = None,
39
- system_prompt: str = "You are a helpful assistant."
40
+ system_prompt: str = "You are a helpful assistant.",
41
+ model: str = "UNKNOWN"
40
42
  ):
41
43
  """
42
44
  Initializes the X0GPT API with given parameters.
webscout/Provider/yep.py CHANGED
@@ -38,7 +38,8 @@ class YEPCHAT(Provider):
38
38
  act: str = None,
39
39
  model: str = "DeepSeek-R1-Distill-Qwen-32B",
40
40
  temperature: float = 0.6,
41
- top_p: float = 0.7
41
+ top_p: float = 0.7,
42
+ browser: str = "chrome"
42
43
  ):
43
44
  """
44
45
  Initializes the YEPCHAT provider with the specified parameters.
@@ -69,20 +70,25 @@ class YEPCHAT(Provider):
69
70
 
70
71
  # Initialize LitAgent for user agent generation
71
72
  self.agent = LitAgent()
73
+ # Use fingerprinting to create a consistent browser identity
74
+ self.fingerprint = self.agent.generate_fingerprint(browser)
72
75
 
76
+ # Use the fingerprint for headers
73
77
  self.headers = {
74
- "Accept": "*/*",
78
+ "Accept": self.fingerprint["accept"],
75
79
  "Accept-Encoding": "gzip, deflate, br, zstd",
76
- "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
80
+ "Accept-Language": self.fingerprint["accept_language"],
77
81
  "Content-Type": "application/json; charset=utf-8",
78
82
  "DNT": "1",
79
83
  "Origin": "https://yep.com",
80
84
  "Referer": "https://yep.com/",
81
- "Sec-CH-UA": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
85
+ "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
82
86
  "Sec-CH-UA-Mobile": "?0",
83
- "Sec-CH-UA-Platform": '"Windows"',
84
- "User-Agent": self.agent.random(), # Use LitAgent to generate a random user agent
87
+ "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
88
+ "User-Agent": self.fingerprint["user_agent"],
85
89
  }
90
+
91
+ # Create session cookies with unique identifiers
86
92
  self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
87
93
 
88
94
  self.__available_optimizers = (
@@ -101,8 +107,38 @@ class YEPCHAT(Provider):
101
107
  )
102
108
  self.conversation.history_offset = history_offset
103
109
  self.session.proxies = proxies
110
+
111
+ # Set consistent headers for the scraper session
112
+ for header, value in self.headers.items():
113
+ self.session.headers[header] = value
104
114
 
105
- self.knowledge_cutoff = "December 2023"
115
+ def refresh_identity(self, browser: str = None):
116
+ """
117
+ Refreshes the browser identity fingerprint.
118
+
119
+ Args:
120
+ browser: Specific browser to use for the new fingerprint
121
+ """
122
+ browser = browser or self.fingerprint.get("browser_type", "chrome")
123
+ self.fingerprint = self.agent.generate_fingerprint(browser)
124
+
125
+ # Update headers with new fingerprint
126
+ self.headers.update({
127
+ "Accept": self.fingerprint["accept"],
128
+ "Accept-Language": self.fingerprint["accept_language"],
129
+ "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
130
+ "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
131
+ "User-Agent": self.fingerprint["user_agent"],
132
+ })
133
+
134
+ # Update session headers
135
+ for header, value in self.headers.items():
136
+ self.session.headers[header] = value
137
+
138
+ # Generate new cookies
139
+ self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
140
+
141
+ return self.fingerprint
106
142
 
107
143
  def ask(
108
144
  self,
@@ -147,9 +183,20 @@ class YEPCHAT(Provider):
147
183
  try:
148
184
  with self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout) as response:
149
185
  if not response.ok:
150
- raise exceptions.FailedToGenerateResponseError(
151
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
152
- )
186
+ # If we get a non-200 response, try refreshing our identity once
187
+ if response.status_code in [403, 429]:
188
+ self.refresh_identity()
189
+ # Retry with new identity
190
+ with self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout) as retry_response:
191
+ if not retry_response.ok:
192
+ raise exceptions.FailedToGenerateResponseError(
193
+ f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
194
+ )
195
+ response = retry_response
196
+ else:
197
+ raise exceptions.FailedToGenerateResponseError(
198
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
199
+ )
153
200
 
154
201
  streaming_text = ""
155
202
  for line in response.iter_lines(decode_unicode=True):
@@ -177,9 +224,30 @@ class YEPCHAT(Provider):
177
224
  raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
178
225
 
179
226
  def for_non_stream():
180
- for _ in for_stream():
181
- pass
182
- return self.last_response
227
+ try:
228
+ response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, timeout=self.timeout)
229
+ if not response.ok:
230
+ if response.status_code in [403, 429]:
231
+ self.refresh_identity()
232
+ response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, timeout=self.timeout)
233
+ if not response.ok:
234
+ raise exceptions.FailedToGenerateResponseError(
235
+ f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
236
+ )
237
+ else:
238
+ raise exceptions.FailedToGenerateResponseError(
239
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
240
+ )
241
+
242
+ response_data = response.json()
243
+ if 'choices' in response_data and len(response_data['choices']) > 0:
244
+ content = response_data['choices'][0].get('message', {}).get('content', '')
245
+ self.conversation.update_chat_history(prompt, content)
246
+ return {"text": content}
247
+ else:
248
+ raise exceptions.FailedToGenerateResponseError("No response content found")
249
+ except Exception as e:
250
+ raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
183
251
 
184
252
  return for_stream() if stream else for_non_stream()
185
253
 
@@ -189,7 +257,7 @@ class YEPCHAT(Provider):
189
257
  stream: bool = False,
190
258
  optimizer: str = None,
191
259
  conversationally: bool = False,
192
- ) -> str:
260
+ ) -> Union[str, Generator[str, None, None]]:
193
261
  """
194
262
  Initiates a chat with the Yep API using the provided prompt.
195
263
 
@@ -234,9 +302,23 @@ class YEPCHAT(Provider):
234
302
 
235
303
 
236
304
  if __name__ == "__main__":
237
- from rich import print
238
-
239
- ai = YEPCHAT(model="DeepSeek-R1-Distill-Qwen-32B")
240
- response = ai.chat("how many r in 'strawberry'", stream=True)
241
- for chunk in response:
242
- print(chunk, end="", flush=True)
305
+ print("-" * 80)
306
+ print(f"{'Model':<50} {'Status':<10} {'Response'}")
307
+ print("-" * 80)
308
+
309
+ for model in YEPCHAT.AVAILABLE_MODELS:
310
+ try:
311
+ test_ai = YEPCHAT(model=model, timeout=60)
312
+ response = test_ai.chat("Say 'Hello' in one word")
313
+ response_text = response
314
+
315
+ if response_text and len(response_text.strip()) > 0:
316
+ status = "✓"
317
+ # Truncate response if too long
318
+ display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
319
+ else:
320
+ status = "✗"
321
+ display_text = "Empty or invalid response"
322
+ print(f"{model:<50} {status:<10} {display_text}")
323
+ except Exception as e:
324
+ print(f"{model:<50} {'✗':<10} {str(e)}")
webscout/__init__.py CHANGED
@@ -31,3 +31,6 @@ except Exception:
31
31
 
32
32
  import logging
33
33
  logging.getLogger("webscout").addHandler(logging.NullHandler())
34
+
35
+ # Import models for easy access
36
+ from webscout.models import model