webscout-7.0-py3-none-any.whl → webscout-7.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (147)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +20 -0
  24. webscout/Litlogger/core/logger.py +123 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +50 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +174 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +231 -0
  31. webscout/Litlogger/styles/formats.py +377 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/Blackboxai.py +136 -137
  38. webscout/Provider/ChatGPTGratis.py +226 -0
  39. webscout/Provider/Cloudflare.py +91 -78
  40. webscout/Provider/DeepSeek.py +218 -0
  41. webscout/Provider/Deepinfra.py +59 -35
  42. webscout/Provider/Free2GPT.py +131 -124
  43. webscout/Provider/Gemini.py +100 -115
  44. webscout/Provider/Glider.py +74 -59
  45. webscout/Provider/Groq.py +30 -18
  46. webscout/Provider/Jadve.py +108 -77
  47. webscout/Provider/Llama3.py +117 -94
  48. webscout/Provider/Marcus.py +191 -137
  49. webscout/Provider/Netwrck.py +62 -50
  50. webscout/Provider/PI.py +79 -124
  51. webscout/Provider/PizzaGPT.py +129 -83
  52. webscout/Provider/QwenLM.py +311 -0
  53. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  54. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  55. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  56. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  57. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  58. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  59. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  60. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  61. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  62. webscout/Provider/TTI/artbit/__init__.py +22 -22
  63. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  64. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  65. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  66. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  67. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  68. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  69. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  70. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  71. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  72. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  73. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  74. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  75. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  76. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  77. webscout/Provider/TTI/talkai/__init__.py +4 -4
  78. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  79. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  80. webscout/Provider/TTS/deepgram.py +182 -182
  81. webscout/Provider/TTS/elevenlabs.py +136 -136
  82. webscout/Provider/TTS/gesserit.py +150 -150
  83. webscout/Provider/TTS/murfai.py +138 -138
  84. webscout/Provider/TTS/parler.py +133 -134
  85. webscout/Provider/TTS/streamElements.py +360 -360
  86. webscout/Provider/TTS/utils.py +280 -280
  87. webscout/Provider/TTS/voicepod.py +116 -116
  88. webscout/Provider/TextPollinationsAI.py +74 -47
  89. webscout/Provider/WiseCat.py +193 -0
  90. webscout/Provider/__init__.py +144 -136
  91. webscout/Provider/cerebras.py +242 -227
  92. webscout/Provider/chatglm.py +204 -204
  93. webscout/Provider/dgaf.py +67 -39
  94. webscout/Provider/gaurish.py +105 -66
  95. webscout/Provider/geminiapi.py +208 -208
  96. webscout/Provider/granite.py +223 -0
  97. webscout/Provider/hermes.py +218 -218
  98. webscout/Provider/llama3mitril.py +179 -179
  99. webscout/Provider/llamatutor.py +72 -62
  100. webscout/Provider/llmchat.py +60 -35
  101. webscout/Provider/meta.py +794 -794
  102. webscout/Provider/multichat.py +331 -230
  103. webscout/Provider/typegpt.py +359 -356
  104. webscout/Provider/yep.py +5 -5
  105. webscout/__main__.py +5 -5
  106. webscout/cli.py +319 -319
  107. webscout/conversation.py +241 -242
  108. webscout/exceptions.py +328 -328
  109. webscout/litagent/__init__.py +28 -28
  110. webscout/litagent/agent.py +2 -3
  111. webscout/litprinter/__init__.py +0 -58
  112. webscout/scout/__init__.py +8 -8
  113. webscout/scout/core.py +884 -884
  114. webscout/scout/element.py +459 -459
  115. webscout/scout/parsers/__init__.py +69 -69
  116. webscout/scout/parsers/html5lib_parser.py +172 -172
  117. webscout/scout/parsers/html_parser.py +236 -236
  118. webscout/scout/parsers/lxml_parser.py +178 -178
  119. webscout/scout/utils.py +38 -38
  120. webscout/swiftcli/__init__.py +811 -811
  121. webscout/update_checker.py +2 -12
  122. webscout/version.py +1 -1
  123. webscout/webscout_search.py +1142 -1140
  124. webscout/webscout_search_async.py +635 -635
  125. webscout/zeroart/__init__.py +54 -54
  126. webscout/zeroart/base.py +60 -60
  127. webscout/zeroart/effects.py +99 -99
  128. webscout/zeroart/fonts.py +816 -816
  129. {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
  130. webscout-7.2.dist-info/RECORD +217 -0
  131. webstoken/__init__.py +30 -30
  132. webstoken/classifier.py +189 -189
  133. webstoken/keywords.py +216 -216
  134. webstoken/language.py +128 -128
  135. webstoken/ner.py +164 -164
  136. webstoken/normalizer.py +35 -35
  137. webstoken/processor.py +77 -77
  138. webstoken/sentiment.py +206 -206
  139. webstoken/stemmer.py +73 -73
  140. webstoken/tagger.py +60 -60
  141. webstoken/tokenizer.py +158 -158
  142. webscout/Provider/RUBIKSAI.py +0 -272
  143. webscout-7.0.dist-info/RECORD +0 -199
  144. {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
  145. {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
  146. {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
  147. {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
@@ -1,357 +1,360 @@
- import requests
- import json
- from typing import *
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
- from webscout.AIbase import Provider
- from webscout import exceptions
- from webscout.litagent import LitAgent
- class TypeGPT(Provider):
- """
- A class to interact with the TypeGPT.net API. Improved to match webscout standards.
- """
- url = "https://chat.typegpt.net"
- working = True
- supports_message_history = True
-
- models = [
- # OpenAI Models
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-202201",
- "gpt-4o",
- "gpt-4o-2024-05-13",
- "o1-preview",
-
- # Claude Models
- "claude",
- "claude-3-5-sonnet",
- "claude-sonnet-3.5",
- "claude-3-5-sonnet-20240620",
-
- # Meta/LLaMA Models
- "@cf/meta/llama-2-7b-chat-fp16",
- "@cf/meta/llama-2-7b-chat-int8",
- "@cf/meta/llama-3-8b-instruct",
- "@cf/meta/llama-3.1-8b-instruct",
- "@cf/meta-llama/llama-2-7b-chat-hf-lora",
- "llama-3.1-405b",
- "llama-3.1-70b",
- "llama-3.1-8b",
- "meta-llama/Llama-2-7b-chat-hf",
- "meta-llama/Llama-3.1-70B-Instruct",
- "meta-llama/Llama-3.1-8B-Instruct",
- "meta-llama/Llama-3.2-11B-Vision-Instruct",
- "meta-llama/Llama-3.2-1B-Instruct",
- "meta-llama/Llama-3.2-3B-Instruct",
- "meta-llama/Llama-3.2-90B-Vision-Instruct",
- "meta-llama/Llama-Guard-3-8B",
- "meta-llama/Meta-Llama-3-70B-Instruct",
- "meta-llama/Meta-Llama-3-8B-Instruct",
- "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
- "meta-llama/Meta-Llama-3.1-8B-Instruct",
- "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-
- # Mistral Models
- "mistral",
- "mistral-large",
- "@cf/mistral/mistral-7b-instruct-v0.1",
- "@cf/mistral/mistral-7b-instruct-v0.2-lora",
- "@hf/mistralai/mistral-7b-instruct-v0.2",
- "mistralai/Mistral-7B-Instruct-v0.2",
- "mistralai/Mistral-7B-Instruct-v0.3",
- "mistralai/Mixtral-8x22B-Instruct-v0.1",
- "mistralai/Mixtral-8x7B-Instruct-v0.1",
-
- # Qwen Models
- "@cf/qwen/qwen1.5-0.5b-chat",
- "@cf/qwen/qwen1.5-1.8b-chat",
- "@cf/qwen/qwen1.5-7b-chat-awq",
- "@cf/qwen/qwen1.5-14b-chat-awq",
- "Qwen/Qwen2.5-3B-Instruct",
- "Qwen/Qwen2.5-72B-Instruct",
- "Qwen/Qwen2.5-Coder-32B-Instruct",
-
- # Google/Gemini Models
- "@cf/google/gemma-2b-it-lora",
- "@cf/google/gemma-7b-it-lora",
- "@hf/google/gemma-7b-it",
- "google/gemma-1.1-2b-it",
- "google/gemma-1.1-7b-it",
- "gemini-pro",
- "gemini-1.5-pro",
- "gemini-1.5-pro-latest",
- "gemini-1.5-flash",
-
- # Cohere Models
- "c4ai-aya-23-35b",
- "c4ai-aya-23-8b",
- "command",
- "command-light",
- "command-light-nightly",
- "command-nightly",
- "command-r",
- "command-r-08-2024",
- "command-r-plus",
- "command-r-plus-08-2024",
- "rerank-english-v2.0",
- "rerank-english-v3.0",
- "rerank-multilingual-v2.0",
- "rerank-multilingual-v3.0",
-
- # Microsoft Models
- "@cf/microsoft/phi-2",
- "microsoft/DialoGPT-medium",
- "microsoft/Phi-3-medium-4k-instruct",
- "microsoft/Phi-3-mini-4k-instruct",
- "microsoft/Phi-3.5-mini-instruct",
- "microsoft/WizardLM-2-8x22B",
-
- # Yi Models
- "01-ai/Yi-1.5-34B-Chat",
- "01-ai/Yi-34B-Chat",
-
- # Specialized Models and Tools
- "@cf/deepseek-ai/deepseek-math-7b-base",
- "@cf/deepseek-ai/deepseek-math-7b-instruct",
- "@cf/defog/sqlcoder-7b-2",
- "@cf/openchat/openchat-3.5-0106",
- "@cf/thebloke/discolm-german-7b-v1-awq",
- "@cf/tiiuae/falcon-7b-instruct",
- "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
- "@hf/nexusflow/starling-lm-7b-beta",
- "@hf/nousresearch/hermes-2-pro-mistral-7b",
- "@hf/thebloke/deepseek-coder-6.7b-base-awq",
- "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
- "@hf/thebloke/llama-2-13b-chat-awq",
- "@hf/thebloke/llamaguard-7b-awq",
- "@hf/thebloke/neural-chat-7b-v3-1-awq",
- "@hf/thebloke/openhermes-2.5-mistral-7b-awq",
- "@hf/thebloke/zephyr-7b-beta-awq",
- "AndroidDeveloper",
- "AngularJSAgent",
- "AzureAgent",
- "BitbucketAgent",
- "DigitalOceanAgent",
- "DockerAgent",
- "ElectronAgent",
- "ErlangAgent",
- "FastAPIAgent",
- "FirebaseAgent",
- "FlaskAgent",
- "FlutterAgent",
- "GitAgent",
- "GitlabAgent",
- "GoAgent",
- "GodotAgent",
- "GoogleCloudAgent",
- "HTMLAgent",
- "HerokuAgent",
- "ImageGeneration",
- "JavaAgent",
- "JavaScriptAgent",
- "MongoDBAgent",
- "Next.jsAgent",
- "PyTorchAgent",
- "PythonAgent",
- "ReactAgent",
- "RepoMap",
- "SwiftDeveloper",
- "XcodeAgent",
- "YoutubeAgent",
- "blackboxai",
- "blackboxai-pro",
- "builderAgent",
- "dify",
- "flux",
- "openchat/openchat-3.6-8b",
- "rtist",
- "searchgpt",
- "sur",
- "sur-mistral",
- "unity"
- ]
-
- def __init__(
- self,
- is_conversation: bool = True,
- max_tokens: int = 4000, # Set a reasonable default
- timeout: int = 30,
- intro: str = None,
- filepath: str = None,
- update_file: bool = True,
- proxies: dict = {},
- history_offset: int = 10250,
- act: str = None,
- model: str = "claude-3-5-sonnet-20240620",
- system_prompt: str = "You are a helpful assistant.",
- temperature: float = 0.5,
- presence_penalty: int = 0,
- frequency_penalty: int = 0,
- top_p: float = 1,
- ):
- """Initializes the TypeGPT API client."""
- if model not in self.models:
- raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.models)}")
-
- self.session = requests.Session()
- self.is_conversation = is_conversation
- self.max_tokens_to_sample = max_tokens
- self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
- self.timeout = timeout
- self.last_response = {}
- self.model = model
- self.system_prompt = system_prompt
- self.temperature = temperature
- self.presence_penalty = presence_penalty
- self.frequency_penalty = frequency_penalty
- self.top_p = top_p
- self.headers = {
- "authority": "chat.typegpt.net",
- "accept": "application/json, text/event-stream",
- "accept-language": "en-US,en;q=0.9",
- "content-type": "application/json",
- "origin": "https://chat.typegpt.net",
- "referer": "https://chat.typegpt.net/",
- "user-agent": LitAgent().random()
- }
-
-
- self.__available_optimizers = (
- method
- for method in dir(Optimizers)
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
- )
- Conversation.intro = (
- AwesomePrompts().get_act(
- act, raise_not_found=True, default=None, case_insensitive=True
- )
- if act
- else intro or Conversation.intro
- )
- self.conversation = Conversation(
- is_conversation, self.max_tokens_to_sample, filepath, update_file
- )
- self.conversation.history_offset = history_offset
- self.session.proxies = proxies
-
- def ask(
- self,
- prompt: str,
- stream: bool = False,
- raw: bool = False,
- optimizer: str = None,
- conversationally: bool = False,
- ) -> Dict[str, Any] | Generator:
- """Sends a prompt to the TypeGPT.net API and returns the response."""
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
- if optimizer:
- if optimizer in self.__available_optimizers:
- conversation_prompt = getattr(Optimizers, optimizer)(
- conversation_prompt if conversationally else prompt
- )
- else:
- raise exceptions.FailedToGenerateResponseError(
- f"Optimizer is not one of {self.__available_optimizers}"
- )
-
-
- payload = {
- "messages": [
- {"role": "system", "content": self.system_prompt},
- {"role": "user", "content": conversation_prompt}
- ],
- "stream": stream,
- "model": self.model,
- "temperature": self.temperature,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "top_p": self.top_p,
- "max_tokens": self.max_tokens_to_sample,
- }
- def for_stream():
- response = self.session.post(
- self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
- )
- if not response.ok:
- raise exceptions.FailedToGenerateResponseError(
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
- )
- message_load = ""
- for line in response.iter_lines():
- if line:
- line = line.decode("utf-8")
- if line.startswith("data: "):
- line = line[6:] # Remove "data: " prefix
- # Skip [DONE] message
- if line.strip() == "[DONE]":
- break
-
- try:
- data = json.loads(line)
-
- # Extract and yield only new content
- if 'choices' in data and len(data['choices']) > 0:
- delta = data['choices'][0].get('delta', {})
- if 'content' in delta:
- new_content = delta['content']
- message_load += new_content
- # Yield only the new content
- yield dict(text=new_content) if not raw else new_content
- self.last_response = dict(text=message_load)
-
- except json.JSONDecodeError:
- continue
- self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
-
- def for_non_stream():
-
- response = self.session.post(self.api_endpoint, headers=self.headers, json=payload)
- if not response.ok:
- raise exceptions.FailedToGenerateResponseError(
- f"Request failed - {response.status_code}: {response.text}"
- )
- self.last_response = response.json()
- self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
- return self.last_response
-
-
- return for_stream() if stream else for_non_stream()
-
-
- def chat(
- self,
- prompt: str,
- stream: bool = False,
- optimizer: str = None,
- conversationally: bool = False,
- ) -> str | Generator[str, None, None]:
- """Generate response `str` or stream."""
-
- if stream:
- gen = self.ask(
- prompt, stream=True, optimizer=optimizer, conversationally=conversationally
- )
- for chunk in gen:
- yield self.get_message(chunk) # Extract text from streamed chunks
- else:
- return self.get_message(self.ask(prompt, stream=False, optimizer=optimizer, conversationally=conversationally))
-
- def get_message(self, response: Dict[str, Any]) -> str:
- """Retrieves message from response."""
- if isinstance(response, str): #Handle raw responses
- return response
- elif isinstance(response, dict):
- assert isinstance(response, dict), "Response should be of dict data-type only"
- return response.get("text", "") #Extract text from dictionary response
- else:
- raise TypeError("Invalid response type. Expected str or dict.")
-
-
- if __name__ == "__main__":
-
- ai = TypeGPT(model="claude-3-5-sonnet-20240620")
- response = ai.chat("hi", stream=True)
- for chunks in response:
+ import requests
+ import json
+ from typing import *
+ import requests.exceptions
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class TypeGPT(Provider):
+ """
+ A class to interact with the TypeGPT.net API. Improved to match webscout standards.
+ """
+ url = "https://chat.typegpt.net"
+ working = True
+ supports_message_history = True
+
+ models = [
+ # OpenAI Models
+ "gpt-3.5-turbo",
+ "chatgpt-4o-latest",
+ "gpt-3.5-turbo-202201",
+ "gpt-4o",
+ "gpt-4o-2024-05-13",
+ "o1-preview",
+
+ # Claude Models
+ "claude",
+ "claude-3-5-sonnet",
+ "claude-sonnet-3.5",
+ "claude-3-5-sonnet-20240620",
+
+ # Meta/LLaMA Models
+ "@cf/meta/llama-2-7b-chat-fp16",
+ "@cf/meta/llama-2-7b-chat-int8",
+ "@cf/meta/llama-3-8b-instruct",
+ "@cf/meta/llama-3.1-8b-instruct",
+ "@cf/meta-llama/llama-2-7b-chat-hf-lora",
+ "llama-3.1-405b",
+ "llama-3.1-70b",
+ "llama-3.1-8b",
+ "meta-llama/Llama-2-7b-chat-hf",
+ "meta-llama/Llama-3.1-70B-Instruct",
+ "meta-llama/Llama-3.1-8B-Instruct",
+ "meta-llama/Llama-3.2-11B-Vision-Instruct",
+ "meta-llama/Llama-3.2-1B-Instruct",
+ "meta-llama/Llama-3.2-3B-Instruct",
+ "meta-llama/Llama-3.2-90B-Vision-Instruct",
+ "meta-llama/Llama-Guard-3-8B",
+ "meta-llama/Meta-Llama-3-70B-Instruct",
+ "meta-llama/Meta-Llama-3-8B-Instruct",
+ "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+ "meta-llama/Meta-Llama-3.1-8B-Instruct",
+ "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+
+ # Mistral Models
+ "mistral",
+ "mistral-large",
+ "@cf/mistral/mistral-7b-instruct-v0.1",
+ "@cf/mistral/mistral-7b-instruct-v0.2-lora",
+ "@hf/mistralai/mistral-7b-instruct-v0.2",
+ "mistralai/Mistral-7B-Instruct-v0.2",
+ "mistralai/Mistral-7B-Instruct-v0.3",
+ "mistralai/Mixtral-8x22B-Instruct-v0.1",
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
+
+ # Qwen Models
+ "@cf/qwen/qwen1.5-0.5b-chat",
+ "@cf/qwen/qwen1.5-1.8b-chat",
+ "@cf/qwen/qwen1.5-7b-chat-awq",
+ "@cf/qwen/qwen1.5-14b-chat-awq",
+ "Qwen/Qwen2.5-3B-Instruct",
+ "Qwen/Qwen2.5-72B-Instruct",
+ "Qwen/Qwen2.5-Coder-32B-Instruct",
+
+ # Google/Gemini Models
+ "@cf/google/gemma-2b-it-lora",
+ "@cf/google/gemma-7b-it-lora",
+ "@hf/google/gemma-7b-it",
+ "google/gemma-1.1-2b-it",
+ "google/gemma-1.1-7b-it",
+ "gemini-pro",
+ "gemini-1.5-pro",
+ "gemini-1.5-pro-latest",
+ "gemini-1.5-flash",
+
+ # Cohere Models
+ "c4ai-aya-23-35b",
+ "c4ai-aya-23-8b",
+ "command",
+ "command-light",
+ "command-light-nightly",
+ "command-nightly",
+ "command-r",
+ "command-r-08-2024",
+ "command-r-plus",
+ "command-r-plus-08-2024",
+ "rerank-english-v2.0",
+ "rerank-english-v3.0",
+ "rerank-multilingual-v2.0",
+ "rerank-multilingual-v3.0",
+
+ # Microsoft Models
+ "@cf/microsoft/phi-2",
+ "microsoft/DialoGPT-medium",
+ "microsoft/Phi-3-medium-4k-instruct",
+ "microsoft/Phi-3-mini-4k-instruct",
+ "microsoft/Phi-3.5-mini-instruct",
+ "microsoft/WizardLM-2-8x22B",
+
+ # Yi Models
+ "01-ai/Yi-1.5-34B-Chat",
+ "01-ai/Yi-34B-Chat",
+
+ # Specialized Models and Tools
+ "@cf/deepseek-ai/deepseek-math-7b-base",
+ "@cf/deepseek-ai/deepseek-math-7b-instruct",
+ "@cf/defog/sqlcoder-7b-2",
+ "@cf/openchat/openchat-3.5-0106",
+ "@cf/thebloke/discolm-german-7b-v1-awq",
+ "@cf/tiiuae/falcon-7b-instruct",
+ "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
+ "@hf/nexusflow/starling-lm-7b-beta",
+ "@hf/nousresearch/hermes-2-pro-mistral-7b",
+ "@hf/thebloke/deepseek-coder-6.7b-base-awq",
+ "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
+ "@hf/thebloke/llama-2-13b-chat-awq",
+ "@hf/thebloke/llamaguard-7b-awq",
+ "@hf/thebloke/neural-chat-7b-v3-1-awq",
+ "@hf/thebloke/openhermes-2.5-mistral-7b-awq",
+ "@hf/thebloke/zephyr-7b-beta-awq",
+ "AndroidDeveloper",
+ "AngularJSAgent",
+ "AzureAgent",
+ "BitbucketAgent",
+ "DigitalOceanAgent",
+ "DockerAgent",
+ "ElectronAgent",
+ "ErlangAgent",
+ "FastAPIAgent",
+ "FirebaseAgent",
+ "FlaskAgent",
+ "FlutterAgent",
+ "GitAgent",
+ "GitlabAgent",
+ "GoAgent",
+ "GodotAgent",
+ "GoogleCloudAgent",
+ "HTMLAgent",
+ "HerokuAgent",
+ "ImageGeneration",
+ "JavaAgent",
+ "JavaScriptAgent",
+ "MongoDBAgent",
+ "Next.jsAgent",
+ "PyTorchAgent",
+ "PythonAgent",
+ "ReactAgent",
+ "RepoMap",
+ "SwiftDeveloper",
+ "XcodeAgent",
+ "YoutubeAgent",
+ "blackboxai",
+ "blackboxai-pro",
+ "builderAgent",
+ "dify",
+ "flux",
+ "openchat/openchat-3.6-8b",
+ "rtist",
+ "searchgpt",
+ "sur",
+ "sur-mistral",
+ "unity"
+ ]
+
+ def __init__(
+ self,
+ is_conversation: bool = True,
+ max_tokens: int = 4000, # Set a reasonable default
+ timeout: int = 30,
+ intro: str = None,
+ filepath: str = None,
+ update_file: bool = True,
+ proxies: dict = {},
+ history_offset: int = 10250,
+ act: str = None,
+ model: str = "gpt-4o",
+ system_prompt: str = "You are a helpful assistant.",
+ temperature: float = 0.5,
+ presence_penalty: int = 0,
+ frequency_penalty: int = 0,
+ top_p: float = 1,
+ ):
+ """Initializes the TypeGPT API client."""
+ if model not in self.models:
+ raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.models)}")
+
+ self.session = requests.Session()
+ self.is_conversation = is_conversation
+ self.max_tokens_to_sample = max_tokens
+ self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
+ self.timeout = timeout
+ self.last_response = {}
+ self.model = model
+ self.system_prompt = system_prompt
+ self.temperature = temperature
+ self.presence_penalty = presence_penalty
+ self.frequency_penalty = frequency_penalty
+ self.top_p = top_p
+ self.headers = {
+ "authority": "chat.typegpt.net",
+ "accept": "application/json, text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "origin": "https://chat.typegpt.net",
+ "referer": "https://chat.typegpt.net/",
+ "user-agent": LitAgent().random()
+ }
+
+ self.__available_optimizers = (
+ method
+ for method in dir(Optimizers)
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
+ )
+ Conversation.intro = (
+ AwesomePrompts().get_act(
+ act, raise_not_found=True, default=None, case_insensitive=True
+ )
+ if act
+ else intro or Conversation.intro
+ )
+ self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
+ self.conversation.history_offset = history_offset
+ self.session.proxies = proxies
+
+ def ask(
+ self,
+ prompt: str,
+ stream: bool = False,
+ raw: bool = False,
+ optimizer: str = None,
+ conversationally: bool = False,
+ ) -> Dict[str, Any] | Generator:
+ """Sends a prompt to the TypeGPT.net API and returns the response."""
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+ if optimizer:
+ if optimizer in self.__available_optimizers:
+ conversation_prompt = getattr(Optimizers, optimizer)(
+ conversation_prompt if conversationally else prompt
+ )
+ else:
+ raise exceptions.FailedToGenerateResponseError(
+ f"Optimizer is not one of {self.__available_optimizers}"
+ )
+
+ payload = {
+ "messages": [
+ {"role": "system", "content": self.system_prompt},
+ {"role": "user", "content": conversation_prompt}
+ ],
+ "stream": stream,
+ "model": self.model,
+ "temperature": self.temperature,
+ "presence_penalty": self.presence_penalty,
+ "frequency_penalty": self.frequency_penalty,
+ "top_p": self.top_p,
+ "max_tokens": self.max_tokens_to_sample,
+ }
+
+ def for_stream():
+ try:
+ response = self.session.post(
+ self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
+ )
+ except requests.exceptions.ConnectionError as ce:
+ raise exceptions.FailedToGenerateResponseError(
+ f"Network connection failed. Check your firewall or antivirus settings. Original error: {ce}"
+ ) from ce
+
+ if not response.ok:
+ raise exceptions.FailedToGenerateResponseError(
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+ )
+ message_load = ""
+ for line in response.iter_lines():
+ if line:
+ line = line.decode("utf-8")
+ if line.startswith("data: "):
+ line = line[6:] # Remove "data: " prefix
+ # Skip [DONE] message
+ if line.strip() == "[DONE]":
+ break
+ try:
+ data = json.loads(line)
+ # Extract and yield only new content
+ if 'choices' in data and len(data['choices']) > 0:
+ delta = data['choices'][0].get('delta', {})
+ if 'content' in delta:
+ new_content = delta['content']
+ message_load += new_content
+ # Yield only the new content
+ yield dict(text=new_content) if not raw else new_content
+ self.last_response = dict(text=message_load)
+ except json.JSONDecodeError:
+ continue
+ self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+
+ def for_non_stream():
+ try:
+ response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, timeout=self.timeout)
+ except requests.exceptions.ConnectionError as ce:
+ raise exceptions.FailedToGenerateResponseError(
+ f"Network connection failed. Check your firewall or antivirus settings. Original error: {ce}"
+ ) from ce
+
+ if not response.ok:
+ raise exceptions.FailedToGenerateResponseError(
+ f"Request failed - {response.status_code}: {response.text}"
+ )
+ self.last_response = response.json()
+ self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+ return self.last_response
+
+ return for_stream() if stream else for_non_stream()
+
+ def chat(
+ self,
+ prompt: str,
+ stream: bool = False,
+ optimizer: str = None,
+ conversationally: bool = False,
+ ) -> str | Generator[str, None, None]:
+ """Generate response string or stream."""
+ if stream:
+ gen = self.ask(
+ prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+ )
+ for chunk in gen:
+ yield self.get_message(chunk) # Extract text from streamed chunks
+ else:
+ return self.get_message(self.ask(prompt, stream=False, optimizer=optimizer, conversationally=conversationally))
+
+ def get_message(self, response: Dict[str, Any]) -> str:
+ """Retrieves message from response."""
+ if isinstance(response, str): # Handle raw responses
+ return response
+ elif isinstance(response, dict):
+ assert isinstance(response, dict), "Response should be of dict data-type only"
+ return response.get("text", "") # Extract text from dictionary response
+ else:
+ raise TypeError("Invalid response type. Expected str or dict.")
+
+ if __name__ == "__main__":
+ ai = TypeGPT(model="chatgpt-4o-latest")
+ response = ai.chat("hi", stream=True)
+ for chunks in response:
  print(chunks, end="", flush=True)
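
Beyond the expanded model list and the new default model, the visible functional change in typegpt.py is error handling: in 7.2, connection failures inside ask() are caught and re-raised as webscout's FailedToGenerateResponseError instead of propagating as raw requests exceptions. A minimal caller-side sketch of what that implies, assuming webscout 7.2 is installed, that the module path webscout.Provider.typegpt and the exception class webscout.exceptions.FailedToGenerateResponseError are importable exactly as used in the diff, and that chat.typegpt.net is reachable:

    from webscout.Provider.typegpt import TypeGPT
    from webscout.exceptions import FailedToGenerateResponseError

    ai = TypeGPT(model="gpt-4o")  # gpt-4o is the new default model in 7.2
    try:
        # Streaming use, mirroring the __main__ block in the diff above
        for chunk in ai.chat("hi", stream=True):
            print(chunk, end="", flush=True)
    except FailedToGenerateResponseError as err:
        # In 7.2 a ConnectionError from requests is wrapped in this exception,
        # so callers can handle provider and network failures in one place.
        print(f"Request failed: {err}")

This is a sketch under the stated assumptions, not part of the released package.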