webscout 7.0-py3-none-any.whl → 7.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (147)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +20 -0
  24. webscout/Litlogger/core/logger.py +123 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +50 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +174 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +231 -0
  31. webscout/Litlogger/styles/formats.py +377 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/Blackboxai.py +136 -137
  38. webscout/Provider/ChatGPTGratis.py +226 -0
  39. webscout/Provider/Cloudflare.py +91 -78
  40. webscout/Provider/DeepSeek.py +218 -0
  41. webscout/Provider/Deepinfra.py +59 -35
  42. webscout/Provider/Free2GPT.py +131 -124
  43. webscout/Provider/Gemini.py +100 -115
  44. webscout/Provider/Glider.py +74 -59
  45. webscout/Provider/Groq.py +30 -18
  46. webscout/Provider/Jadve.py +108 -77
  47. webscout/Provider/Llama3.py +117 -94
  48. webscout/Provider/Marcus.py +191 -137
  49. webscout/Provider/Netwrck.py +62 -50
  50. webscout/Provider/PI.py +79 -124
  51. webscout/Provider/PizzaGPT.py +129 -83
  52. webscout/Provider/QwenLM.py +311 -0
  53. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  54. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  55. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  56. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  57. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  58. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  59. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  60. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  61. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  62. webscout/Provider/TTI/artbit/__init__.py +22 -22
  63. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  64. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  65. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  66. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  67. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  68. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  69. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  70. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  71. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  72. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  73. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  74. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  75. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  76. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  77. webscout/Provider/TTI/talkai/__init__.py +4 -4
  78. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  79. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  80. webscout/Provider/TTS/deepgram.py +182 -182
  81. webscout/Provider/TTS/elevenlabs.py +136 -136
  82. webscout/Provider/TTS/gesserit.py +150 -150
  83. webscout/Provider/TTS/murfai.py +138 -138
  84. webscout/Provider/TTS/parler.py +133 -134
  85. webscout/Provider/TTS/streamElements.py +360 -360
  86. webscout/Provider/TTS/utils.py +280 -280
  87. webscout/Provider/TTS/voicepod.py +116 -116
  88. webscout/Provider/TextPollinationsAI.py +74 -47
  89. webscout/Provider/WiseCat.py +193 -0
  90. webscout/Provider/__init__.py +144 -136
  91. webscout/Provider/cerebras.py +242 -227
  92. webscout/Provider/chatglm.py +204 -204
  93. webscout/Provider/dgaf.py +67 -39
  94. webscout/Provider/gaurish.py +105 -66
  95. webscout/Provider/geminiapi.py +208 -208
  96. webscout/Provider/granite.py +223 -0
  97. webscout/Provider/hermes.py +218 -218
  98. webscout/Provider/llama3mitril.py +179 -179
  99. webscout/Provider/llamatutor.py +72 -62
  100. webscout/Provider/llmchat.py +60 -35
  101. webscout/Provider/meta.py +794 -794
  102. webscout/Provider/multichat.py +331 -230
  103. webscout/Provider/typegpt.py +359 -356
  104. webscout/Provider/yep.py +5 -5
  105. webscout/__main__.py +5 -5
  106. webscout/cli.py +319 -319
  107. webscout/conversation.py +241 -242
  108. webscout/exceptions.py +328 -328
  109. webscout/litagent/__init__.py +28 -28
  110. webscout/litagent/agent.py +2 -3
  111. webscout/litprinter/__init__.py +0 -58
  112. webscout/scout/__init__.py +8 -8
  113. webscout/scout/core.py +884 -884
  114. webscout/scout/element.py +459 -459
  115. webscout/scout/parsers/__init__.py +69 -69
  116. webscout/scout/parsers/html5lib_parser.py +172 -172
  117. webscout/scout/parsers/html_parser.py +236 -236
  118. webscout/scout/parsers/lxml_parser.py +178 -178
  119. webscout/scout/utils.py +38 -38
  120. webscout/swiftcli/__init__.py +811 -811
  121. webscout/update_checker.py +2 -12
  122. webscout/version.py +1 -1
  123. webscout/webscout_search.py +1142 -1140
  124. webscout/webscout_search_async.py +635 -635
  125. webscout/zeroart/__init__.py +54 -54
  126. webscout/zeroart/base.py +60 -60
  127. webscout/zeroart/effects.py +99 -99
  128. webscout/zeroart/fonts.py +816 -816
  129. {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
  130. webscout-7.2.dist-info/RECORD +217 -0
  131. webstoken/__init__.py +30 -30
  132. webstoken/classifier.py +189 -189
  133. webstoken/keywords.py +216 -216
  134. webstoken/language.py +128 -128
  135. webstoken/ner.py +164 -164
  136. webstoken/normalizer.py +35 -35
  137. webstoken/processor.py +77 -77
  138. webstoken/sentiment.py +206 -206
  139. webstoken/stemmer.py +73 -73
  140. webstoken/tagger.py +60 -60
  141. webstoken/tokenizer.py +158 -158
  142. webscout/Provider/RUBIKSAI.py +0 -272
  143. webscout-7.0.dist-info/RECORD +0 -199
  144. {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
  145. {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
  146. {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
  147. {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
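Of note in the list above: Litlogger is restructured in 7.2 from one large __init__.py into core, handlers, styles, and utils subpackages, and several providers below adopt it. The sketch below shows its top-level surface as exercised by the provider diffs that follow; it is inferred only from the import and call lines in this diff, not from Litlogger's full API:

    # Litlogger usage as exercised by the 7.2 provider code shown below
    # (inferred from this diff's import/call lines; the wider handler and
    # style APIs listed above are not demonstrated here).
    from webscout.Litlogger import Logger, LogFormat

    logger = Logger(name="Demo", format=LogFormat.MODERN_EMOJI)
    logger.info("initialized")            # providers log lifecycle events
    logger.debug("sending request...")    # and request/optimizer details
    logger.error("request failed")        # and failures before raising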
webscout/Provider/Cloudflare.py

@@ -8,33 +8,42 @@ from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
 import cloudscraper
+from webscout import LitAgent
+from webscout.Litlogger import Logger, LogFormat
 
 class Cloudflare(Provider):
+    """
+    Cloudflare provider to interact with Cloudflare's text generation API.
+    Includes logging capabilities using Logger and uses LitAgent for user-agent.
+    """
 
+    # Updated AVAILABLE_MODELS from given JSON data
     AVAILABLE_MODELS = [
-        "@cf/llava-hf/llava-1.5-7b-hf",
-        "@cf/unum/uform-gen2-qwen-500m",
-        "@cf/facebook/detr-resnet-50",
-        "@cf/facebook/bart-large-cnn",
         "@hf/thebloke/deepseek-coder-6.7b-base-awq",
         "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
-        "@cf/deepseek-ai/deepseek-math-7b-base",
         "@cf/deepseek-ai/deepseek-math-7b-instruct",
+        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
         "@cf/thebloke/discolm-german-7b-v1-awq",
         "@cf/tiiuae/falcon-7b-instruct",
-        "@cf/google/gemma-2b-it-lora",
         "@hf/google/gemma-7b-it",
-        "@cf/google/gemma-7b-it-lora",
         "@hf/nousresearch/hermes-2-pro-mistral-7b",
         "@hf/thebloke/llama-2-13b-chat-awq",
-        "@cf/meta-llama/llama-2-7b-chat-hf-lora",
+        "@cf/meta/llama-2-7b-chat-fp16",
+        "@cf/meta/llama-2-7b-chat-int8",
         "@cf/meta/llama-3-8b-instruct",
         "@cf/meta/llama-3-8b-instruct-awq",
         "@cf/meta/llama-3.1-8b-instruct",
+        "@cf/meta/llama-3.1-8b-instruct-awq",
+        "@cf/meta/llama-3.1-8b-instruct-fp8",
+        "@cf/meta/llama-3.2-11b-vision-instruct",
+        "@cf/meta/llama-3.2-1b-instruct",
+        "@cf/meta/llama-3.2-3b-instruct",
+        "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
         "@hf/thebloke/llamaguard-7b-awq",
+        "@hf/meta-llama/meta-llama-3-8b-instruct",
+        "@cf/mistral/mistral-7b-instruct-v0.1",
         "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
         "@hf/mistral/mistral-7b-instruct-v0.2",
-        "@cf/mistral/mistral-7b-instruct-v0.2-lora",
         "@hf/thebloke/neural-chat-7b-v3-1-awq",
         "@cf/openchat/openchat-3.5-0106",
         "@hf/thebloke/openhermes-2.5-mistral-7b-awq",
@@ -61,26 +70,25 @@ class Cloudflare(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "@cf/meta/llama-3.1-8b-instruct",
-        system_prompt: str = "You are a helpful assistant."
+        model: str = "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
+        system_prompt: str = "You are a helpful assistant.",
+        logging: bool = False
     ):
-        """Instantiates Cloudflare
+        """Instantiates Cloudflare Provider
 
         Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): Model to use for generating text.
-                Defaults to "@cf/meta/llama-3.1-8b-instruct".
-                Choose from AVAILABLE_MODELS.
-            system_prompt (str, optional): System prompt for Cloudflare.
-                Defaults to "You are a helpful assistant.".
+            is_conversation (bool, optional): Flag for conversational mode. Defaults to True.
+            max_tokens (int, optional): Max tokens to generate. Defaults to 600.
+            timeout (int, optional): HTTP request timeout. Defaults to 30.
+            intro (str, optional): Introductory prompt. Defaults to None.
+            filepath (str, optional): File path for conversation history. Defaults to None.
+            update_file (bool, optional): Update history file flag. Defaults to True.
+            proxies (dict, optional): Request proxies. Defaults to {}.
+            history_offset (int, optional): Chat history limit. Defaults to 10250.
+            act (str, optional): Awesome prompt key/index. Defaults to None.
+            model (str, optional): Model to use. Defaults to "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b".
+            system_prompt (str, optional): System prompt for conversation. Defaults to "You are a helpful assistant.".
+            logging (bool, optional): Enable logging if True. Defaults to False.
         """
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
@@ -94,6 +102,7 @@ class Cloudflare(Provider):
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
+
         self.headers = {
             'Accept': 'text/event-stream',
             'Accept-Encoding': 'gzip, deflate, br, zstd',
@@ -108,7 +117,7 @@
             'Sec-Fetch-Dest': 'empty',
             'Sec-Fetch-Mode': 'cors',
             'Sec-Fetch-Site': 'same-origin',
-            'User-Agent': webscout.LitAgent().random()
+            'User-Agent': LitAgent().random()
         }
 
         self.cookies = {
@@ -122,21 +131,27 @@
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
-        # FIX: Initialize the session here
-        self.session = cloudscraper.create_scraper()
+
+        # Initialize session and apply proxies
+        self.session = cloudscraper.create_scraper()
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
         Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
+            AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
+            if act else intro or Conversation.intro
         )
+        self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
+
+        # Initialize logger if logging is enabled
+        self.logger = Logger(
+            name="Cloudflare",
+            format=LogFormat.MODERN_EMOJI,
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info("Cloudflare initialized successfully")
 
     def ask(
         self,
@@ -149,14 +164,13 @@ class Cloudflare(Provider):
         """Chat with AI
 
         Args:
-            prompt (str): Prompt to be send.
+            prompt (str): Prompt to be sent.
             stream (bool, optional): Whether to stream the response. Defaults to False.
-            raw (bool, optional): Whether to return the raw response. Defaults to False.
-            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
-            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
-
+            raw (bool, optional): Return raw response. Defaults to False.
+            optimizer (str, optional): Optimizer to use. Defaults to None.
+            conversationally (bool, optional): Conversational mode flag. Defaults to False.
         Returns:
-            The response from the API.
+            dict: Response from the API.
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -164,10 +178,12 @@
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")
 
         payload = {
             "messages": [
@@ -181,11 +197,19 @@
         }
 
         def for_stream():
+            if self.logger:
+                self.logger.debug("Sending streaming request to Cloudflare API...")
             response = self.scraper.post(
-                self.chat_endpoint, headers=self.headers, cookies=self.cookies, data=json.dumps(payload), stream=True, timeout=self.timeout
+                self.chat_endpoint,
+                headers=self.headers,
+                cookies=self.cookies,
+                data=json.dumps(payload),
+                stream=True,
+                timeout=self.timeout
             )
-
             if not response.ok:
+                if self.logger:
+                    self.logger.error(f"Request failed: ({response.status_code}, {response.reason})")
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason})"
                 )
@@ -197,9 +221,9 @@
                     streaming_response += content
                     yield content if raw else dict(text=content)
             self.last_response.update(dict(text=streaming_response))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+            if self.logger:
+                self.logger.info("Streaming response completed successfully")
 
         def for_non_stream():
             for _ in for_stream():
@@ -215,48 +239,37 @@
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response `str`
+        """Generate response string from chat
+
         Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Stream response flag. Defaults to False.
+            optimizer (str, optional): Optimizer name. Defaults to None.
+            conversationally (bool, optional): Conversational mode flag. Defaults to False.
         Returns:
-            str: Response generated
+            str: Generated response.
         """
-
         def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                 yield self.get_message(response)
-
         def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
+            return self.get_message(self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally))
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
+        """Extracts the message text from the response
 
         Args:
-            response (dict): Response generated by `self.ask`
-
+            response (dict): API response.
         Returns:
-            str: Message extracted
+            str: Extracted text.
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
+
 if __name__ == '__main__':
     from rich import print
-    ai = Cloudflare(timeout=5000)
+    ai = Cloudflare(timeout=5000, logging=True)
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
webscout/Provider/DeepSeek.py (new file)

@@ -0,0 +1,218 @@
+
+import requests
+import json
+from typing import Any, Dict, Optional, Generator
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.Litlogger import Logger, LogFormat
+from webscout import LitAgent as Lit
+
+class DeepSeek(Provider):
+    """
+    A class to interact with the DeepSeek AI API.
+    """
+
+    AVAILABLE_MODELS = {
+        "deepseek-v3": "deepseek-v3",
+        "deepseek-r1": "deepseek-r1",
+        "deepseek-llm-67b-chat": "deepseek-llm-67b-chat"
+    }
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "deepseek-r1",  # Default model
+        system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
+    ):
+        """
+        Initializes the DeepSeek AI API with given parameters.
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS.keys()}")
+
+        # Initialize logging
+        self.logger = Logger(
+            name="DeepSeek",
+            format=LogFormat.MODERN_EMOJI,
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"Initializing DeepSeek with model: {model}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://www.deepseekapp.io/v1/chat/completions"
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt
+        self.model = model
+        self.api_key = "skgadi_mare_2_seater"
+        self.headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {self.api_key}",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """Chat with AI"""
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
+            else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer: {optimizer}")
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        messages = [
+            {"role": "system", "content": self.system_prompt},
+            {"role": "user", "content": conversation_prompt}
+        ]
+
+        payload = {
+            "model": self.model,
+            "messages": messages
+        }
+
+        def for_stream():
+            if self.logger:
+                self.logger.debug("Sending streaming request to DeepInfra API...")
+            try:
+                with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
+                    if response.status_code != 200:
+                        if self.logger:
+                            self.logger.error(f"Request failed with status code {response.status_code}")
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Request failed with status code {response.status_code}"
+                        )
+
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            line = line.strip()
+                            if line.startswith("data: "):
+                                json_str = line[6:]  # Remove "data: " prefix
+                                if json_str == "[DONE]":
+                                    break
+                                try:
+                                    json_data = json.loads(json_str)
+                                    if 'choices' in json_data:
+                                        choice = json_data['choices'][0]
+                                        if 'delta' in choice and 'content' in choice['delta']:
+                                            content = choice['delta']['content']
+                                            streaming_text += content
+                                            resp = {"text": content}
+                                            yield resp if raw else resp
+                                except json.JSONDecodeError:
+                                    if self.logger:
+                                        self.logger.error("JSON decode error in streaming data")
+                                    continue
+
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
+                    if self.logger:
+                        self.logger.info("Streaming response completed successfully")
+
+            except requests.RequestException as e:
+                if self.logger:
+                    self.logger.error(f"Request failed: {e}")
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response string"""
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    from rich import print
+
+    # Example usage
+    ai = DeepSeek(system_prompt="You are an expert AI assistant.", logging=True)
+
+    try:
+        # Send a prompt and stream the response
+        response = ai.chat("Write me a short poem about AI.", stream=True)
+        for chunk in response:
+            print(chunk, end="", flush=True)
+    except Exception as e:
+        print(f"Error: {e}")