webscout-7.0-py3-none-any.whl → webscout-7.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (147)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +20 -0
  24. webscout/Litlogger/core/logger.py +123 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +50 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +174 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +231 -0
  31. webscout/Litlogger/styles/formats.py +377 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/Blackboxai.py +136 -137
  38. webscout/Provider/ChatGPTGratis.py +226 -0
  39. webscout/Provider/Cloudflare.py +91 -78
  40. webscout/Provider/DeepSeek.py +218 -0
  41. webscout/Provider/Deepinfra.py +59 -35
  42. webscout/Provider/Free2GPT.py +131 -124
  43. webscout/Provider/Gemini.py +100 -115
  44. webscout/Provider/Glider.py +74 -59
  45. webscout/Provider/Groq.py +30 -18
  46. webscout/Provider/Jadve.py +108 -77
  47. webscout/Provider/Llama3.py +117 -94
  48. webscout/Provider/Marcus.py +191 -137
  49. webscout/Provider/Netwrck.py +62 -50
  50. webscout/Provider/PI.py +79 -124
  51. webscout/Provider/PizzaGPT.py +129 -83
  52. webscout/Provider/QwenLM.py +311 -0
  53. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  54. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  55. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  56. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  57. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  58. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  59. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  60. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  61. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  62. webscout/Provider/TTI/artbit/__init__.py +22 -22
  63. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  64. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  65. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  66. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  67. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  68. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  69. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  70. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  71. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  72. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  73. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  74. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  75. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  76. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  77. webscout/Provider/TTI/talkai/__init__.py +4 -4
  78. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  79. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  80. webscout/Provider/TTS/deepgram.py +182 -182
  81. webscout/Provider/TTS/elevenlabs.py +136 -136
  82. webscout/Provider/TTS/gesserit.py +150 -150
  83. webscout/Provider/TTS/murfai.py +138 -138
  84. webscout/Provider/TTS/parler.py +133 -134
  85. webscout/Provider/TTS/streamElements.py +360 -360
  86. webscout/Provider/TTS/utils.py +280 -280
  87. webscout/Provider/TTS/voicepod.py +116 -116
  88. webscout/Provider/TextPollinationsAI.py +74 -47
  89. webscout/Provider/WiseCat.py +193 -0
  90. webscout/Provider/__init__.py +144 -136
  91. webscout/Provider/cerebras.py +242 -227
  92. webscout/Provider/chatglm.py +204 -204
  93. webscout/Provider/dgaf.py +67 -39
  94. webscout/Provider/gaurish.py +105 -66
  95. webscout/Provider/geminiapi.py +208 -208
  96. webscout/Provider/granite.py +223 -0
  97. webscout/Provider/hermes.py +218 -218
  98. webscout/Provider/llama3mitril.py +179 -179
  99. webscout/Provider/llamatutor.py +72 -62
  100. webscout/Provider/llmchat.py +60 -35
  101. webscout/Provider/meta.py +794 -794
  102. webscout/Provider/multichat.py +331 -230
  103. webscout/Provider/typegpt.py +359 -356
  104. webscout/Provider/yep.py +5 -5
  105. webscout/__main__.py +5 -5
  106. webscout/cli.py +319 -319
  107. webscout/conversation.py +241 -242
  108. webscout/exceptions.py +328 -328
  109. webscout/litagent/__init__.py +28 -28
  110. webscout/litagent/agent.py +2 -3
  111. webscout/litprinter/__init__.py +0 -58
  112. webscout/scout/__init__.py +8 -8
  113. webscout/scout/core.py +884 -884
  114. webscout/scout/element.py +459 -459
  115. webscout/scout/parsers/__init__.py +69 -69
  116. webscout/scout/parsers/html5lib_parser.py +172 -172
  117. webscout/scout/parsers/html_parser.py +236 -236
  118. webscout/scout/parsers/lxml_parser.py +178 -178
  119. webscout/scout/utils.py +38 -38
  120. webscout/swiftcli/__init__.py +811 -811
  121. webscout/update_checker.py +2 -12
  122. webscout/version.py +1 -1
  123. webscout/webscout_search.py +1142 -1140
  124. webscout/webscout_search_async.py +635 -635
  125. webscout/zeroart/__init__.py +54 -54
  126. webscout/zeroart/base.py +60 -60
  127. webscout/zeroart/effects.py +99 -99
  128. webscout/zeroart/fonts.py +816 -816
  129. {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
  130. webscout-7.2.dist-info/RECORD +217 -0
  131. webstoken/__init__.py +30 -30
  132. webstoken/classifier.py +189 -189
  133. webstoken/keywords.py +216 -216
  134. webstoken/language.py +128 -128
  135. webstoken/ner.py +164 -164
  136. webstoken/normalizer.py +35 -35
  137. webstoken/processor.py +77 -77
  138. webstoken/sentiment.py +206 -206
  139. webstoken/stemmer.py +73 -73
  140. webstoken/tagger.py +60 -60
  141. webstoken/tokenizer.py +158 -158
  142. webscout/Provider/RUBIKSAI.py +0 -272
  143. webscout-7.0.dist-info/RECORD +0 -199
  144. {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
  145. {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
  146. {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
  147. {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
@@ -1,137 +1,191 @@
1
- import requests
2
- import json
3
- from typing import Any, Dict, Optional, Generator
4
-
5
- from webscout.AIutel import Optimizers
6
- from webscout.AIutel import Conversation
7
- from webscout.AIutel import AwesomePrompts
8
- from webscout.AIbase import Provider
9
- from webscout import exceptions
10
- from webscout import LitAgent as Lit
11
-
12
- class Marcus(Provider):
13
- """
14
- This class provides methods for interacting with the AskMarcus API.
15
- Improved to match webscout provider standards.
16
- """
17
-
18
- def __init__(
19
- self,
20
- is_conversation: bool = True,
21
- max_tokens: int = 2048, # Added max_tokens parameter
22
- timeout: int = 30,
23
- intro: str = None,
24
- filepath: str = None,
25
- update_file: bool = True,
26
- proxies: dict = {},
27
- history_offset: int = 10250,
28
- act: str = None,
29
- ):
30
- """Initializes the Marcus API."""
31
- self.session = requests.Session()
32
- self.is_conversation = is_conversation
33
- self.max_tokens_to_sample = max_tokens
34
- self.api_endpoint = "https://www.askmarcus.app/api/response"
35
- self.timeout = timeout
36
- self.last_response = {}
37
- self.headers = {
38
- 'content-type': 'application/json',
39
- 'accept': '*/*',
40
- 'origin': 'https://www.askmarcus.app',
41
- 'referer': 'https://www.askmarcus.app/chat',
42
- 'user-agent': Lit().random(),
43
- }
44
- self.__available_optimizers = (
45
- method
46
- for method in dir(Optimizers)
47
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
48
- )
49
- Conversation.intro = (
50
- AwesomePrompts().get_act(
51
- act, raise_not_found=True, default=None, case_insensitive=True
52
- )
53
- if act
54
- else intro or Conversation.intro
55
- )
56
- self.conversation = Conversation(
57
- is_conversation, self.max_tokens_to_sample, filepath, update_file
58
- )
59
- self.conversation.history_offset = history_offset
60
- self.session.proxies = proxies
61
-
62
- def ask(
63
- self,
64
- prompt: str,
65
- stream: bool = False,
66
- raw: bool = False,
67
- optimizer: str = None,
68
- conversationally: bool = False,
69
- ) -> Dict[str, Any] | Generator[str, None, None]:
70
- """Sends a prompt to the AskMarcus API and returns the response."""
71
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
72
- if optimizer:
73
- if optimizer in self.__available_optimizers:
74
- conversation_prompt = getattr(Optimizers, optimizer)(
75
- conversation_prompt if conversationally else prompt
76
- )
77
- else:
78
- raise exceptions.FailedToGenerateResponseError(
79
- f"Optimizer is not one of {self.__available_optimizers}"
80
- )
81
-
82
- data = {"message": conversation_prompt}
83
-
84
- def for_stream():
85
- try:
86
- with requests.post(self.api_endpoint, headers=self.headers, json=data, stream=True, timeout=self.timeout) as response:
87
- response.raise_for_status()
88
- for line in response.iter_lines():
89
- if line:
90
- yield line.decode('utf-8')
91
- self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
92
-
93
- except requests.exceptions.RequestException as e:
94
- raise exceptions.ProviderConnectionError(f"Error connecting to Marcus: {str(e)}")
95
-
96
- def for_non_stream():
97
- full_response = ""
98
- for line in for_stream():
99
- full_response += line
100
- self.last_response = {"text": full_response}
101
- return self.last_response
102
-
103
- return for_stream() if stream else for_non_stream()
104
-
105
- def chat(
106
- self,
107
- prompt: str,
108
- stream: bool = False,
109
- optimizer: str = None,
110
- conversationally: bool = False,
111
- ) -> str | Generator[str, None, None]:
112
- """Generates a response from the AskMarcus API."""
113
-
114
- def for_stream():
115
- for response_chunk in self.ask(
116
- prompt, stream=True, optimizer=optimizer, conversationally=conversationally
117
- ):
118
- yield response_chunk
119
-
120
- def for_non_stream():
121
- response = self.ask(
122
- prompt, stream=False, optimizer=optimizer, conversationally=conversationally
123
- )
124
- return self.get_message(response)
125
-
126
- return for_stream() if stream else for_non_stream()
127
-
128
- def get_message(self, response: Dict[str, Any]) -> str:
129
- """Extracts the message from the API response."""
130
- assert isinstance(response, dict), "Response should be of dict data-type only"
131
- return response.get("text", "")
132
-
133
- if __name__ == '__main__':
134
- ai = Marcus(timeout=30)
135
- response = ai.chat("Tell me about India", stream=True)
136
- for chunk in response:
137
- print(chunk, end="", flush=True)
1
+ import requests
2
+ import json
3
+ from typing import Any, Dict, Optional, Generator
4
+
5
+ from webscout.AIutel import Optimizers
6
+ from webscout.AIutel import Conversation
7
+ from webscout.AIutel import AwesomePrompts
8
+ from webscout.AIbase import Provider
9
+ from webscout import exceptions
10
+ from webscout.Litlogger import Logger, LogFormat
11
+ from webscout import LitAgent as Lit
12
+
13
+ class Marcus(Provider):
14
+ """
15
+ This class provides methods for interacting with the AskMarcus API.
16
+ Improved to match webscout provider standards with comprehensive logging.
17
+ """
18
+
19
+ def __init__(
20
+ self,
21
+ is_conversation: bool = True,
22
+ max_tokens: int = 2048,
23
+ timeout: int = 30,
24
+ intro: str = None,
25
+ filepath: str = None,
26
+ update_file: bool = True,
27
+ proxies: dict = {},
28
+ history_offset: int = 10250,
29
+ act: str = None,
30
+ logging: bool = False
31
+ ):
32
+ """Initializes the Marcus API with logging capabilities."""
33
+ self.logger = Logger(
34
+ name="Marcus",
35
+ format=LogFormat.MODERN_EMOJI,
36
+ ) if logging else None
37
+
38
+ if self.logger:
39
+ self.logger.info("Initializing Marcus API")
40
+
41
+ self.session = requests.Session()
42
+ self.is_conversation = is_conversation
43
+ self.max_tokens_to_sample = max_tokens
44
+ self.api_endpoint = "https://www.askmarcus.app/api/response"
45
+ self.timeout = timeout
46
+ self.last_response = {}
47
+
48
+ self.headers = {
49
+ 'content-type': 'application/json',
50
+ 'accept': '*/*',
51
+ 'origin': 'https://www.askmarcus.app',
52
+ 'referer': 'https://www.askmarcus.app/chat',
53
+ 'user-agent': Lit().random(),
54
+ }
55
+
56
+ self.__available_optimizers = (
57
+ method
58
+ for method in dir(Optimizers)
59
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
60
+ )
61
+
62
+ Conversation.intro = (
63
+ AwesomePrompts().get_act(
64
+ act, raise_not_found=True, default=None, case_insensitive=True
65
+ )
66
+ if act
67
+ else intro or Conversation.intro
68
+ )
69
+
70
+ self.conversation = Conversation(
71
+ is_conversation, self.max_tokens_to_sample, filepath, update_file
72
+ )
73
+ self.conversation.history_offset = history_offset
74
+ self.session.proxies = proxies
75
+
76
+ if self.logger:
77
+ self.logger.info("Marcus API initialized successfully")
78
+
79
+ def ask(
80
+ self,
81
+ prompt: str,
82
+ stream: bool = False,
83
+ raw: bool = False,
84
+ optimizer: str = None,
85
+ conversationally: bool = False,
86
+ ) -> Dict[str, Any] | Generator[str, None, None]:
87
+ """Sends a prompt to the AskMarcus API and returns the response with logging."""
88
+ if self.logger:
89
+ self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
90
+ self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
91
+
92
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
93
+ if optimizer:
94
+ if optimizer in self.__available_optimizers:
95
+ conversation_prompt = getattr(Optimizers, optimizer)(
96
+ conversation_prompt if conversationally else prompt
97
+ )
98
+ if self.logger:
99
+ self.logger.debug(f"Applied optimizer: {optimizer}")
100
+ else:
101
+ if self.logger:
102
+ self.logger.error(f"Invalid optimizer requested: {optimizer}")
103
+ raise exceptions.FailedToGenerateResponseError(
104
+ f"Optimizer is not one of {self.__available_optimizers}"
105
+ )
106
+
107
+ data = {"message": conversation_prompt}
108
+
109
+ def for_stream():
110
+ try:
111
+ if self.logger:
112
+ self.logger.debug("Initiating streaming request to API")
113
+
114
+ with requests.post(
115
+ self.api_endpoint,
116
+ headers=self.headers,
117
+ json=data,
118
+ stream=True,
119
+ timeout=self.timeout
120
+ ) as response:
121
+ response.raise_for_status()
122
+
123
+ if self.logger:
124
+ self.logger.info(f"API connection established successfully. Status: {response.status_code}")
125
+
126
+ for line in response.iter_lines():
127
+ if line:
128
+ yield line.decode('utf-8')
129
+
130
+ self.conversation.update_chat_history(
131
+ prompt, self.get_message(self.last_response)
132
+ )
133
+
134
+ except requests.exceptions.RequestException as e:
135
+ if self.logger:
136
+ self.logger.error(f"API request failed: {str(e)}")
137
+ raise exceptions.ProviderConnectionError(f"Error connecting to Marcus: {str(e)}")
138
+
139
+ def for_non_stream():
140
+ if self.logger:
141
+ self.logger.debug("Processing non-streaming request")
142
+
143
+ full_response = ""
144
+ for line in for_stream():
145
+ full_response += line
146
+ self.last_response = {"text": full_response}
147
+
148
+ if self.logger:
149
+ self.logger.debug("Response processing completed")
150
+
151
+ return self.last_response
152
+
153
+ return for_stream() if stream else for_non_stream()
154
+
155
+ def chat(
156
+ self,
157
+ prompt: str,
158
+ stream: bool = False,
159
+ optimizer: str = None,
160
+ conversationally: bool = False,
161
+ ) -> str | Generator[str, None, None]:
162
+ """Generates a response from the AskMarcus API with logging."""
163
+ if self.logger:
164
+ self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
165
+
166
+ def for_stream():
167
+ for response_chunk in self.ask(
168
+ prompt, stream=True, optimizer=optimizer, conversationally=conversationally
169
+ ):
170
+ yield response_chunk
171
+
172
+ def for_non_stream():
173
+ response = self.ask(
174
+ prompt, stream=False, optimizer=optimizer, conversationally=conversationally
175
+ )
176
+ return self.get_message(response)
177
+
178
+ return for_stream() if stream else for_non_stream()
179
+
180
+ def get_message(self, response: Dict[str, Any]) -> str:
181
+ """Extracts the message from the API response."""
182
+ assert isinstance(response, dict), "Response should be of dict data-type only"
183
+ return response.get("text", "")
184
+
185
+ if __name__ == "__main__":
186
+ from rich import print
187
+ # Enable logging for testing
188
+ ai = Marcus(logging=True)
189
+ response = ai.chat(input(">>> "), stream=True)
190
+ for chunk in response:
191
+ print(chunk, end="", flush=True)
@@ -2,41 +2,39 @@ import time
2
2
  import uuid
3
3
  import requests
4
4
  import json
5
-
6
5
  from typing import Any, Dict, Optional, Generator, Union
7
6
  from dataclasses import dataclass, asdict
8
7
  from datetime import date
9
-
10
8
  from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
11
9
  from webscout.AIbase import Provider
12
10
  from webscout import exceptions
13
- from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
11
+ from webscout.Litlogger import Logger, LogFormat
14
12
  from webscout.litagent import LitAgent
15
13
 
16
-
17
14
  class Netwrck(Provider):
18
15
  """
19
16
  A class to interact with the Netwrck.com API. Supports streaming.
20
17
  """
21
- greeting = """An unknown multiverse phenomenon occurred, and you found yourself in a dark space. You looked around and found a source of light in a distance. You approached the light and *whoosh*....\nChoose your origin:\na) As a baby who just got birthed, your fate unknown\nb) As an amnesic stranded on an uninhabited island with mysterious ruins\nc) As an abandoned product of a forbidden experiment\nd) As a slave being sold at an auction\ne) Extremely Chaotic Randomizer\nOr, dive into your own fantasy."""
18
+ greeting = """Hello! I'm a helpful assistant. How can I help you today?"""
22
19
 
23
20
  AVAILABLE_MODELS = {
24
- "lumimaid": "neversleep/llama-3.1-lumimaid-8b",
21
+ "lumimaid": "neversleep/llama-3-lumimaid-8b:extended",
25
22
  "grok": "x-ai/grok-2",
26
23
  "claude": "anthropic/claude-3.5-sonnet:beta",
27
24
  "euryale": "sao10k/l3-euryale-70b",
28
25
  "gpt4mini": "openai/gpt-4o-mini",
29
26
  "mythomax": "gryphe/mythomax-l2-13b",
30
27
  "gemini": "google/gemini-pro-1.5",
31
- "lumimaid70b": "neversleep/llama-3.1-lumimaid-70b",
32
28
  "nemotron": "nvidia/llama-3.1-nemotron-70b-instruct",
29
+ "deepseek-r1": "deepseek/deepseek-r1",
30
+ "deepseek": "deepseek/deepseek-chat",
33
31
  }
34
32
 
35
33
  def __init__(
36
34
  self,
37
35
  model: str = "claude",
38
36
  is_conversation: bool = True,
39
- max_tokens: int = 2048,
37
+ max_tokens: int = 4096,
40
38
  timeout: int = 30,
41
39
  intro: Optional[str] = None,
42
40
  filepath: Optional[str] = None,
@@ -50,8 +48,18 @@ class Netwrck(Provider):
50
48
  logging: bool = False
51
49
  ):
52
50
  """Initializes the Netwrck API client."""
51
+ # Initialize logger first for initialization logging
52
+ self.logger = Logger(
53
+ name="Netwrck",
54
+ format=LogFormat.MODERN_EMOJI,
55
+
56
+ ) if logging else None
57
+
53
58
  if model not in self.AVAILABLE_MODELS:
54
- raise ValueError(f"Invalid model: {model}. Choose from: {list(self.AVAILABLE_MODELS.keys())}")
59
+ error_msg = f"Invalid model: {model}. Choose from: {list(self.AVAILABLE_MODELS.keys())}"
60
+ if self.logger:
61
+ self.logger.error(error_msg)
62
+ raise ValueError(error_msg)
55
63
 
56
64
  self.model = model
57
65
  self.model_name = self.AVAILABLE_MODELS[model]
@@ -64,9 +72,7 @@ class Netwrck(Provider):
64
72
  self.temperature = temperature
65
73
  self.top_p = top_p
66
74
 
67
- # Initialize LitAgent for user agent generation
68
75
  self.agent = LitAgent()
69
-
70
76
  self.headers = {
71
77
  'authority': 'netwrck.com',
72
78
  'accept': '*/*',
@@ -76,6 +82,7 @@ class Netwrck(Provider):
76
82
  'referer': 'https://netwrck.com/',
77
83
  'user-agent': self.agent.random()
78
84
  }
85
+
79
86
  self.session.headers.update(self.headers)
80
87
  self.proxies = proxies or {}
81
88
 
@@ -84,16 +91,16 @@ class Netwrck(Provider):
84
91
  if act
85
92
  else intro or Conversation.intro
86
93
  )
94
+
87
95
  self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
88
96
  self.conversation.history_offset = history_offset
89
97
  self.__available_optimizers = (
90
- method
91
- for method in dir(Optimizers)
98
+ method for method in dir(Optimizers)
92
99
  if callable(getattr(Optimizers, method)) and not method.startswith("__")
93
100
  )
94
101
 
95
- # Initialize logger
96
- self.logger = LitLogger(name="Netwrck", format=LogFormat.MODERN_EMOJI, color_scheme=ColorScheme.CYBERPUNK) if logging else None
102
+ if self.logger:
103
+ self.logger.info(f"Initialized Netwrck with model: {self.model_name}")
97
104
 
98
105
  def ask(
99
106
  self,
@@ -104,22 +111,20 @@ class Netwrck(Provider):
104
111
  conversationally: bool = False,
105
112
  ) -> Union[Dict[str, Any], Generator]:
106
113
  """Sends a prompt to the Netwrck API and returns the response."""
107
-
108
- if self.logger:
109
- self.logger.debug(f"ask() called with prompt: {prompt}")
114
+ if optimizer and optimizer not in self.__available_optimizers:
115
+ error_msg = f"Optimizer is not one of {self.__available_optimizers}"
116
+ if self.logger:
117
+ self.logger.error(f"Invalid optimizer requested: {optimizer}")
118
+ raise exceptions.FailedToGenerateResponseError(error_msg)
110
119
 
111
120
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
112
121
  if optimizer:
113
- if optimizer in self.__available_optimizers:
114
- conversation_prompt = getattr(Optimizers, optimizer)(
115
- conversation_prompt if conversationally else prompt
116
- )
117
- else:
118
- if self.logger:
119
- self.logger.error(f"Invalid optimizer: {optimizer}")
120
- raise exceptions.FailedToGenerateResponseError(
121
- f"Optimizer is not one of {self.__available_optimizers}"
122
- )
122
+ conversation_prompt = getattr(Optimizers, optimizer)(
123
+ conversation_prompt if conversationally else prompt
124
+ )
125
+ if self.logger:
126
+ self.logger.debug(f"Applied optimizer: {optimizer}")
127
+
123
128
  payload = {
124
129
  "query": prompt,
125
130
  "context": self.system_prompt,
@@ -128,6 +133,9 @@ class Netwrck(Provider):
128
133
  "greeting": self.greeting
129
134
  }
130
135
 
136
+ if self.logger:
137
+ self.logger.debug(f"Sending request to Netwrck API [stream={stream}]")
138
+
131
139
  def for_stream():
132
140
  try:
133
141
  response = self.session.post(
@@ -140,26 +148,23 @@ class Netwrck(Provider):
140
148
  )
141
149
  response.raise_for_status()
142
150
 
143
- # Initialize an empty string to accumulate the streaming text
144
151
  streaming_text = ""
145
152
  for line in response.iter_lines():
146
153
  if line:
147
154
  decoded_line = line.decode('utf-8').strip('"')
148
- streaming_text += decoded_line # Accumulate the text
149
- yield {"text": decoded_line} # Yield each chunk
155
+ streaming_text += decoded_line
156
+ yield {"text": decoded_line}
150
157
 
151
- # Optionally, you can update the conversation history with the full streaming text
152
158
  self.conversation.update_chat_history(payload["query"], streaming_text)
153
159
 
154
- except Exception as e:
160
+ except requests.exceptions.RequestException as e:
155
161
  if self.logger:
156
- self.logger.error(f"Error communicating with Netwrck: {e}")
157
- raise exceptions.ProviderConnectionError(f"Error communicating with Netwrck: {e}") from e
158
-
162
+ self.logger.error(f"Network error: {str(e)}")
163
+ raise exceptions.ProviderConnectionError(f"Network error: {str(e)}") from e
159
164
  except Exception as e:
160
165
  if self.logger:
161
- self.logger.error(f"Error communicating with Netwrck: {e}")
162
- raise exceptions.ProviderConnectionError(f"Error communicating with Netwrck: {e}") from e
166
+ self.logger.error(f"Unexpected error: {str(e)}")
167
+ raise exceptions.ProviderConnectionError(f"Unexpected error: {str(e)}") from e
163
168
 
164
169
  def for_non_stream():
165
170
  try:
@@ -171,16 +176,24 @@ class Netwrck(Provider):
171
176
  timeout=self.timeout,
172
177
  )
173
178
  response.raise_for_status()
174
- # print(response.text)
179
+
180
+ if self.logger:
181
+ self.logger.debug(f"Response status: {response.status_code}")
182
+
175
183
  text = response.text.strip('"')
176
184
  self.last_response = {"text": text}
177
185
  self.conversation.update_chat_history(prompt, text)
178
186
 
179
187
  return self.last_response
188
+
189
+ except requests.exceptions.RequestException as e:
190
+ if self.logger:
191
+ self.logger.error(f"Network error: {str(e)}")
192
+ raise exceptions.FailedToGenerateResponseError(f"Network error: {str(e)}") from e
180
193
  except Exception as e:
181
194
  if self.logger:
182
- self.logger.error(f"Error communicating with Netwrck: {e}")
183
- raise exceptions.ProviderConnectionError(f"Error communicating with Netwrck: {e}") from e
195
+ self.logger.error(f"Unexpected error: {str(e)}")
196
+ raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {str(e)}") from e
184
197
 
185
198
  return for_stream() if stream else for_non_stream()
186
199
 
@@ -193,7 +206,7 @@ class Netwrck(Provider):
193
206
  ) -> str:
194
207
  """Generates a response from the Netwrck API."""
195
208
  if self.logger:
196
- self.logger.debug(f"chat() called with prompt: {prompt}")
209
+ self.logger.debug(f"Processing chat request [stream={stream}]")
197
210
 
198
211
  def for_stream():
199
212
  for response in self.ask(
@@ -219,21 +232,20 @@ class Netwrck(Provider):
219
232
  def get_message(self, response: Dict[str, Any]) -> str:
220
233
  """Retrieves message only from response"""
221
234
  assert isinstance(response, dict), "Response should be of dict data-type only"
222
- return response["text"]
235
+ return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
223
236
 
224
- # Example Usage:
225
237
  if __name__ == "__main__":
226
238
  from rich import print
227
239
 
228
- # Non-streaming example
240
+ # Example with logging enabled
241
+ netwrck = Netwrck(model="claude", logging=False)
242
+
229
243
  print("Non-Streaming Response:")
230
- netwrck = Netwrck(model="claude", logging=True)
231
- response = netwrck.chat("tell me about Russia")
244
+ response = netwrck.chat("Tell me about Russia")
232
245
  print(response)
233
246
 
234
- # Streaming example
235
247
  print("\nStreaming Response:")
236
- response = netwrck.chat("tell me about India", stream=True)
248
+ response = netwrck.chat("Tell me about India", stream=True)
237
249
  for chunk in response:
238
250
  print(chunk, end="", flush=True)
239
- print() # Add a newline at the end
251
+ print()