webscout 7.0__py3-none-any.whl → 7.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory for details.

Files changed (147)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +20 -0
  24. webscout/Litlogger/core/logger.py +123 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +50 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +174 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +231 -0
  31. webscout/Litlogger/styles/formats.py +377 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/Blackboxai.py +136 -137
  38. webscout/Provider/ChatGPTGratis.py +226 -0
  39. webscout/Provider/Cloudflare.py +91 -78
  40. webscout/Provider/DeepSeek.py +218 -0
  41. webscout/Provider/Deepinfra.py +59 -35
  42. webscout/Provider/Free2GPT.py +131 -124
  43. webscout/Provider/Gemini.py +100 -115
  44. webscout/Provider/Glider.py +74 -59
  45. webscout/Provider/Groq.py +30 -18
  46. webscout/Provider/Jadve.py +108 -77
  47. webscout/Provider/Llama3.py +117 -94
  48. webscout/Provider/Marcus.py +191 -137
  49. webscout/Provider/Netwrck.py +62 -50
  50. webscout/Provider/PI.py +79 -124
  51. webscout/Provider/PizzaGPT.py +129 -83
  52. webscout/Provider/QwenLM.py +311 -0
  53. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  54. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  55. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  56. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  57. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  58. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  59. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  60. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  61. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  62. webscout/Provider/TTI/artbit/__init__.py +22 -22
  63. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  64. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  65. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  66. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  67. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  68. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  69. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  70. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  71. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  72. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  73. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  74. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  75. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  76. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  77. webscout/Provider/TTI/talkai/__init__.py +4 -4
  78. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  79. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  80. webscout/Provider/TTS/deepgram.py +182 -182
  81. webscout/Provider/TTS/elevenlabs.py +136 -136
  82. webscout/Provider/TTS/gesserit.py +150 -150
  83. webscout/Provider/TTS/murfai.py +138 -138
  84. webscout/Provider/TTS/parler.py +133 -134
  85. webscout/Provider/TTS/streamElements.py +360 -360
  86. webscout/Provider/TTS/utils.py +280 -280
  87. webscout/Provider/TTS/voicepod.py +116 -116
  88. webscout/Provider/TextPollinationsAI.py +74 -47
  89. webscout/Provider/WiseCat.py +193 -0
  90. webscout/Provider/__init__.py +144 -136
  91. webscout/Provider/cerebras.py +242 -227
  92. webscout/Provider/chatglm.py +204 -204
  93. webscout/Provider/dgaf.py +67 -39
  94. webscout/Provider/gaurish.py +105 -66
  95. webscout/Provider/geminiapi.py +208 -208
  96. webscout/Provider/granite.py +223 -0
  97. webscout/Provider/hermes.py +218 -218
  98. webscout/Provider/llama3mitril.py +179 -179
  99. webscout/Provider/llamatutor.py +72 -62
  100. webscout/Provider/llmchat.py +60 -35
  101. webscout/Provider/meta.py +794 -794
  102. webscout/Provider/multichat.py +331 -230
  103. webscout/Provider/typegpt.py +359 -356
  104. webscout/Provider/yep.py +5 -5
  105. webscout/__main__.py +5 -5
  106. webscout/cli.py +319 -319
  107. webscout/conversation.py +241 -242
  108. webscout/exceptions.py +328 -328
  109. webscout/litagent/__init__.py +28 -28
  110. webscout/litagent/agent.py +2 -3
  111. webscout/litprinter/__init__.py +0 -58
  112. webscout/scout/__init__.py +8 -8
  113. webscout/scout/core.py +884 -884
  114. webscout/scout/element.py +459 -459
  115. webscout/scout/parsers/__init__.py +69 -69
  116. webscout/scout/parsers/html5lib_parser.py +172 -172
  117. webscout/scout/parsers/html_parser.py +236 -236
  118. webscout/scout/parsers/lxml_parser.py +178 -178
  119. webscout/scout/utils.py +38 -38
  120. webscout/swiftcli/__init__.py +811 -811
  121. webscout/update_checker.py +2 -12
  122. webscout/version.py +1 -1
  123. webscout/webscout_search.py +1142 -1140
  124. webscout/webscout_search_async.py +635 -635
  125. webscout/zeroart/__init__.py +54 -54
  126. webscout/zeroart/base.py +60 -60
  127. webscout/zeroart/effects.py +99 -99
  128. webscout/zeroart/fonts.py +816 -816
  129. {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
  130. webscout-7.2.dist-info/RECORD +217 -0
  131. webstoken/__init__.py +30 -30
  132. webstoken/classifier.py +189 -189
  133. webstoken/keywords.py +216 -216
  134. webstoken/language.py +128 -128
  135. webstoken/ner.py +164 -164
  136. webstoken/normalizer.py +35 -35
  137. webstoken/processor.py +77 -77
  138. webstoken/sentiment.py +206 -206
  139. webstoken/stemmer.py +73 -73
  140. webstoken/tagger.py +60 -60
  141. webstoken/tokenizer.py +158 -158
  142. webscout/Provider/RUBIKSAI.py +0 -272
  143. webscout-7.0.dist-info/RECORD +0 -199
  144. {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
  145. {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
  146. {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
  147. {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
@@ -8,10 +8,12 @@ from webscout.AIutel import Conversation
8
8
  from webscout.AIutel import AwesomePrompts, sanitize_stream
9
9
  from webscout.AIbase import Provider, AsyncProvider
10
10
  from webscout import exceptions
11
+ from webscout import LitAgent
12
+ from webscout.Litlogger import Logger, LogFormat
11
13
 
12
14
  class DeepInfra(Provider):
13
15
  """
14
- A class to interact with the DeepInfra API.
16
+ A class to interact with the DeepInfra API with logging and LitAgent user-agent.
15
17
  """
16
18
 
17
19
  def __init__(
@@ -25,13 +27,29 @@ class DeepInfra(Provider):
25
27
  proxies: dict = {},
26
28
  history_offset: int = 10250,
27
29
  act: str = None,
28
- model: str = "Qwen/Qwen2.5-72B-Instruct",
30
+ model: str = "Qwen/Qwen2.5-72B-Instruct",
31
+ logging: bool = False
29
32
  ):
30
- """Initializes the DeepInfra API client."""
33
+ """Initializes the DeepInfra API client with logging support."""
31
34
  self.url = "https://api.deepinfra.com/v1/openai/chat/completions"
35
+ # Use LitAgent for user-agent instead of hardcoded string.
32
36
  self.headers = {
33
- "Accept": "text/event-stream, application/json",
34
-
37
+ 'User-Agent': LitAgent().random(),
38
+ 'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
39
+ 'Cache-Control': 'no-cache',
40
+ 'Connection': 'keep-alive',
41
+ 'Content-Type': 'application/json',
42
+ 'Origin': 'https://deepinfra.com',
43
+ 'Pragma': 'no-cache',
44
+ 'Referer': 'https://deepinfra.com/',
45
+ 'Sec-Fetch-Dest': 'empty',
46
+ 'Sec-Fetch-Mode': 'cors',
47
+ 'Sec-Fetch-Site': 'same-site',
48
+ 'X-Deepinfra-Source': 'web-embed',
49
+ 'accept': 'text/event-stream',
50
+ 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
51
+ 'sec-ch-ua-mobile': '?0',
52
+ 'sec-ch-ua-platform': '"macOS"'
35
53
  }
36
54
  self.session = requests.Session()
37
55
  self.session.headers.update(self.headers)
@@ -61,6 +79,16 @@ class DeepInfra(Provider):
61
79
  )
62
80
  self.conversation.history_offset = history_offset
63
81
 
82
+ # Initialize logger if enabled
83
+ self.logger = Logger(
84
+ name="DeepInfra",
85
+ format=LogFormat.MODERN_EMOJI,
86
+
87
+ ) if logging else None
88
+
89
+ if self.logger:
90
+ self.logger.info("DeepInfra initialized successfully")
91
+
64
92
  def ask(
65
93
  self,
66
94
  prompt: str,
@@ -69,14 +97,17 @@ class DeepInfra(Provider):
69
97
  optimizer: str = None,
70
98
  conversationally: bool = False,
71
99
  ) -> Union[Dict[str, Any], Generator]:
72
-
73
100
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
74
101
  if optimizer:
75
102
  if optimizer in self.__available_optimizers:
76
103
  conversation_prompt = getattr(Optimizers, optimizer)(
77
104
  conversation_prompt if conversationally else prompt
78
105
  )
106
+ if self.logger:
107
+ self.logger.debug(f"Applied optimizer: {optimizer}")
79
108
  else:
109
+ if self.logger:
110
+ self.logger.error(f"Invalid optimizer requested: {optimizer}")
80
111
  raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
81
112
 
82
113
  # Payload construction
@@ -90,17 +121,23 @@ class DeepInfra(Provider):
90
121
  }
91
122
 
92
123
  def for_stream():
124
+ if self.logger:
125
+ self.logger.debug("Sending streaming request to DeepInfra API...")
93
126
  try:
94
127
  with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
95
128
  if response.status_code != 200:
96
- raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
129
+ if self.logger:
130
+ self.logger.error(f"Request failed with status code {response.status_code}")
97
131
 
132
+ raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
133
+ if self.logger:
134
+ self.logger.debug(response.text)
98
135
  streaming_text = ""
99
- for line in response.iter_lines(decode_unicode=True): # Decode lines
136
+ for line in response.iter_lines(decode_unicode=True):
100
137
  if line:
101
138
  line = line.strip()
102
139
  if line.startswith("data: "):
103
- json_str = line[6:] #Remove "data: " prefix
140
+ json_str = line[6:] # Remove "data: " prefix
104
141
  if json_str == "[DONE]":
105
142
  break
106
143
  try:
@@ -110,28 +147,27 @@ class DeepInfra(Provider):
110
147
  if 'delta' in choice and 'content' in choice['delta']:
111
148
  content = choice['delta']['content']
112
149
  streaming_text += content
113
-
114
- # Yield ONLY the new content:
115
- resp = dict(text=content)
150
+ resp = dict(text=content)
116
151
  yield resp if raw else resp
117
152
  except json.JSONDecodeError:
118
- pass # Or handle the error as needed
119
- self.conversation.update_chat_history(prompt, streaming_text) # Update history *after* streaming
153
+ if self.logger:
154
+ self.logger.error("JSON decode error in streaming data")
155
+ pass
156
+ self.conversation.update_chat_history(prompt, streaming_text)
157
+ if self.logger:
158
+ self.logger.info("Streaming response completed successfully")
120
159
  except requests.RequestException as e:
160
+ if self.logger:
161
+ self.logger.error(f"Request failed: {e}")
121
162
  raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
122
163
 
123
-
124
164
  def for_non_stream():
125
- # let's make use of stream
126
165
  for _ in for_stream():
127
166
  pass
128
167
  return self.last_response
129
168
 
130
-
131
169
  return for_stream() if stream else for_non_stream()
132
170
 
133
-
134
-
135
171
  def chat(
136
172
  self,
137
173
  prompt: str,
@@ -139,34 +175,22 @@ class DeepInfra(Provider):
139
175
  optimizer: str = None,
140
176
  conversationally: bool = False,
141
177
  ) -> str:
142
-
143
178
  def for_stream():
144
- for response in self.ask(
145
- prompt, True, optimizer=optimizer, conversationally=conversationally
146
- ):
179
+ for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
147
180
  yield self.get_message(response)
148
-
149
181
  def for_non_stream():
150
182
  return self.get_message(
151
- self.ask(
152
- prompt,
153
- False,
154
- optimizer=optimizer,
155
- conversationally=conversationally,
156
- )
183
+ self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
157
184
  )
158
-
159
185
  return for_stream() if stream else for_non_stream()
160
186
 
161
187
  def get_message(self, response: dict) -> str:
162
188
  assert isinstance(response, dict), "Response should be of dict data-type only"
163
189
  return response["text"]
164
190
 
165
-
166
-
167
191
  if __name__ == "__main__":
168
192
  from rich import print
169
- ai = DeepInfra(timeout=5000)
193
+ ai = DeepInfra(timeout=5000, logging=True)
170
194
  response = ai.chat("write a poem about AI", stream=True)
171
195
  for chunk in response:
172
- print(chunk, end="", flush=True)
196
+ print(chunk, end="", flush=True)
@@ -1,19 +1,35 @@
1
- import requests
2
- import uuid
3
- import json
1
+
2
+ #!/usr/bin/env python3
3
+ """
4
+ A merged API client for Free2GPT that supports both GPT and Claude variants
5
+ in a non-streaming manner. The client sends requests to the appropriate endpoint
6
+ based on the chosen variant and returns the complete response as text.
7
+
8
+ Usage:
9
+ python Free2GPT.py
10
+
11
+ Select the variant by passing the 'variant' parameter in the constructor:
12
+ variant="claude" --> Uses https://claude3.free2gpt.xyz/api/generate
13
+ variant="gpt" --> Uses https://chat1.free2gpt.com/api/generate
14
+ """
15
+
16
+ from typing import Optional, Dict
4
17
  import time
18
+ import json
19
+ import requests
5
20
  from hashlib import sha256
6
21
 
7
- from webscout.AIutel import Optimizers
8
- from webscout.AIutel import Conversation
9
- from webscout.AIutel import AwesomePrompts
22
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
10
23
  from webscout.AIbase import Provider
11
24
  from webscout import exceptions
25
+ from webscout.Litlogger import Logger, LogFormat
12
26
  from webscout import LitAgent
13
27
 
28
+
14
29
  class Free2GPT(Provider):
15
30
  """
16
- A class to interact with the Free2GPT API.
31
+ A class to interact with the Free2GPT API in a non-streaming way.
32
+ Supports both GPT and Claude variants via the 'variant' parameter.
17
33
  """
18
34
 
19
35
  def __init__(
@@ -21,35 +37,43 @@ class Free2GPT(Provider):
21
37
  is_conversation: bool = True,
22
38
  max_tokens: int = 600,
23
39
  timeout: int = 30,
24
- intro: str = None,
25
- filepath: str = None,
40
+ intro: Optional[str] = None,
41
+ filepath: Optional[str] = None,
26
42
  update_file: bool = True,
27
43
  proxies: dict = {},
28
44
  history_offset: int = 10250,
29
- act: str = None,
45
+ act: Optional[str] = None,
30
46
  system_prompt: str = "You are a helpful AI assistant.",
47
+ variant: str = "claude" # "claude" or "gpt"
31
48
  ):
32
49
  """
33
- Initializes the Free2GPT API with given parameters.
50
+ Initializes the Free2GPT API client.
34
51
 
35
52
  Args:
36
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
37
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
38
- timeout (int, optional): Http request timeout. Defaults to 30.
39
- intro (str, optional): Conversation introductory prompt. Defaults to None.
40
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
41
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
42
- proxies (dict, optional): Http request proxies. Defaults to {}.
43
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
44
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
45
- system_prompt (str, optional): System prompt for Free2GPT.
46
- Defaults to "You are a helpful AI assistant.".
53
+ is_conversation (bool): Enable conversational mode. Defaults to True.
54
+ max_tokens (int): Maximum tokens to generate. Defaults to 600.
55
+ timeout (int): HTTP request timeout. Defaults to 30.
56
+ intro (str, optional): Introductory prompt for the conversation. Defaults to None.
57
+ filepath (str, optional): Path to conversation history file. Defaults to None.
58
+ update_file (bool): Whether to update the conversation file. Defaults to True.
59
+ proxies (dict): HTTP proxy settings. Defaults to empty dict.
60
+ history_offset (int): Limit for conversation history. Defaults to 10250.
61
+ act (str, optional): Awesome prompt key/index. Defaults to None.
62
+ system_prompt (str): System prompt. Defaults to "You are a helpful AI assistant.".
63
+ variant (str): Select API variant: "claude" or "gpt". Defaults to "claude".
47
64
  """
48
65
  self.session = requests.Session()
49
66
  self.is_conversation = is_conversation
50
67
  self.max_tokens_to_sample = max_tokens
51
- self.api_endpoint = "https://chat1.free2gpt.com/api/generate"
52
- self.stream_chunk_size = 64
68
+
69
+ # Select API endpoint and header origins based on variant.
70
+ if variant.lower() == "gpt":
71
+ self.api_endpoint = "https://chat1.free2gpt.com/api/generate"
72
+ origin = "https://chat1.free2gpt.co"
73
+ else:
74
+ self.api_endpoint = "https://claude3.free2gpt.xyz/api/generate"
75
+ origin = "https://claude3.free2gpt.xyz"
76
+
53
77
  self.timeout = timeout
54
78
  self.last_response = {}
55
79
  self.system_prompt = system_prompt
@@ -59,8 +83,8 @@ class Free2GPT(Provider):
59
83
  "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
60
84
  "content-type": "text/plain;charset=UTF-8",
61
85
  "dnt": "1",
62
- "origin": "https://chat1.free2gpt.co",
63
- "referer": "https://chat1.free2gpt.co",
86
+ "origin": origin,
87
+ "referer": origin,
64
88
  "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
65
89
  "sec-ch-ua-mobile": "?0",
66
90
  "sec-ch-ua-platform": '"Windows"',
@@ -69,13 +93,16 @@ class Free2GPT(Provider):
69
93
  "sec-fetch-site": "same-origin",
70
94
  "user-agent": LitAgent().random(),
71
95
  }
96
+ self.session.headers.update(self.headers)
97
+ self.session.proxies = proxies
72
98
 
99
+ # Prepare available optimizers from Optimizers module.
73
100
  self.__available_optimizers = (
74
101
  method
75
102
  for method in dir(Optimizers)
76
103
  if callable(getattr(Optimizers, method)) and not method.startswith("__")
77
104
  )
78
- self.session.headers.update(self.headers)
105
+
79
106
  Conversation.intro = (
80
107
  AwesomePrompts().get_act(
81
108
  act, raise_not_found=True, default=None, case_insensitive=True
@@ -83,39 +110,48 @@ class Free2GPT(Provider):
83
110
  if act
84
111
  else intro or Conversation.intro
85
112
  )
86
- self.conversation = Conversation(
87
- is_conversation, self.max_tokens_to_sample, filepath, update_file
88
- )
113
+ self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
89
114
  self.conversation.history_offset = history_offset
90
- self.session.proxies = proxies
91
115
 
92
- def generate_signature(self, time: int, text: str, secret: str = ""):
93
- message = f"{time}:{text}:{secret}"
116
+ def generate_signature(self, time_val: int, text: str, secret: str = "") -> str:
117
+ """
118
+ Generates a signature for the request.
119
+
120
+ Args:
121
+ time_val (int): Timestamp value.
122
+ text (str): Text to sign.
123
+ secret (str, optional): Optional secret. Defaults to "".
124
+
125
+ Returns:
126
+ str: Hexadecimal signature.
127
+ """
128
+ message = f"{time_val}:{text}:{secret}"
94
129
  return sha256(message.encode()).hexdigest()
95
130
 
96
131
  def ask(
97
132
  self,
98
133
  prompt: str,
99
- stream: bool = False,
134
+ stream: bool = False, # Ignored; always non-streaming.
100
135
  raw: bool = False,
101
- optimizer: str = None,
136
+ optimizer: Optional[str] = None,
102
137
  conversationally: bool = False,
103
- ) -> dict:
104
- """Chat with AI
138
+ ) -> Dict[str, any]:
139
+ """
140
+ Sends a prompt to the API in a non-streaming manner.
105
141
 
106
142
  Args:
107
- prompt (str): Prompt to be send.
108
- stream (bool, optional): Flag for streaming response. Defaults to False.
109
- raw (bool, optional): Stream back raw response as received. Defaults to False.
110
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
111
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
143
+ prompt (str): The prompt text.
144
+ stream (bool): Ignored; response is always non-streamed.
145
+ raw (bool): Whether to return the raw response. Defaults to False.
146
+ optimizer (str, optional): Optimizer name. Defaults to None.
147
+ conversationally (bool): Whether to use conversational optimization. Defaults to False.
148
+
112
149
  Returns:
113
- dict : {}
114
- ```json
115
- {
116
- "text" : "How may I assist you today?"
117
- }
118
- ```
150
+ dict: A dictionary containing the generated text.
151
+ Example:
152
+ {
153
+ "text": "How may I assist you today?"
154
+ }
119
155
  """
120
156
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
121
157
  if optimizer:
@@ -124,111 +160,82 @@ class Free2GPT(Provider):
124
160
  conversation_prompt if conversationally else prompt
125
161
  )
126
162
  else:
127
- raise Exception(
128
- f"Optimizer is not one of {self.__available_optimizers}"
129
- )
163
+ raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")
130
164
 
131
- # Generate timestamp
165
+ # Generate timestamp and signature.
132
166
  timestamp = int(time.time() * 1e3)
133
-
134
- # Generate signature
135
167
  signature = self.generate_signature(timestamp, conversation_prompt)
136
168
 
137
169
  payload = {
138
170
  "messages": [
139
- {
140
- "role": "system",
141
- "content": self.system_prompt
142
- },
143
- {
144
- "role": "user",
145
- "content": conversation_prompt
146
- }
171
+ {"role": "system", "content": self.system_prompt},
172
+ {"role": "user", "content": conversation_prompt},
147
173
  ],
148
174
  "time": timestamp,
149
175
  "pass": None,
150
- "sign": signature
176
+ "sign": signature,
151
177
  }
152
178
 
153
- def for_stream():
154
- try:
155
- # Send the POST request with streaming enabled
156
- with requests.post(self.api_endpoint, headers=self.headers, data=json.dumps(payload), stream=True) as response:
157
- response.raise_for_status()
158
-
159
- full_response = ""
160
- for chunk in response.iter_content(chunk_size=self.stream_chunk_size):
161
- if chunk:
162
- full_response += chunk.decode('utf-8')
163
- yield chunk.decode('utf-8') if raw else dict(text=chunk.decode('utf-8'))
164
-
165
- self.last_response.update(dict(text=full_response))
166
- self.conversation.update_chat_history(
167
- prompt, self.get_message(self.last_response)
168
- )
169
-
170
- except requests.exceptions.RequestException as e:
171
- raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")
172
-
173
- def for_non_stream():
174
- for _ in for_stream():
175
- pass
179
+ try:
180
+ response = requests.post(
181
+ self.api_endpoint,
182
+ headers=self.headers,
183
+ data=json.dumps(payload),
184
+ timeout=self.timeout
185
+ )
186
+ response.raise_for_status()
187
+ full_response = response.text
188
+ self.last_response.update(dict(text=full_response))
189
+ self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
176
190
  return self.last_response
177
-
178
- return for_stream() if stream else for_non_stream()
191
+ except requests.exceptions.RequestException as e:
192
+ raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")
179
193
 
180
194
  def chat(
181
195
  self,
182
196
  prompt: str,
183
- stream: bool = False,
184
- optimizer: str = None,
197
+ stream: bool = False, # Ignored; always non-streaming.
198
+ optimizer: Optional[str] = None,
185
199
  conversationally: bool = False,
186
200
  ) -> str:
187
- """Generate response `str`
188
- Args:
189
- prompt (str): Prompt to be send.
190
- stream (bool, optional): Flag for streaming response. Defaults to False.
191
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
192
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
193
- Returns:
194
- str: Response generated
195
201
  """
202
+ Sends a prompt and returns the generated response as a string.
196
203
 
197
- def for_stream():
198
- for response in self.ask(
199
- prompt, True, optimizer=optimizer, conversationally=conversationally
200
- ):
201
- yield self.get_message(response)
202
-
203
- def for_non_stream():
204
- return self.get_message(
205
- self.ask(
206
- prompt,
207
- False,
208
- optimizer=optimizer,
209
- conversationally=conversationally,
210
- )
211
- )
204
+ Args:
205
+ prompt (str): The prompt to send.
206
+ stream (bool): Ignored; response is always non-streamed.
207
+ optimizer (str, optional): Optimizer name. Defaults to None.
208
+ conversationally (bool): Whether to use conversational optimization. Defaults to False.
212
209
 
213
- return for_stream() if stream else for_non_stream()
210
+ Returns:
211
+ str: Generated response.
212
+ """
213
+ response = self.ask(
214
+ prompt,
215
+ stream=False,
216
+ optimizer=optimizer,
217
+ conversationally=conversationally,
218
+ )
219
+ return self.get_message(response)
214
220
 
215
- def get_message(self, response: dict) -> str:
216
- """Retrieves message only from response
221
+ def get_message(self, response: Dict[str, any]) -> str:
222
+ """
223
+ Extracts the message text from the API response.
217
224
 
218
225
  Args:
219
- response (dict): Response generated by `self.ask`
226
+ response (dict): The API response.
220
227
 
221
228
  Returns:
222
- str: Message extracted
229
+ str: Extracted message text.
223
230
  """
224
- assert isinstance(response, dict), "Response should be of dict data-type only"
231
+ assert isinstance(response, dict), "Response should be a dictionary"
225
232
  return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
226
233
 
227
234
 
228
235
  if __name__ == "__main__":
229
236
  from rich import print
230
-
231
- ai = Free2GPT(timeout=5000)
232
- response = ai.chat("write a poem about AI", stream=True)
233
- for chunk in response:
234
- print(chunk, end="", flush=True)
237
+ prompt_input = input(">>> ")
238
+ # Choose variant: "claude" or "gpt"
239
+ client = Free2GPT(variant="gpt")
240
+ result = client.chat(prompt_input)
241
+ print(result)