webscout 8.2.6__py3-none-any.whl → 8.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (150)
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -239
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  5. webscout/Extra/YTToolkit/README.md +375 -0
  6. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  7. webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
  8. webscout/Extra/autocoder/autocoder.py +309 -114
  9. webscout/Extra/autocoder/autocoder_utiles.py +15 -15
  10. webscout/Extra/gguf.md +430 -0
  11. webscout/Extra/tempmail/README.md +488 -0
  12. webscout/Extra/weather.md +281 -0
  13. webscout/Litlogger/Readme.md +175 -0
  14. webscout/Provider/AISEARCH/DeepFind.py +41 -37
  15. webscout/Provider/AISEARCH/README.md +279 -0
  16. webscout/Provider/AISEARCH/__init__.py +0 -1
  17. webscout/Provider/AISEARCH/genspark_search.py +228 -86
  18. webscout/Provider/AISEARCH/hika_search.py +11 -11
  19. webscout/Provider/AISEARCH/scira_search.py +324 -322
  20. webscout/Provider/AllenAI.py +7 -14
  21. webscout/Provider/Blackboxai.py +518 -74
  22. webscout/Provider/Cloudflare.py +0 -1
  23. webscout/Provider/Deepinfra.py +23 -21
  24. webscout/Provider/Flowith.py +217 -0
  25. webscout/Provider/FreeGemini.py +250 -0
  26. webscout/Provider/GizAI.py +15 -5
  27. webscout/Provider/Glider.py +11 -8
  28. webscout/Provider/HeckAI.py +80 -52
  29. webscout/Provider/Koboldai.py +7 -4
  30. webscout/Provider/LambdaChat.py +2 -2
  31. webscout/Provider/Marcus.py +10 -18
  32. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  33. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  35. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  36. webscout/Provider/OPENAI/README.md +1253 -0
  37. webscout/Provider/OPENAI/__init__.py +8 -0
  38. webscout/Provider/OPENAI/ai4chat.py +293 -286
  39. webscout/Provider/OPENAI/api.py +810 -0
  40. webscout/Provider/OPENAI/base.py +217 -14
  41. webscout/Provider/OPENAI/c4ai.py +373 -367
  42. webscout/Provider/OPENAI/chatgpt.py +7 -0
  43. webscout/Provider/OPENAI/chatgptclone.py +7 -0
  44. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  45. webscout/Provider/OPENAI/deepinfra.py +30 -20
  46. webscout/Provider/OPENAI/e2b.py +6 -0
  47. webscout/Provider/OPENAI/exaai.py +7 -0
  48. webscout/Provider/OPENAI/exachat.py +6 -0
  49. webscout/Provider/OPENAI/flowith.py +162 -0
  50. webscout/Provider/OPENAI/freeaichat.py +359 -352
  51. webscout/Provider/OPENAI/glider.py +323 -316
  52. webscout/Provider/OPENAI/groq.py +361 -354
  53. webscout/Provider/OPENAI/heckai.py +30 -64
  54. webscout/Provider/OPENAI/llmchatco.py +8 -0
  55. webscout/Provider/OPENAI/mcpcore.py +7 -0
  56. webscout/Provider/OPENAI/multichat.py +8 -0
  57. webscout/Provider/OPENAI/netwrck.py +356 -350
  58. webscout/Provider/OPENAI/opkfc.py +8 -0
  59. webscout/Provider/OPENAI/scirachat.py +471 -462
  60. webscout/Provider/OPENAI/sonus.py +9 -0
  61. webscout/Provider/OPENAI/standardinput.py +9 -1
  62. webscout/Provider/OPENAI/textpollinations.py +339 -329
  63. webscout/Provider/OPENAI/toolbaz.py +7 -0
  64. webscout/Provider/OPENAI/typefully.py +355 -0
  65. webscout/Provider/OPENAI/typegpt.py +358 -346
  66. webscout/Provider/OPENAI/uncovrAI.py +7 -0
  67. webscout/Provider/OPENAI/utils.py +103 -7
  68. webscout/Provider/OPENAI/venice.py +12 -0
  69. webscout/Provider/OPENAI/wisecat.py +19 -19
  70. webscout/Provider/OPENAI/writecream.py +7 -0
  71. webscout/Provider/OPENAI/x0gpt.py +7 -0
  72. webscout/Provider/OPENAI/yep.py +50 -21
  73. webscout/Provider/OpenGPT.py +1 -1
  74. webscout/Provider/TTI/AiForce/README.md +159 -0
  75. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  76. webscout/Provider/TTI/ImgSys/README.md +174 -0
  77. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  78. webscout/Provider/TTI/Nexra/README.md +155 -0
  79. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  80. webscout/Provider/TTI/README.md +128 -0
  81. webscout/Provider/TTI/aiarta/README.md +134 -0
  82. webscout/Provider/TTI/artbit/README.md +100 -0
  83. webscout/Provider/TTI/fastflux/README.md +129 -0
  84. webscout/Provider/TTI/huggingface/README.md +114 -0
  85. webscout/Provider/TTI/piclumen/README.md +161 -0
  86. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  87. webscout/Provider/TTI/talkai/README.md +139 -0
  88. webscout/Provider/TTS/README.md +192 -0
  89. webscout/Provider/TTS/__init__.py +2 -1
  90. webscout/Provider/TTS/speechma.py +500 -100
  91. webscout/Provider/TTS/sthir.py +94 -0
  92. webscout/Provider/TeachAnything.py +3 -7
  93. webscout/Provider/TextPollinationsAI.py +4 -2
  94. webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
  95. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  96. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  97. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  98. webscout/Provider/Writecream.py +11 -2
  99. webscout/Provider/__init__.py +8 -14
  100. webscout/Provider/ai4chat.py +4 -58
  101. webscout/Provider/asksteve.py +17 -9
  102. webscout/Provider/cerebras.py +3 -1
  103. webscout/Provider/koala.py +170 -268
  104. webscout/Provider/llmchat.py +3 -0
  105. webscout/Provider/lmarena.py +198 -0
  106. webscout/Provider/meta.py +7 -4
  107. webscout/Provider/samurai.py +223 -0
  108. webscout/Provider/scira_chat.py +4 -2
  109. webscout/Provider/typefully.py +23 -151
  110. webscout/__init__.py +4 -2
  111. webscout/cli.py +3 -28
  112. webscout/conversation.py +35 -35
  113. webscout/litagent/Readme.md +276 -0
  114. webscout/scout/README.md +402 -0
  115. webscout/swiftcli/Readme.md +323 -0
  116. webscout/version.py +1 -1
  117. webscout/webscout_search.py +2 -182
  118. webscout/webscout_search_async.py +1 -179
  119. webscout/zeroart/README.md +89 -0
  120. webscout/zeroart/__init__.py +134 -54
  121. webscout/zeroart/base.py +19 -13
  122. webscout/zeroart/effects.py +101 -99
  123. webscout/zeroart/fonts.py +1239 -816
  124. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
  125. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
  126. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  127. webscout-8.2.8.dist-info/entry_points.txt +3 -0
  128. webscout-8.2.8.dist-info/top_level.txt +1 -0
  129. webscout/Provider/AISEARCH/ISou.py +0 -256
  130. webscout/Provider/ElectronHub.py +0 -773
  131. webscout/Provider/Free2GPT.py +0 -241
  132. webscout/Provider/GPTWeb.py +0 -249
  133. webscout/Provider/bagoodex.py +0 -145
  134. webscout/Provider/geminiprorealtime.py +0 -160
  135. webscout/scout/core.py +0 -881
  136. webscout-8.2.6.dist-info/entry_points.txt +0 -3
  137. webscout-8.2.6.dist-info/top_level.txt +0 -2
  138. webstoken/__init__.py +0 -30
  139. webstoken/classifier.py +0 -189
  140. webstoken/keywords.py +0 -216
  141. webstoken/language.py +0 -128
  142. webstoken/ner.py +0 -164
  143. webstoken/normalizer.py +0 -35
  144. webstoken/processor.py +0 -77
  145. webstoken/sentiment.py +0 -206
  146. webstoken/stemmer.py +0 -73
  147. webstoken/tagger.py +0 -60
  148. webstoken/tokenizer.py +0 -158
  149. /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
  150. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
@@ -453,3 +453,10 @@ class UncovrAI(OpenAICompatibleProvider):
             # If any error occurs, return the original text
             print(f"{RED}Warning: Error formatting text: {e}{RESET}")
             return text
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
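This `models` property, added to several providers in this release, mimics the `client.models.list()` call shape of the OpenAI SDK. A minimal usage sketch, with the import path assumed from the file layout above (`webscout/Provider/OPENAI/uncovrAI.py`):

```python
# Usage sketch for the models property added above; the import path
# is an assumption based on the package layout, not confirmed by the diff.
from webscout.Provider.OPENAI import UncovrAI

client = UncovrAI()
print(client.models.list())  # -> UncovrAI.AVAILABLE_MODELS
```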
@@ -1,6 +1,8 @@
-from typing import List, Dict, Optional, Any, Union
-from dataclasses import dataclass, asdict, is_dataclass
+from typing import List, Dict, Optional, Any, Union, Literal
+from dataclasses import dataclass, asdict, is_dataclass, field
 from enum import Enum
+import time
+import uuid
 
 # --- OpenAI Response Structure Mimics ---
 # Moved here for reusability across different OpenAI-compatible providers
@@ -95,13 +97,107 @@ class Choice(BaseModel):
     finish_reason: Optional[str] = None
     logprobs: Optional[Dict[str, Any]] = None
 
+@dataclass
+class ModelData(BaseModel):
+    """OpenAI model info response."""
+    id: str
+    object: str = "model"
+    created: int = int(time.time())
+    owned_by: str = "webscout"
+    permission: Optional[List[Dict[str, Any]]] = None
+    root: Optional[str] = None
+    parent: Optional[str] = None
+
+@dataclass
+class ModelList(BaseModel):
+    """OpenAI model list response."""
+    data: List[ModelData]  # Moved before 'object'
+    object: str = "list"
+
+
+# @dataclass
+# class EmbeddingData(BaseModel):
+#     """Single embedding data."""
+#     embedding: List[float]
+#     index: int
+#     object: str = "embedding"
+
+# @dataclass
+# class EmbeddingResponse(BaseModel):
+#     """OpenAI embeddings response."""
+#     data: List[EmbeddingData]
+#     model: str
+#     usage: CompletionUsage
+#     object: str = "list"
+
+# @dataclass
+# class FineTuningJob(BaseModel):
+#     """OpenAI fine-tuning job."""
+#     id: str
+#     model: str
+#     created_at: int
+#     status: str
+#     training_file: str
+#     hyperparameters: Dict[str, Any]
+#     object: str = "fine_tuning.job"
+#     finished_at: Optional[int] = None
+#     validation_file: Optional[str] = None
+#     trained_tokens: Optional[int] = None
+#     result_files: Optional[List[str]] = None
+#     organization_id: Optional[str] = None
+
+# @dataclass
+# class FineTuningJobList(BaseModel):
+#     """OpenAI fine-tuning job list response."""
+#     data: List[FineTuningJob]
+#     object: str = "list"
+#     has_more: bool = False
+
+# @dataclass
+# class File(BaseModel):
+#     """OpenAI file."""
+#     id: str
+#     bytes: int
+#     created_at: int
+#     filename: str
+#     purpose: str
+#     object: str = "file"
+#     status: str = "uploaded"
+#     status_details: Optional[str] = None
+
+# @dataclass
+# class FileList(BaseModel):
+#     """OpenAI file list response."""
+#     data: List[File]
+#     object: str = "list"
+
+# @dataclass
+# class DeletedObject(BaseModel):
+#     """OpenAI deleted object response."""
+#     id: str
+#     object: str = "deleted_object"
+#     deleted: bool = True
+
+# @dataclass
+# class ImageData(BaseModel):
+#     """OpenAI generated image."""
+#     url: Optional[str] = None
+#     b64_json: Optional[str] = None
+#     revised_prompt: Optional[str] = None
+
+# @dataclass
+# class ImageResponse(BaseModel):
+#     """OpenAI image generation response."""
+#     data: List[ImageData]
+#     created: int = int(time.time())
+
 @dataclass
 class ChatCompletion(BaseModel):
     """Chat completion response."""
-    id: str
-    created: int
     model: str
     choices: List[Choice]
+    id: str = field(default_factory=lambda: f"chatcmpl-{str(uuid.uuid4())}")
+    created: int = field(default_factory=lambda: int(time.time()))
     object: str = "chat.completion"
     system_fingerprint: Optional[str] = None
     usage: Optional[CompletionUsage] = None
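Two Python dataclass rules explain the reshuffling in this hunk. Fields without defaults must precede fields with defaults (hence `data` is declared before `object` in `ModelList`, and `id`/`created` move below `model`/`choices` once they gain defaults), and per-instance defaults such as timestamps and UUIDs need `field(default_factory=...)`, which is re-evaluated on every instantiation; a plain default like `ModelData.created = int(time.time())` is computed once at import time and shared by all instances. A minimal sketch of the difference (the `Demo` class is illustrative, not part of webscout):

```python
import time
import uuid
from dataclasses import dataclass, field

@dataclass
class Demo:
    model: str  # no default, so it must be declared first
    id: str = field(default_factory=lambda: f"chatcmpl-{uuid.uuid4()}")
    created: int = field(default_factory=lambda: int(time.time()))

a, b = Demo(model="m"), Demo(model="m")
assert a.id != b.id  # default_factory runs once per instance
```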
@@ -109,10 +205,10 @@ class ChatCompletion(BaseModel):
 @dataclass
 class ChatCompletionChunk(BaseModel):
     """Streaming chat completion response chunk."""
-    id: str
-    created: int
     model: str
     choices: List[Choice]
+    id: str = field(default_factory=lambda: f"chatcmpl-{str(uuid.uuid4())}")
+    created: int = field(default_factory=lambda: int(time.time()))
     object: str = "chat.completion.chunk"
     system_fingerprint: Optional[str] = None
 
@@ -120,7 +216,7 @@ class ChatCompletionChunk(BaseModel):
 
 # --- Helper Functions ---
 
 def format_prompt(messages: List[Dict[str, Any]], add_special_tokens: bool = False,
-                  do_continue: bool = False, include_system: bool = True) -> str:
+                 do_continue: bool = False, include_system: bool = True) -> str:
     """
     Format a series of messages into a single string, optionally adding special tokens.
 
@@ -411,3 +411,15 @@ class Venice(OpenAICompatibleProvider):
         # Default to the most capable model
         print(f"Warning: Unknown model '{model}'. Using 'mistral-31-24b' instead.")
         return "mistral-31-24b"
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+    @classmethod
+    def models(cls):
+        """Return the list of available models for Venice."""
+        return cls.AVAILABLE_MODELS
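Note that this hunk binds `models` twice in the Venice class body. A Python class body executes top to bottom, so the later `@classmethod` rebinds the name and the `@property` above it is discarded. A minimal demonstration of that semantics (illustrative class, not webscout code):

```python
class Demo:
    @property
    def models(self):
        return "property"

    @classmethod
    def models(cls):  # rebinds 'models'; the property above is discarded
        return "classmethod"

print(Demo.models())  # prints: classmethod
```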
@@ -288,13 +288,11 @@ class WiseCat(OpenAICompatibleProvider):
     )
     """
 
-    AVAILABLE_MODELS = [
-        "chat-model-small",
-        "chat-model-large",
-        "chat-model-reasoning",
-    ]
-
-    # No model mapping needed as we use the model names directly
+    _base_models = ["chat-model-small", "chat-model-large", "chat-model-reasoning"]
+    # Create AVAILABLE_MODELS as a list with the format "WiseCat/model"
+    AVAILABLE_MODELS = [f"WiseCat/{model}" for model in _base_models]
+    # Create a mapping dictionary for internal use
+    _model_mapping = {model: f"WiseCat/{model}" for model in _base_models}
 
     def __init__(
         self,
@@ -364,18 +362,20 @@ class WiseCat(OpenAICompatibleProvider):
 
     def convert_model_name(self, model: str) -> str:
         """
-        Convert model names to ones supported by WiseCat.
-
-        Args:
-            model: Model name to convert
-
-        Returns:
-            WiseCat model name
+        Convert model names to ones supported by WiseCat. Accepts both 'WiseCat/model' and raw model names.
         """
-        # If the model is already a valid WiseCat model, return it
-        if model in self.AVAILABLE_MODELS:
-            return model
-
-        # Default to the most capable model
+        if model.startswith("WiseCat/"):
+            model_raw = model.replace("WiseCat/", "", 1)
+        else:
+            model_raw = model
+        if f"WiseCat/{model_raw}" in self.AVAILABLE_MODELS:
+            return model_raw
         print(f"Warning: Unknown model '{model}'. Using 'chat-model-large' instead.")
         return "chat-model-large"
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return WiseCat.AVAILABLE_MODELS
+        return _ModelList()
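With this change the advertised ids are namespaced (`WiseCat/chat-model-small`) while the raw name is what gets used internally. A behavior sketch implied by the patched `convert_model_name` above (constructor arguments omitted for brevity):

```python
wc = WiseCat()  # constructor arguments omitted; see the class definition
assert wc.convert_model_name("WiseCat/chat-model-small") == "chat-model-small"
assert wc.convert_model_name("chat-model-small") == "chat-model-small"
# Unknown names fall back to the default with a printed warning:
assert wc.convert_model_name("no-such-model") == "chat-model-large"
```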
@@ -144,6 +144,13 @@ class Writecream(OpenAICompatibleProvider):
     def convert_model_name(self, model: str) -> str:
         return "writecream"
 
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return Writecream.AVAILABLE_MODELS
+        return _ModelList()
+
 # Simple test if run directly
 if __name__ == "__main__":
     client = Writecream()
@@ -319,6 +319,13 @@ class X0GPT(OpenAICompatibleProvider):
         # Initialize the chat interface
         self.chat = Chat(self)
 
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return X0GPT.AVAILABLE_MODELS
+        return _ModelList()
+
     def format_text(self, text: str) -> str:
         """
         Format text by replacing escaped newlines with actual newlines.
@@ -56,7 +56,13 @@ class Completions(BaseCompletions):
         Mimics openai.chat.completions.create
         Note: YEPCHAT does not support system messages. They will be ignored.
         """
-        if model not in self._client.AVAILABLE_MODELS:
+        # Accept both raw and prefixed model names from the user, but always send the raw name to the API
+        if model.startswith("YEPCHAT/"):
+            model_raw = model.replace("YEPCHAT/", "", 1)
+        else:
+            model_raw = model
+        # Validate model
+        if f"YEPCHAT/{model_raw}" not in self._client.AVAILABLE_MODELS:
             raise ValueError(
                 f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}"
             )
@@ -86,7 +92,7 @@ class Completions(BaseCompletions):
             "top_p": top_p,
             "temperature": temperature,
             "messages": filtered_messages,  # Use filtered messages
-            "model": model,
+            "model": model_raw,  # Send only the raw model name to the API
         }
 
         # Add any extra kwargs to the payload
@@ -104,7 +110,6 @@ class Completions(BaseCompletions):
         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
-            # Use session.post from cloudscraper instance
             response = self._client.session.post(
                 self._client.api_endpoint,
                 headers=self._client.headers,
@@ -115,7 +120,6 @@ class Completions(BaseCompletions):
             )
 
             if not response.ok:
-                # Simplified error handling for now, add refresh logic if needed
                 raise IOError(
                     f"YEPCHAT API Error: {response.status_code} {response.reason} - {response.text}"
                 )
@@ -133,9 +137,10 @@ class Completions(BaseCompletions):
                     delta_data = choice_data.get('delta', {})
                     finish_reason = choice_data.get('finish_reason')
                     content = delta_data.get('content')
+                    role = delta_data.get('role', None)
 
-                    if content is not None:  # Only yield chunks with content
-                        delta = ChoiceDelta(content=content, role=delta_data.get('role', 'assistant'))
+                    if content is not None or role is not None:
+                        delta = ChoiceDelta(content=content, role=role)
                         choice = Choice(index=0, delta=delta, finish_reason=finish_reason)
                         chunk = ChatCompletionChunk(
                             id=request_id,
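The widened condition matches OpenAI's streaming convention, where the first chunk typically carries only `role: "assistant"` with `content` set to `None`, so consumers must guard before concatenating. A consumption sketch under that assumption (constructor arguments and model name are illustrative):

```python
client = YEPCHAT()  # provider from this diff; constructor args omitted
stream = client.chat.completions.create(
    model="YEPCHAT/Mixtral-8x7B-Instruct-v0.1",
    messages=[{"role": "user", "content": "hi"}],
    stream=True,
)
for chunk in stream:
    delta = chunk.choices[0].delta
    if delta.content:  # role-only chunks have content=None
        print(delta.content, end="", flush=True)
```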
@@ -151,7 +156,7 @@ class Completions(BaseCompletions):
 
             # Yield final chunk with finish reason if not already sent
             delta = ChoiceDelta()
-            choice = Choice(index=0, delta=delta, finish_reason="stop")  # Assume stop if loop finishes
+            choice = Choice(index=0, delta=delta, finish_reason="stop")
             chunk = ChatCompletionChunk(
                 id=request_id,
                 choices=[choice],
@@ -167,21 +172,34 @@ class Completions(BaseCompletions):
         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
     ) -> ChatCompletion:
         full_response_content = ""
-        finish_reason = "stop"  # Assume stop unless error occurs
-
+        finish_reason = "stop"
         try:
-            stream_generator = self._create_stream(request_id, created_time, model, payload)
-            for chunk in stream_generator:
-                if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
-                    full_response_content += chunk.choices[0].delta.content
-                if chunk.choices and chunk.choices[0].finish_reason:
-                    finish_reason = chunk.choices[0].finish_reason  # Capture finish reason if provided
-
-        except IOError as e:
+            # Make a non-streaming request to the API
+            payload_copy = payload.copy()
+            payload_copy["stream"] = False
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                cookies=self._client.cookies,
+                json=payload_copy,
+                timeout=self._client.timeout
+            )
+            if not response.ok:
+                raise IOError(
+                    f"YEPCHAT API Error: {response.status_code} {response.reason} - {response.text}"
+                )
+            data = response.json()
+            if 'choices' in data and len(data['choices']) > 0:
+                # YEPCHAT non-streaming returns message content in choices[0]['message']['content']
+                full_response_content = data['choices'][0].get('message', {}).get('content', '')
+                finish_reason = data['choices'][0].get('finish_reason', 'stop')
+            else:
+                full_response_content = ''
+                finish_reason = 'stop'
+        except Exception as e:
             print(f"Error obtaining non-stream response from YEPCHAT: {e}")
             finish_reason = "error"
 
-        # Construct the final ChatCompletion object
         message = ChatCompletionMessage(
             role="assistant",
             content=full_response_content
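The non-stream path no longer aggregates the SSE stream; it re-posts the payload with `stream: False` and reads the whole message from `choices[0]['message']['content']`. Stripped of the provider plumbing, the request it now issues looks roughly like this (the endpoint URL is a placeholder and plain `requests` stands in for the client's cloudscraper session):

```python
import requests  # sketch only; the real code reuses self._client.session

API_ENDPOINT = "https://example.invalid/v1/chat/completions"  # placeholder URL
payload = {
    "model": "Mixtral-8x7B-Instruct-v0.1",
    "messages": [{"role": "user", "content": "hi"}],
    "stream": False,  # one aggregate JSON response instead of SSE chunks
}
resp = requests.post(API_ENDPOINT, json=payload, timeout=30)
data = resp.json()
text = data["choices"][0].get("message", {}).get("content", "")
```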
@@ -191,9 +209,7 @@ class Completions(BaseCompletions):
             message=message,
             finish_reason=finish_reason
         )
-        # Usage data is not provided by this API in a standard way, set to 0
         usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
-
         completion = ChatCompletion(
             id=request_id,
             choices=[choice],
@@ -219,7 +235,13 @@ class YEPCHAT(OpenAICompatibleProvider):
         )
         print(response.choices[0].message.content)
     """
-    AVAILABLE_MODELS = ["DeepSeek-R1-Distill-Qwen-32B", "Mixtral-8x7B-Instruct-v0.1"]
+    _base_models = ["DeepSeek-R1-Distill-Qwen-32B", "Mixtral-8x7B-Instruct-v0.1"]
+
+    # Create AVAILABLE_MODELS as a list with the format "YEPCHAT/model"
+    AVAILABLE_MODELS = [f"YEPCHAT/{model}" for model in _base_models]
+
+    # Create a mapping dictionary for internal use
+    _model_mapping = {model: f"YEPCHAT/{model}" for model in _base_models}
 
     def __init__(
         self,
@@ -274,6 +296,13 @@ class YEPCHAT(OpenAICompatibleProvider):
         # Initialize the chat interface
         self.chat = Chat(self)
 
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return YEPCHAT.AVAILABLE_MODELS
+        return _ModelList()
+
     def convert_model_name(self, model: str) -> str:
         """
         Ensures the model name is valid for YEPCHAT.
@@ -184,7 +184,7 @@ class OpenGPT(Provider):
         # If stream=True was requested, simulate streaming by yielding the full message at once
        if stream:
            def stream_wrapper():
-               yield self.get_message(response_data)
+               yield self.get_message(response_data)  # yield only the text string
            return stream_wrapper()
        else:
            # If stream=False, return the full message directly
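Wrapping the one-shot result in a generator keeps the `stream=True` return type uniform: callers can always iterate, whether the backend truly streams or, as here, delivers the full text at once. A minimal sketch of the idea:

```python
def stream_wrapper(full_text):
    # Simulated streaming: a single yield preserves the generator
    # interface, so callers iterate exactly as over a real stream.
    yield full_text

for piece in stream_wrapper("full response"):
    print(piece, end="")
```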
@@ -0,0 +1,159 @@
+# AiForce Provider 🔥
+
+Yo fam! This is the AiForce provider for generating some fire images! Part of the HelpingAI squad! 👑
+
+## Features 💪
+
+- Both sync and async support ⚡
+- 12 fire models to choose from 🎨
+- Smart retry mechanism 🔄
+- Custom image sizes 📐
+- Save with custom names 💾
+- Fire logging with cyberpunk theme 🌟
+- Proxy support for stealth mode 🕵️‍♂️
+
+## Quick Start 🚀
+
+### Installation 📦
+
+```bash
+pip install webscout
+```
+
+### Basic Usage 💫
+
+```python
+# Sync way
+from webscout import AiForceimager
+
+provider = AiForceimager()
+images = provider.generate("Epic dragon")
+paths = provider.save(images)
+
+# Async way
+from webscout import AsyncAiForceimager
+import asyncio
+
+async def generate():
+    provider = AsyncAiForceimager()
+    images = await provider.generate("Cool art")
+    paths = await provider.save(images)
+
+asyncio.run(generate())
+```
+
+## Available Models 🎭
+
+| Model | Description | Best For |
+|-------|-------------|----------|
+| `Flux-1.1-Pro` | Latest pro model (Default) | High quality general purpose |
+| `stable-diffusion-xl-lightning` | Fast SDXL model | Quick generations |
+| `stable-diffusion-xl-base` | Base SDXL model | High quality base |
+| `ideogram` | Artistic model | Creative artwork |
+| `flux` | Standard model | General purpose |
+| `flux-realism` | Photorealistic model | Realistic images |
+| `flux-anime` | Anime style | Anime/manga art |
+| `flux-3d` | 3D rendering | 3D objects/scenes |
+| `flux-disney` | Disney style | Disney-like art |
+| `flux-pixel` | Pixel art | Retro/game art |
+| `flux-4o` | 4k output | High resolution |
+| `any-dark` | Dark theme | Gothic/dark art |
+
+## Advanced Examples 🔥
+
+### Custom Settings 🛠️
+
+```python
+provider = AiForceimager(
+    timeout=120,  # Longer timeout
+    proxies={
+        'http': 'http://proxy.example.com:8080'
+    }
+)
+```
+
+### Multiple Images with Custom Size 📸
+
+```python
+images = provider.generate(
+    prompt="A shiny red sports car",
+    amount=3,              # Generate 3 images
+    model="flux-realism",  # Use realistic model
+    width=1024,            # Custom width
+    height=768,            # Custom height
+    seed=42                # For reproducible results
+)
+```
+
+### Custom Save Options 💾
+
+```python
+paths = provider.save(
+    images,
+    name="sports_car",      # Custom name
+    dir="my_images",        # Custom directory
+    filenames_prefix="v1_"  # Add prefix
+)
+```
+
+### Async with Error Handling ⚡
+
+```python
+async def generate_safely():
+    provider = AsyncAiForceimager()
+    try:
+        images = await provider.generate(
+            prompt="Epic dragon",
+            model="flux-3d",
+            amount=2
+        )
+        paths = await provider.save(images, dir="dragons")
+        print(f"Saved to: {paths}")
+    except Exception as e:
+        print(f"Oops! Something went wrong: {e}")
+
+asyncio.run(generate_safely())
+```
+
+## Tips & Tricks 💡
+
+1. Use `flux-realism` for photorealistic images
+2. Use `flux-3d` for product renders
+3. Use `flux-anime` for anime style art
+4. Set custom timeouts for large images
+5. Use proxies for better reliability
+6. Add seed for reproducible results
+
+## Error Handling 🛡️
+
+The provider handles common errors:
+
+- Network issues
+- API timeouts
+- Invalid inputs
+- File saving errors
+
+Example with retry:
+
+```python
+provider = AiForceimager()
+try:
+    images = provider.generate(
+        "Epic scene",
+        max_retries=5,  # More retries
+        retry_delay=10  # Longer delay
+    )
+except Exception as e:
+    print(f"Generation failed: {e}")
+```
+
+## Contributing 🤝
+
+Pull up to the squad! We're always looking for improvements:
+
+1. Fork it
+2. Create your feature branch
+3. Push your changes
+4. Hit us with that pull request
+
+Made with 💖 by the HelpingAI Team
@@ -0,0 +1,99 @@
+# FreeAI Image Provider 🎨
+
+Generate amazing images with our FreeAI provider! Access to powerful models like DALL-E 3 and Flux series! 🚀
+
+## Features 💫
+- Both sync and async support ⚡
+- 7 powerful models to choose from 🎭
+- Smart retry mechanism 🔄
+- Custom image sizes 📐
+- Save with custom names 💾
+- Fire logging with cyberpunk theme 🌟
+- Proxy support for stealth mode 🕵️‍♂️
+
+## Quick Start 🚀
+
+### Installation 📦
+```bash
+pip install webscout
+```
+
+### Basic Usage 💫
+
+```python
+# Sync way
+from webscout import FreeAIImager
+
+provider = FreeAIImager()
+images = provider.generate("Epic dragon")
+paths = provider.save(images)
+
+# Async way
+from webscout import AsyncFreeAIImager
+import asyncio
+
+async def generate():
+    provider = AsyncFreeAIImager()
+    images = await provider.generate("Cool art")
+    paths = await provider.save(images)
+
+asyncio.run(generate())
+```
+
+## Available Models 🎭
+
+| Model | Description | Best For |
+|-------|-------------|----------|
+| `dall-e-3` | Latest DALL-E model (Default) | High quality general purpose |
+| `Flux Pro Ultra` | Premium Flux model | Professional quality |
+| `Flux Pro` | Standard Pro model | High quality images |
+| `Flux Pro Ultra Raw` | Unprocessed Ultra output | Raw creative control |
+| `Flux Schnell` | Fast generation model | Quick results |
+| `Flux Realism` | Photorealistic model | Realistic images |
+| `grok-2-aurora` | Aurora enhancement | Artistic flair |
+
+## Advanced Usage 🔧
+
+### Custom Settings
+```python
+provider = FreeAIImager(
+    model="Flux Pro Ultra",
+    timeout=120,
+    logging=True
+)
+
+images = provider.generate(
+    prompt="Epic dragon",
+    amount=2,
+    size="1024x1024",
+    quality="hd",
+    style="vivid"
+)
+paths = provider.save(images, dir="dragons")
+```
+
+### Async with Error Handling ⚡
+```python
+async def generate_safely():
+    provider = AsyncFreeAIImager()
+    try:
+        images = await provider.generate(
+            prompt="Epic dragon",
+            model="Flux Pro Ultra",
+            amount=2
+        )
+        paths = await provider.save(images, dir="dragons")
+        print(f"Saved to: {paths}")
+    except Exception as e:
+        print(f"Oops! Something went wrong: {e}")
+
+asyncio.run(generate_safely())
+```
+
+## Tips & Tricks 💡
+
+1. Use `Flux Realism` for photorealistic images
2. Use `Flux Pro Ultra` for highest quality
+3. Use `Flux Schnell` for quick drafts
+4. Set custom timeouts for large generations
+5. Enable logging for detailed progress updates