webscout 8.0__py3-none-any.whl → 8.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (45)
  1. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  2. webscout/Provider/AISEARCH/ISou.py +1 -1
  3. webscout/Provider/AISEARCH/__init__.py +2 -1
  4. webscout/Provider/AISEARCH/felo_search.py +1 -1
  5. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +1 -1
  7. webscout/Provider/AISEARCH/iask_search.py +436 -0
  8. webscout/Provider/AISEARCH/scira_search.py +1 -1
  9. webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
  10. webscout/Provider/ExaAI.py +1 -1
  11. webscout/Provider/Jadve.py +2 -2
  12. webscout/Provider/OPENAI/__init__.py +17 -0
  13. webscout/Provider/OPENAI/base.py +46 -0
  14. webscout/Provider/OPENAI/c4ai.py +347 -0
  15. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  16. webscout/Provider/OPENAI/deepinfra.py +284 -0
  17. webscout/Provider/OPENAI/exaai.py +419 -0
  18. webscout/Provider/OPENAI/exachat.py +421 -0
  19. webscout/Provider/OPENAI/freeaichat.py +355 -0
  20. webscout/Provider/OPENAI/glider.py +314 -0
  21. webscout/Provider/OPENAI/heckai.py +337 -0
  22. webscout/Provider/OPENAI/llmchatco.py +325 -0
  23. webscout/Provider/OPENAI/netwrck.py +348 -0
  24. webscout/Provider/OPENAI/scirachat.py +459 -0
  25. webscout/Provider/OPENAI/sonus.py +294 -0
  26. webscout/Provider/OPENAI/typegpt.py +361 -0
  27. webscout/Provider/OPENAI/utils.py +211 -0
  28. webscout/Provider/OPENAI/venice.py +428 -0
  29. webscout/Provider/OPENAI/wisecat.py +381 -0
  30. webscout/Provider/OPENAI/x0gpt.py +389 -0
  31. webscout/Provider/OPENAI/yep.py +329 -0
  32. webscout/Provider/Venice.py +1 -1
  33. webscout/Provider/__init__.py +6 -6
  34. webscout/Provider/scira_chat.py +13 -10
  35. webscout/Provider/typegpt.py +3 -184
  36. webscout/prompt_manager.py +2 -1
  37. webscout/version.py +1 -1
  38. webscout-8.1.dist-info/METADATA +683 -0
  39. {webscout-8.0.dist-info → webscout-8.1.dist-info}/RECORD +43 -23
  40. webscout/Provider/flowith.py +0 -207
  41. webscout-8.0.dist-info/METADATA +0 -995
  42. {webscout-8.0.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
  43. {webscout-8.0.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
  44. {webscout-8.0.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
  45. {webscout-8.0.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
@@ -5,7 +5,7 @@ from typing import Any, Dict, Generator, Optional, Union
5
5
 
6
6
  from webscout.AIbase import AISearch
7
7
  from webscout import exceptions
8
- from webscout import LitAgent
8
+ from webscout.litagent import LitAgent
9
9
 
10
10
  class Response:
11
11
  """A wrapper class for DeepFind API responses.
@@ -2,7 +2,7 @@ import requests
2
2
  import json
3
3
  import re
4
4
  from typing import Dict, Optional, Generator, Any, Union
5
- from webscout import LitAgent
5
+ from webscout.litagent import LitAgent
6
6
  from webscout import exceptions
7
7
  from webscout.AIbase import AISearch
8
8
 
@@ -5,4 +5,5 @@ from .genspark_search import *
5
5
  from .monica_search import *
6
6
  from .webpilotai_search import *
7
7
  from .hika_search import *
8
- from .scira_search import *
8
+ from .scira_search import *
9
+ from .iask_search import *
@@ -5,7 +5,7 @@ from typing import Any, Dict, Generator, Optional, Union
5
5
 
6
6
  from webscout.AIbase import AISearch
7
7
  from webscout import exceptions
8
- from webscout import LitAgent
8
+ from webscout.litagent import LitAgent
9
9
 
10
10
 
11
11
  class Response:
@@ -6,7 +6,7 @@ from typing import Dict, Optional, Generator, Union, Any
6
6
 
7
7
  from webscout.AIbase import AISearch
8
8
  from webscout import exceptions
9
- from webscout import LitAgent
9
+ from webscout.litagent import LitAgent
10
10
 
11
11
 
12
12
  class Response:
@@ -8,7 +8,7 @@ from typing import Dict, Optional, Generator, Union, Any
8
8
 
9
9
  from webscout.AIbase import AISearch
10
10
  from webscout import exceptions
11
- from webscout import LitAgent
11
+ from webscout.litagent import LitAgent
12
12
 
13
13
 
14
14
  class Response:
@@ -0,0 +1,436 @@
1
+ import aiohttp
2
+ import asyncio
3
+ import lxml.html
4
+ import re
5
+ import urllib.parse
6
+ from markdownify import markdownify as md
7
+ from typing import Dict, Optional, Generator, Union, AsyncIterator, Literal
8
+
9
+ from webscout.AIbase import AISearch
10
+ from webscout import exceptions
11
+ from webscout.scout import Scout
12
+
13
+
14
class Response:
    """Lightweight wrapper around text returned by the IAsk API.

    Instances stringify to their underlying text, so they can be printed
    or converted with ``str()`` directly.

    Attributes:
        text (str): The text content of the response

    Example:
        >>> response = Response("Hello, world!")
        >>> print(response)
        Hello, world!
        >>> str(response)
        'Hello, world!'
    """

    def __init__(self, text: str):
        self.text = text

    def __repr__(self) -> str:
        return self.text

    # str() and repr() intentionally produce the same output.
    __str__ = __repr__
38
+
39
+
40
def cache_find(diff: Union[dict, list]) -> Optional[str]:
    """Recursively locate HTML content inside a nested dict/list structure.

    Depth-first walk over every value; the first string containing a
    ``<p>...</p>`` fragment is converted to Markdown and returned.

    Args:
        diff (Union[dict, list]): The nested structure to search

    Returns:
        Optional[str]: The found HTML content, or None if not found
    """
    items = diff if isinstance(diff, list) else diff.values()
    for item in items:
        if isinstance(item, (list, dict)):
            nested = cache_find(item)
            if nested:
                return nested
        if isinstance(item, str) and re.search(r"<p>.+?</p>", item):
            return md(item).strip()

    return None
59
+
60
+
61
ModeType = Literal["question", "academic", "fast", "forums", "wiki", "advanced"]
DetailLevelType = Literal["concise", "detailed", "comprehensive"]


class IAsk(AISearch):
    """A class to interact with the IAsk AI search API.

    IAsk provides a powerful search interface that returns AI-generated responses
    based on web content. It supports both streaming and non-streaming responses,
    as well as different search modes and detail levels.

    Basic Usage:
        >>> from webscout import IAsk
        >>> ai = IAsk()
        >>> # Non-streaming example
        >>> response = ai.search("What is Python?")
        >>> print(response)
        Python is a high-level programming language...

        >>> # Streaming example
        >>> for chunk in ai.search("Tell me about AI", stream=True):
        ...     print(chunk, end="", flush=True)
        Artificial Intelligence is...

        >>> # With specific mode and detail level
        >>> response = ai.search("Climate change", mode="academic", detail_level="detailed")
        >>> print(response)
        Climate change refers to...

    Args:
        timeout (int, optional): Request timeout in seconds. Defaults to 30.
        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
        mode (ModeType, optional): Default search mode. Defaults to "question".
        detail_level (DetailLevelType, optional): Default detail level. Defaults to None.
    """

    def __init__(
        self,
        timeout: int = 30,
        proxies: Optional[dict] = None,
        mode: ModeType = "question",
        detail_level: Optional[DetailLevelType] = None,
    ):
        """Initialize the IAsk API client.

        Args:
            timeout (int, optional): Request timeout in seconds. Defaults to 30.
            proxies (dict, optional): Proxy configuration for requests. Defaults to None.
            mode (ModeType, optional): Default search mode. Defaults to "question".
            detail_level (DetailLevelType, optional): Default detail level. Defaults to None.
        """
        self.timeout = timeout
        self.proxies = proxies or {}
        self.default_mode = mode
        self.default_detail_level = detail_level
        self.api_endpoint = "https://iask.ai/"
        self.last_response = {}

    def create_url(self, query: str, mode: ModeType = "question", detail_level: Optional[DetailLevelType] = None) -> str:
        """Create a properly formatted URL with mode and detail level parameters.

        Args:
            query (str): The search query.
            mode (ModeType, optional): Search mode. Defaults to "question".
            detail_level (DetailLevelType, optional): Detail level. Defaults to None.

        Returns:
            str: Formatted URL with query parameters.

        Example:
            >>> ai = IAsk()
            >>> ai.create_url("Climate change", mode="academic", detail_level="detailed")
            'https://iask.ai/?mode=academic&q=Climate+change&options%5Bdetail_level%5D=detailed'
        """
        params = {
            "mode": mode,
            "q": query,
        }
        # The backend expects the detail level as a flattened
        # "options[detail_level]" query key.
        if detail_level:
            params["options[detail_level]"] = detail_level

        query_string = urllib.parse.urlencode(params)
        return f"{self.api_endpoint}?{query_string}"

    def format_html(self, html_content: str) -> str:
        """Format HTML content into a more readable text format.

        Headings become bold markers, paragraphs are cleaned of the
        provider's self-promotion prefix and footnote markers, lists are
        bulleted, and a "footnotes" div is rendered as a source list.

        Args:
            html_content (str): The HTML content to format.

        Returns:
            str: Formatted text.
        """
        scout = Scout(html_content, features='html.parser')
        output_lines = []

        for child in scout.find_all(['h1', 'h2', 'h3', 'p', 'ol', 'ul', 'div']):
            if child.name in ["h1", "h2", "h3"]:
                output_lines.append(f"\n**{child.get_text().strip()}**\n")
            elif child.name == "p":
                text = child.get_text().strip()
                # Strip the provider's self-promotion prefix.
                text = re.sub(r"^According to Ask AI & Question AI www\.iAsk\.ai:\s*", "", text).strip()
                # Remove footnote markers
                text = re.sub(r'\[\d+\]\(#fn:\d+ \'see footnote\'\)', '', text)
                output_lines.append(text + "\n")
            elif child.name in ["ol", "ul"]:
                for li in child.find_all("li"):
                    output_lines.append("- " + li.get_text().strip() + "\n")
            elif child.name == "div" and "footnotes" in child.get("class", []):
                output_lines.append("\n**Authoritative Sources**\n")
                for li in child.find_all("li"):
                    link = li.find("a")
                    if link:
                        output_lines.append(f"- {link.get_text().strip()} ({link.get('href')})\n")

        return "".join(output_lines)

    def search(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        mode: Optional[ModeType] = None,
        detail_level: Optional[DetailLevelType] = None,
    ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
        """Search using the IAsk API and get AI-generated responses.

        Args:
            prompt (str): The search query or prompt to send to the API.
            stream (bool, optional): If True, yields response chunks as they arrive.
                If False, returns complete response. Defaults to False.
            raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
                If False, returns Response objects that convert to text automatically.
                Defaults to False.
            mode (ModeType, optional): Search mode to use. Defaults to None (uses instance default).
            detail_level (DetailLevelType, optional): Detail level to use. Defaults to None (uses instance default).

        Returns:
            Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
                - If stream=False: Returns complete response as Response object
                - If stream=True: Yields response chunks as either Dict or Response objects

        Raises:
            APIConnectionError: If the API request fails

        Examples:
            >>> ai = IAsk()
            >>> response = ai.search("What is Python?")
            >>> print(response)
            Python is a programming language...

            >>> for chunk in ai.search("Hello", stream=True, raw=True):
            ...     print(chunk)
            {'text': 'Hello'}
            {'text': ' there!'}
        """
        # Use provided parameters or fall back to instance defaults.
        search_mode = mode or self.default_mode
        search_detail_level = detail_level or self.default_detail_level

        # Non-streaming: drive the async implementation to completion on a
        # dedicated event loop and return the collected response.
        if not stream:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            try:
                result = loop.run_until_complete(
                    self._async_search(prompt, False, raw, search_mode, search_detail_level)
                )
                return result
            finally:
                loop.close()

        # Streaming: pump the async generator from a synchronous generator,
        # keeping one event loop alive until the generator is exhausted.
        buffer = ""

        def sync_generator():
            nonlocal buffer
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            try:
                async_gen_coro = self._async_search(prompt, True, raw, search_mode, search_detail_level)
                async_gen = loop.run_until_complete(async_gen_coro)

                while True:
                    try:
                        # Pull the next chunk out of the async generator.
                        chunk = loop.run_until_complete(async_gen.__anext__())

                        # Accumulate text for last_response, then yield.
                        if isinstance(chunk, dict) and 'text' in chunk:
                            buffer += chunk['text']
                        elif isinstance(chunk, Response):
                            buffer += chunk.text
                        else:
                            buffer += str(chunk)

                        yield chunk
                    except StopAsyncIteration:
                        break
                    except Exception as e:
                        # Best-effort streaming: report and stop rather than
                        # raising out of a partially-consumed generator.
                        print(f"Error in generator: {e}")
                        break
            finally:
                # Store the final response and close the loop.
                self.last_response = {"text": buffer}
                loop.close()

        return sync_generator()

    async def _async_search(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        mode: ModeType = "question",
        detail_level: Optional[DetailLevelType] = None,
    ) -> Union[Response, AsyncIterator[Union[Dict[str, str], Response]]]:
        """Internal async implementation of the search method.

        Fetches the IAsk LiveView page, joins its Phoenix websocket channel
        and yields answer chunks as the server pushes them.
        """

        async def stream_generator() -> AsyncIterator[str]:
            async with aiohttp.ClientSession() as session:
                params = {"mode": mode, "q": prompt}
                if detail_level:
                    params["options[detail_level]"] = detail_level

                try:
                    async with session.get(
                        self.api_endpoint,
                        params=params,
                        proxy=self.proxies.get('http') if self.proxies else None,
                        # aiohttp expects a ClientTimeout; a bare int is deprecated.
                        timeout=aiohttp.ClientTimeout(total=self.timeout)
                    ) as response:
                        if not response.ok:
                            # FIX: aiohttp exposes the HTTP code as ``status``;
                            # ``status_code`` does not exist on ClientResponse
                            # and raised AttributeError here.
                            raise exceptions.APIConnectionError(
                                f"Failed to generate response - ({response.status}, {response.reason}) - {await response.text()}"
                            )

                        # The page is a Phoenix LiveView app: extract the
                        # LiveView node id and CSRF token needed to join the
                        # websocket channel.
                        etree = lxml.html.fromstring(await response.text())
                        phx_node = etree.xpath('//*[starts-with(@id, "phx-")]').pop()
                        csrf_token = (
                            etree.xpath('//*[@name="csrf-token"]').pop().get("content")
                        )

                        async with session.ws_connect(
                            f"{self.api_endpoint}live/websocket",
                            params={
                                "_csrf_token": csrf_token,
                                "vsn": "2.0.0",
                            },
                            proxy=self.proxies.get('http') if self.proxies else None,
                            timeout=self.timeout
                        ) as wsResponse:
                            # Join the LiveView channel ("phx_join" frame).
                            await wsResponse.send_json(
                                [
                                    None,
                                    None,
                                    f"lv:{phx_node.get('id')}",
                                    "phx_join",
                                    {
                                        "params": {"_csrf_token": csrf_token},
                                        "url": str(response.url),
                                        "session": phx_node.get("data-phx-session"),
                                    },
                                ]
                            )
                            while True:
                                json_data = await wsResponse.receive_json()
                                if not json_data:
                                    break
                                diff: dict = json_data[4]
                                try:
                                    chunk: str = diff["e"][0][1]["data"]
                                except (KeyError, IndexError, TypeError):
                                    # No incremental event payload in this
                                    # frame: fall back to scanning the diff
                                    # for the final cached answer, then stop.
                                    cache = cache_find(diff)
                                    if cache:
                                        if diff.get("response", None):
                                            # Format the cache content if it contains HTML.
                                            if re.search(r"<[^>]+>", cache):
                                                yield self.format_html(cache)
                                            else:
                                                yield cache
                                        break
                                else:
                                    # Convert HTML chunks to readable text.
                                    if re.search(r"<[^>]+>", chunk):
                                        yield self.format_html(chunk)
                                    else:
                                        yield chunk.replace("<br/>", "\n")
                except Exception as e:
                    raise exceptions.APIConnectionError(f"Error connecting to IAsk API: {str(e)}")

        # Non-streaming: collect all chunks and return a single response.
        if not stream:
            buffer = ""
            async for chunk in stream_generator():
                buffer += chunk
            self.last_response = {"text": buffer}
            return Response(buffer) if not raw else {"text": buffer}

        # Streaming: wrap chunks in the requested output type.
        async def process_stream():
            buffer = ""
            async for chunk in stream_generator():
                buffer += chunk
                if raw:
                    yield {"text": chunk}
                else:
                    yield Response(chunk)
            self.last_response = {"text": buffer}

        return process_stream()
404
+
405
+
406
if __name__ == "__main__":
    from rich import print

    ai = IAsk()

    def _run_streaming_example(title: str, prompt: str, **search_kwargs) -> None:
        # Stream one query and print chunks as they arrive.
        print(f"\n[bold cyan]{title}[/bold cyan]")
        for chunk in ai.search(prompt, stream=True, **search_kwargs):
            print(chunk, end="", flush=True)
        print("\n\n[bold green]Response complete.[/bold green]\n")

    # Example 1: Simple search with default mode
    _run_streaming_example(
        "Example 1: Simple search with default mode",
        "What is Python?",
    )

    # Example 2: Search with academic mode
    _run_streaming_example(
        "Example 2: Search with academic mode",
        "Quantum computing applications",
        mode="academic",
    )

    # Example 3: Search with advanced mode and detailed level
    _run_streaming_example(
        "Example 3: Search with advanced mode and detailed level",
        "Climate change solutions",
        mode="advanced",
        detail_level="detailed",
    )

    # Example 4: Demonstrating the create_url method
    print("\n[bold cyan]Example 4: Generated URL for browser access[/bold cyan]")
    url = ai.create_url("Helpingai details", mode="question", detail_level="detailed")
    print(f"URL: {url}")
    print("This URL can be used directly in a browser or with other HTTP clients.")
@@ -7,7 +7,7 @@ from typing import Dict, Optional, Generator, Union, Any
7
7
 
8
8
  from webscout.AIbase import AISearch
9
9
  from webscout import exceptions
10
- from webscout import LitAgent
10
+ from webscout.litagent import LitAgent
11
11
 
12
12
 
13
13
  class Response:
@@ -5,7 +5,7 @@ from typing import Dict, Optional, Generator, Union, Any
5
5
 
6
6
  from webscout.AIbase import AISearch
7
7
  from webscout import exceptions
8
- from webscout import LitAgent
8
+ from webscout.litagent import LitAgent
9
9
 
10
10
 
11
11
  class Response:
@@ -155,7 +155,7 @@ class ExaAI(Provider):
155
155
  payload = {
156
156
  "id": conversation_id,
157
157
  "messages": [
158
- # {"role": "system", "content": self.system_prompt},
158
+ # {"role": "system", "content": self.system_prompt}, # system role not supported by this provider
159
159
  {"role": "user", "content": conversation_prompt}
160
160
  ]
161
161
  }
@@ -13,7 +13,7 @@ class JadveOpenAI(Provider):
13
13
  A class to interact with the OpenAI API through jadve.com using the streaming endpoint.
14
14
  """
15
15
 
16
- AVAILABLE_MODELS = ["gpt-4o", "gpt-4o-mini", "claude-3-7-sonnet-20250219", "claude-3-5-sonnet-20240620", "o1-mini", "deepseek-chat", "o1-mini", "claude-3-5-haiku-20241022"]
16
+ AVAILABLE_MODELS = ["gpt-4o-mini"]
17
17
 
18
18
  def __init__(
19
19
  self,
@@ -26,7 +26,7 @@ class JadveOpenAI(Provider):
26
26
  proxies: dict = {},
27
27
  history_offset: int = 10250,
28
28
  act: str = None,
29
- model: str = "claude-3-7-sonnet-20250219",
29
+ model: str = "gpt-4o-mini",
30
30
  system_prompt: str = "You are a helpful AI assistant."
31
31
  ):
32
32
  """
@@ -0,0 +1,17 @@
1
+ # This file marks the directory as a Python package.
2
+ from .deepinfra import *
3
+ from .glider import *
4
+ from .chatgptclone import *
5
+ from .x0gpt import *
6
+ from .wisecat import *
7
+ from .venice import *
8
+ from .exaai import *
9
+ from .typegpt import *
10
+ from .scirachat import *
11
+ from .freeaichat import *
12
+ from .llmchatco import *
13
+ from .yep import * # Add YEPCHAT
14
+ from .heckai import *
15
+ from .sonus import *
16
+ from .exachat import *
17
+ from .netwrck import *
@@ -0,0 +1,46 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import List, Dict, Optional, Union, Generator, Any
3
+
4
+ # Re-define or import necessary response structure classes (like ChatCompletion, ChatCompletionChunk)
5
+ # For simplicity, we'll assume they are defined elsewhere or passed directly.
6
+ # You might want to define base versions of these classes here as well.
7
+
8
class BaseChatCompletionChunk: # Placeholder
    # Stand-in for a streamed chat-completion chunk; concrete providers
    # supply their own richer response types.
    pass
class BaseChatCompletion: # Placeholder
    # Stand-in for a full (non-streamed) chat-completion response.
    pass
12
+
13
+
14
class BaseCompletions(ABC):
    """Abstract interface mirroring the OpenAI client's ``chat.completions`` object."""

    @abstractmethod
    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[BaseChatCompletion, Generator[BaseChatCompletionChunk, None, None]]:
        """Abstract method to create chat completions.

        Args:
            model: Provider-specific model identifier.
            messages: Chat history as a list of role/content dicts.
            max_tokens: Optional cap on generated tokens.
            stream: When True, implementations return a generator of
                chunks instead of a single completion object.
            temperature: Optional sampling temperature.
            top_p: Optional nucleus-sampling parameter.
            **kwargs: Extra provider-specific options.

        Returns:
            A completion object, or a generator of chunks when streaming.
        """
        raise NotImplementedError
29
+
30
+
31
class BaseChat(ABC):
    """Abstract namespace object exposing a ``completions`` attribute."""
    # Concrete providers assign a BaseCompletions implementation here.
    completions: BaseCompletions
33
+
34
+
35
class OpenAICompatibleProvider(ABC):
    """
    Abstract Base Class for providers mimicking the OpenAI Python client structure.
    Requires a nested 'chat.completions' structure.
    """
    # Concrete subclasses expose a BaseChat here, so callers can write
    # provider.chat.completions.create(...).
    chat: BaseChat

    @abstractmethod
    def __init__(self, api_key: Optional[str] = None, **kwargs: Any):
        """Initialize the provider, potentially with an API key.

        Args:
            api_key: Optional credential; providers that need no key may ignore it.
            **kwargs: Provider-specific configuration options.
        """
        raise NotImplementedError