raccoonai 0.1.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of raccoonai might be problematic. Click here for more details.

Files changed (44) hide show
  1. raccoonai/__init__.py +96 -0
  2. raccoonai/_base_client.py +2051 -0
  3. raccoonai/_client.py +473 -0
  4. raccoonai/_compat.py +219 -0
  5. raccoonai/_constants.py +14 -0
  6. raccoonai/_exceptions.py +108 -0
  7. raccoonai/_files.py +123 -0
  8. raccoonai/_models.py +795 -0
  9. raccoonai/_qs.py +150 -0
  10. raccoonai/_resource.py +43 -0
  11. raccoonai/_response.py +830 -0
  12. raccoonai/_streaming.py +333 -0
  13. raccoonai/_types.py +217 -0
  14. raccoonai/_utils/__init__.py +57 -0
  15. raccoonai/_utils/_logs.py +25 -0
  16. raccoonai/_utils/_proxy.py +62 -0
  17. raccoonai/_utils/_reflection.py +42 -0
  18. raccoonai/_utils/_streams.py +12 -0
  19. raccoonai/_utils/_sync.py +71 -0
  20. raccoonai/_utils/_transform.py +392 -0
  21. raccoonai/_utils/_typing.py +149 -0
  22. raccoonai/_utils/_utils.py +414 -0
  23. raccoonai/_version.py +4 -0
  24. raccoonai/lib/.keep +4 -0
  25. raccoonai/py.typed +0 -0
  26. raccoonai/resources/__init__.py +33 -0
  27. raccoonai/resources/fleet.py +485 -0
  28. raccoonai/resources/lam.py +1161 -0
  29. raccoonai/types/__init__.py +15 -0
  30. raccoonai/types/fleet_create_params.py +77 -0
  31. raccoonai/types/fleet_create_response.py +20 -0
  32. raccoonai/types/fleet_logs_response.py +14 -0
  33. raccoonai/types/fleet_status_response.py +17 -0
  34. raccoonai/types/fleet_terminate_response.py +17 -0
  35. raccoonai/types/lam_extract_params.py +51 -0
  36. raccoonai/types/lam_extract_response.py +28 -0
  37. raccoonai/types/lam_integration_run_params.py +35 -0
  38. raccoonai/types/lam_integration_run_response.py +47 -0
  39. raccoonai/types/lam_run_params.py +41 -0
  40. raccoonai/types/lam_run_response.py +21 -0
  41. raccoonai-0.1.0a1.dist-info/METADATA +422 -0
  42. raccoonai-0.1.0a1.dist-info/RECORD +44 -0
  43. raccoonai-0.1.0a1.dist-info/WHEEL +4 -0
  44. raccoonai-0.1.0a1.dist-info/licenses/LICENSE +201 -0
@@ -0,0 +1,15 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from .lam_run_params import LamRunParams as LamRunParams
6
+ from .lam_run_response import LamRunResponse as LamRunResponse
7
+ from .lam_extract_params import LamExtractParams as LamExtractParams
8
+ from .fleet_create_params import FleetCreateParams as FleetCreateParams
9
+ from .fleet_logs_response import FleetLogsResponse as FleetLogsResponse
10
+ from .lam_extract_response import LamExtractResponse as LamExtractResponse
11
+ from .fleet_create_response import FleetCreateResponse as FleetCreateResponse
12
+ from .fleet_status_response import FleetStatusResponse as FleetStatusResponse
13
+ from .fleet_terminate_response import FleetTerminateResponse as FleetTerminateResponse
14
+ from .lam_integration_run_params import LamIntegrationRunParams as LamIntegrationRunParams
15
+ from .lam_integration_run_response import LamIntegrationRunResponse as LamIntegrationRunResponse
@@ -0,0 +1,77 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Dict, List, Optional
6
+ from typing_extensions import TypedDict
7
+
8
+ __all__ = ["FleetCreateParams", "Advanced", "Settings"]
9
+
10
+
11
class FleetCreateParams(TypedDict, total=False):
    """Request body for creating a browser fleet session.

    Every key is optional (``total=False``); omitted keys fall back to
    server-side defaults.
    """

    advanced: Optional[Advanced]
    """
    Advanced configuration options for the session, such as ad-blocking and CAPTCHA
    solving.
    """

    app_name: Optional[str]
    """
    The name of the app for which the session is going to run for, used for
    streamlining authentication.
    """

    browser_type: Optional[str]
    """The type of browser to use.

    Supported values include 'chromium', 'firefox', and 'webkit'.
    """

    headless: Optional[bool]
    """Whether to run the browser in headless mode."""

    raccoon_passcode: Optional[str]
    """
    The raccoon passcode associated with the end user on behalf of which the call is
    being made if any.
    """

    session_timeout: Optional[int]
    """The timeout for the browser session in seconds."""

    settings: Optional[Settings]
    """
    Configuration settings for the browser, such as viewport size and User-Agent
    string.
    """

    url: Optional[str]
    """The entrypoint url for the session."""
50
+
51
+
52
class Advanced(TypedDict, total=False):
    """Advanced per-session feature toggles (all keys optional)."""

    block_ads: Optional[bool]
    """Whether to block advertisements during the browser session."""

    proxy: Optional[bool]
    """Whether to use a proxy for the browser session."""

    solve_captchas: Optional[bool]
    """Whether to attempt automatic CAPTCHA solving."""
61
+
62
+
63
class Settings(TypedDict, total=False):
    """Browser configuration settings for a session (all keys optional)."""

    locales: Optional[List[str]]
    """A list of locales or languages to use for the browser session.

    These determine language preferences.
    """

    user_agent: Optional[str]
    """The User-Agent string to use for the browser.

    Defaults to internal auto user-agent rotation mechanism.
    """

    viewport: Optional[Dict[str, int]]
    """The viewport size (screen dimensions) for the browser in pixels."""
@@ -0,0 +1,20 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+
4
+ from .._models import BaseModel
5
+
6
+ __all__ = ["FleetCreateResponse"]
7
+
8
+
9
# Response model returned after a new browser session is created.
# (Comment rather than a class docstring so the generated JSON schema
# description is left untouched.)
class FleetCreateResponse(BaseModel):
    session_id: str
    """A unique identifier for the created session."""

    status: str
    """The current status of the session.

    Possible values include 'running', 'unknown', or 'terminated'.
    """

    websocket_url: str
    """The WebSocket URL for interacting with the session."""
@@ -0,0 +1,14 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+
4
+ from .._models import BaseModel
5
+
6
+ __all__ = ["FleetLogsResponse"]
7
+
8
+
9
# Response model for fetching a session's browser console logs.
class FleetLogsResponse(BaseModel):
    session_id: str
    """A unique identifier for the session."""

    # Typed as a bare `object`; presumably a dict keyed by log source —
    # callers must narrow before use.
    session_logs: object
    """A dictionary containing the browser console logs for the session."""
@@ -0,0 +1,17 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+
4
+ from .._models import BaseModel
5
+
6
+ __all__ = ["FleetStatusResponse"]
7
+
8
+
9
# Response model for querying a session's current status.
class FleetStatusResponse(BaseModel):
    session_id: str
    """A unique identifier for the session."""

    status: str
    """The current status of the session.

    Possible values include 'running', 'unknown', or 'terminated'.
    """
@@ -0,0 +1,17 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+
4
+ from .._models import BaseModel
5
+
6
+ __all__ = ["FleetTerminateResponse"]
7
+
8
+
9
# Response model returned after requesting termination of a session.
class FleetTerminateResponse(BaseModel):
    session_id: str
    """A unique identifier for the session."""

    status: str
    """The current status of the session.

    Possible values include 'running', 'unknown', or 'terminated'.
    """
@@ -0,0 +1,51 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union, Iterable, Optional
6
+ from typing_extensions import Literal, Required, TypedDict
7
+
8
# Export the concrete variants AND the `LamExtractParams` union alias (defined
# at the bottom of this module) so `from .lam_extract_params import *` and
# API-doc tools pick up the primary request type, not just its parts.
__all__ = ["LamExtractParamsBase", "LamExtractParamsNonStreaming", "LamExtractParamsStreaming", "LamExtractParams"]
9
+
10
+
11
class LamExtractParamsBase(TypedDict, total=False):
    """Shared request parameters for the LAM extract endpoint.

    ``query`` and ``raccoon_passcode`` are required; the remaining keys are
    optional (``total=False``). The streaming / non-streaming variants in this
    module add the ``stream`` key on top of this base.
    """

    query: Required[str]
    """The input query string for the request. This is typically the main prompt."""

    raccoon_passcode: Required[str]
    """
    The raccoon passcode associated with the end user on behalf of which the call is
    being made.
    """

    app_url: Optional[str]
    """This is the entrypoint URL for the web agent."""

    chat_history: Optional[Iterable[object]]
    """
    The history of the conversation as a list of messages or objects you might use
    while building a chat app to give the model context of the past conversation.
    """

    max_count: Optional[int]
    """The maximum number of results to extract."""

    # Unlike the other optional keys, this one is not wrapped in Optional,
    # so an explicit `None` value is not part of the declared type.
    schema: object
    """The expected schema for the response.

    This is a dictionary where the keys describe the fields and the values describe
    their purposes.
    """
39
+
40
+
41
class LamExtractParamsNonStreaming(LamExtractParamsBase, total=False):
    """Extract request with streaming disabled (``stream`` omitted or False)."""

    stream: Optional[Literal[False]]
    """Whether the response should be streamed back or not."""
44
+
45
+
46
class LamExtractParamsStreaming(LamExtractParamsBase):
    """Extract request with streaming enabled (``stream=True`` is required)."""

    stream: Required[Literal[True]]
    """Whether the response should be streamed back or not."""
49
+
50
+
51
# Public request type for the extract endpoint: a union of the two variants,
# discriminated by the `stream` key.
LamExtractParams = Union[LamExtractParamsNonStreaming, LamExtractParamsStreaming]
@@ -0,0 +1,28 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import List
4
+
5
+ from .._models import BaseModel
6
+
7
+ __all__ = ["LamExtractResponse"]
8
+
9
+
10
# Response model for the LAM extract endpoint; see field docstrings for the
# meaning of each field at the various task statuses.
class LamExtractResponse(BaseModel):
    data: List[object]
    """The extracted data as a list of objects when the status is DONE.

    Each object represents an extracted entity.
    """

    message: str
    """A message providing the thought summary if the status is processing currently."""

    properties: object
    """Additional metadata or information related to the extraction task."""

    task_status: str
    """The current status of the extraction task.

    For example: 'STARTING', 'PROCESSING', 'DONE', 'HUMAN_INTERACTION', or
    'FAILURE'.
    """
@@ -0,0 +1,35 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union, Optional
6
+ from typing_extensions import Literal, Required, TypedDict
7
+
8
# Export the concrete variants AND the `LamIntegrationRunParams` union alias
# (defined at the bottom of this module) so star-imports and API-doc tools
# pick up the primary request type, not just its parts.
__all__ = [
    "LamIntegrationRunParamsBase",
    "LamIntegrationRunParamsNonStreaming",
    "LamIntegrationRunParamsStreaming",
    "LamIntegrationRunParams",
]
9
+
10
+
11
class LamIntegrationRunParamsBase(TypedDict, total=False):
    """Shared request parameters for running a LAM integration.

    Only ``raccoon_passcode`` is required; the streaming / non-streaming
    variants in this module add the ``stream`` key on top of this base.
    """

    raccoon_passcode: Required[str]
    """
    The raccoon passcode associated with the end user on behalf of which the call is
    being made.
    """

    integration_id: Optional[str]
    """The unique identifier for the integration being called."""

    properties: Optional[object]
    """Additional properties or data related to the particular integration."""
23
+
24
+
25
class LamIntegrationRunParamsNonStreaming(LamIntegrationRunParamsBase, total=False):
    """Integration run request with streaming disabled (``stream`` omitted or False)."""

    stream: Optional[Literal[False]]
    """Whether the response should be streamed back or not."""
28
+
29
+
30
class LamIntegrationRunParamsStreaming(LamIntegrationRunParamsBase):
    """Integration run request with streaming enabled (``stream=True`` is required)."""

    stream: Required[Literal[True]]
    """Whether the response should be streamed back or not."""
33
+
34
+
35
# Public request type for the integration-run endpoint: a union of the two
# variants, discriminated by the `stream` key.
LamIntegrationRunParams = Union[LamIntegrationRunParamsNonStreaming, LamIntegrationRunParamsStreaming]
@@ -0,0 +1,47 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import List, Union
4
+ from typing_extensions import TypeAlias
5
+
6
+ from .._models import BaseModel
7
+
8
+ __all__ = ["LamIntegrationRunResponse", "UnionMember0", "IntegrationResponse"]
9
+
10
+
11
# NOTE(review): UnionMember0 is structurally identical to IntegrationResponse
# defined later in this module; it is the element type of the list variant of
# LamIntegrationRunResponse. The duplication looks generator-driven — confirm
# against the OpenAPI spec before attempting to merge them.
class UnionMember0(BaseModel):
    integration_id: str
    """A unique identifier for the integration in use."""

    message: str
    """A message providing the thought summary if the status is processing currently."""

    properties: object
    """Additional metadata or details related to the integration task."""

    # NOTE(review): the docstring below says "extraction task" — presumably a
    # spec copy-paste; this status describes the integration run.
    task_status: str
    """The current status of the extraction task.

    For example: 'STARTING', 'PROCESSING', 'DONE', 'HUMAN_INTERACTION', or
    'FAILURE'.
    """
27
+
28
+
29
# Single-object variant of LamIntegrationRunResponse. Structurally identical
# to UnionMember0 earlier in this module (generated duplication).
class IntegrationResponse(BaseModel):
    integration_id: str
    """A unique identifier for the integration in use."""

    message: str
    """A message providing the thought summary if the status is processing currently."""

    properties: object
    """Additional metadata or details related to the integration task."""

    # NOTE(review): the docstring below says "extraction task" — presumably a
    # spec copy-paste; this status describes the integration run.
    task_status: str
    """The current status of the extraction task.

    For example: 'STARTING', 'PROCESSING', 'DONE', 'HUMAN_INTERACTION', or
    'FAILURE'.
    """
45
+
46
+
47
# The endpoint yields either a list of per-step objects or a single
# integration response object; callers must isinstance-check to narrow.
LamIntegrationRunResponse: TypeAlias = Union[List[UnionMember0], IntegrationResponse]
@@ -0,0 +1,41 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union, Iterable, Optional
6
+ from typing_extensions import Literal, Required, TypedDict
7
+
8
# Export the concrete variants AND the `LamRunParams` union alias (defined at
# the bottom of this module) so star-imports and API-doc tools pick up the
# primary request type, not just its parts.
__all__ = ["LamRunParamsBase", "LamRunParamsNonStreaming", "LamRunParamsStreaming", "LamRunParams"]
9
+
10
+
11
class LamRunParamsBase(TypedDict, total=False):
    """Shared request parameters for the LAM run endpoint.

    ``query`` and ``raccoon_passcode`` are required; the remaining keys are
    optional (``total=False``). The streaming / non-streaming variants in this
    module add the ``stream`` key on top of this base.
    """

    query: Required[str]
    """The input query string for the request. This is typically the main prompt."""

    raccoon_passcode: Required[str]
    """
    The raccoon passcode associated with the end user on behalf of which the call is
    being made.
    """

    app_url: Optional[str]
    """This is the entrypoint URL for the web agent."""

    chat_history: Optional[Iterable[object]]
    """
    The history of the conversation as a list of messages or objects you might use
    while building a chat app to give the model context of the past conversation.
    """
29
+
30
+
31
class LamRunParamsNonStreaming(LamRunParamsBase, total=False):
    """Run request with streaming disabled (``stream`` omitted or False)."""

    stream: Optional[Literal[False]]
    """Whether the response should be streamed back or not."""
34
+
35
+
36
class LamRunParamsStreaming(LamRunParamsBase):
    """Run request with streaming enabled (``stream=True`` is required)."""

    stream: Required[Literal[True]]
    """Whether the response should be streamed back or not."""
39
+
40
+
41
# Public request type for the run endpoint: a union of the two variants,
# discriminated by the `stream` key.
LamRunParams = Union[LamRunParamsNonStreaming, LamRunParamsStreaming]
@@ -0,0 +1,21 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+
4
+ from .._models import BaseModel
5
+
6
+ __all__ = ["LamRunResponse"]
7
+
8
+
9
# Response model for the LAM run endpoint.
class LamRunResponse(BaseModel):
    message: str
    """A message providing the thought summary if the status is processing currently."""

    properties: object
    """Additional metadata or details related to the run task."""

    # NOTE(review): the docstring below says "extraction task" — presumably a
    # spec copy-paste; this status describes the run task.
    task_status: str
    """The current status of the extraction task.

    For example: 'STARTING', 'PROCESSING', 'DONE', 'HUMAN_INTERACTION', or
    'FAILURE'.
    """