raccoonai 0.1.0a5__py3-none-any.whl → 0.1.0a7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of raccoonai might be problematic.

@@ -7,7 +7,7 @@ from typing_extensions import Literal, overload
 
 import httpx
 
-from ..types import lam_run_params, lam_extract_params, lam_integration_run_params
+from ..types import lam_run_params, lam_integration_run_params
 from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
 from .._utils import (
     required_args,
@@ -25,7 +25,6 @@ from .._response import (
 from .._streaming import Stream, AsyncStream
 from .._base_client import make_request_options
 from ..types.lam_run_response import LamRunResponse
-from ..types.lam_extract_response import LamExtractResponse
 from ..types.lam_integration_run_response import LamIntegrationRunResponse
 
 __all__ = ["LamResource", "AsyncLamResource"]
@@ -51,210 +50,6 @@ class LamResource(SyncAPIResource):
         """
         return LamResourceWithStreamingResponse(self)
 
-    @overload
-    def extract(
-        self,
-        *,
-        query: str,
-        raccoon_passcode: str,
-        advanced: Optional[lam_extract_params.Advanced] | NotGiven = NOT_GIVEN,
-        app_url: Optional[str] | NotGiven = NOT_GIVEN,
-        chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
-        max_count: Optional[int] | NotGiven = NOT_GIVEN,
-        schema: object | NotGiven = NOT_GIVEN,
-        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> LamExtractResponse:
-        """Lam Extract Endpoint
-
-        Args:
-          query: The input query string for the request.
-
-              This is typically the main prompt.
-
-          raccoon_passcode: The raccoon passcode associated with the end user on behalf of which the call is
-              being made.
-
-          advanced: Advanced configuration options for the session, such as ad-blocking and CAPTCHA
-              solving.
-
-          app_url: This is the entrypoint URL for the web agent.
-
-          chat_history: The history of the conversation as a list of messages or objects you might use
-              while building a chat app to give the model context of the past conversation.
-
-          max_count: The maximum number of results to extract.
-
-          schema: The expected schema for the response. This is a dictionary where the keys
-              describe the fields and the values describe their purposes.
-
-          stream: Whether the response should be streamed back or not.
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        ...
-
-    @overload
-    def extract(
-        self,
-        *,
-        query: str,
-        raccoon_passcode: str,
-        stream: Literal[True],
-        advanced: Optional[lam_extract_params.Advanced] | NotGiven = NOT_GIVEN,
-        app_url: Optional[str] | NotGiven = NOT_GIVEN,
-        chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
-        max_count: Optional[int] | NotGiven = NOT_GIVEN,
-        schema: object | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Stream[LamExtractResponse]:
-        """Lam Extract Endpoint
-
-        Args:
-          query: The input query string for the request.
-
-              This is typically the main prompt.
-
-          raccoon_passcode: The raccoon passcode associated with the end user on behalf of which the call is
-              being made.
-
-          stream: Whether the response should be streamed back or not.
-
-          advanced: Advanced configuration options for the session, such as ad-blocking and CAPTCHA
-              solving.
-
-          app_url: This is the entrypoint URL for the web agent.
-
-          chat_history: The history of the conversation as a list of messages or objects you might use
-              while building a chat app to give the model context of the past conversation.
-
-          max_count: The maximum number of results to extract.
-
-          schema: The expected schema for the response. This is a dictionary where the keys
-              describe the fields and the values describe their purposes.
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        ...
-
-    @overload
-    def extract(
-        self,
-        *,
-        query: str,
-        raccoon_passcode: str,
-        stream: bool,
-        advanced: Optional[lam_extract_params.Advanced] | NotGiven = NOT_GIVEN,
-        app_url: Optional[str] | NotGiven = NOT_GIVEN,
-        chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
-        max_count: Optional[int] | NotGiven = NOT_GIVEN,
-        schema: object | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> LamExtractResponse | Stream[LamExtractResponse]:
-        """Lam Extract Endpoint
-
-        Args:
-          query: The input query string for the request.
-
-              This is typically the main prompt.
-
-          raccoon_passcode: The raccoon passcode associated with the end user on behalf of which the call is
-              being made.
-
-          stream: Whether the response should be streamed back or not.
-
-          advanced: Advanced configuration options for the session, such as ad-blocking and CAPTCHA
-              solving.
-
-          app_url: This is the entrypoint URL for the web agent.
-
-          chat_history: The history of the conversation as a list of messages or objects you might use
-              while building a chat app to give the model context of the past conversation.
-
-          max_count: The maximum number of results to extract.
-
-          schema: The expected schema for the response. This is a dictionary where the keys
-              describe the fields and the values describe their purposes.
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        ...
-
-    @required_args(["query", "raccoon_passcode"], ["query", "raccoon_passcode", "stream"])
-    def extract(
-        self,
-        *,
-        query: str,
-        raccoon_passcode: str,
-        advanced: Optional[lam_extract_params.Advanced] | NotGiven = NOT_GIVEN,
-        app_url: Optional[str] | NotGiven = NOT_GIVEN,
-        chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
-        max_count: Optional[int] | NotGiven = NOT_GIVEN,
-        schema: object | NotGiven = NOT_GIVEN,
-        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> LamExtractResponse | Stream[LamExtractResponse]:
-        return self._post(
-            "/lam/extract",
-            body=maybe_transform(
-                {
-                    "query": query,
-                    "raccoon_passcode": raccoon_passcode,
-                    "advanced": advanced,
-                    "app_url": app_url,
-                    "chat_history": chat_history,
-                    "max_count": max_count,
-                    "schema": schema,
-                    "stream": stream,
-                },
-                lam_extract_params.LamExtractParams,
-            ),
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=LamExtractResponse,
-            stream=stream or False,
-            stream_cls=Stream[LamExtractResponse],
-        )
-
     @overload
     def integration_run(
         self,
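In short, the hunk above drops the synchronous `extract` method from `LamResource` outright: all three `@overload` stubs plus the implementation that posted to `/lam/extract`. A minimal sketch of the kind of 0.1.0a5 call that stops resolving, assuming the exported client class is named `RaccoonAI` (the class name itself is not shown in this diff):

from raccoonai import RaccoonAI  # client class name assumed, not part of this diff

client = RaccoonAI()

# Worked against 0.1.0a5; on 0.1.0a7 LamResource no longer defines extract(),
# so a call like this no longer works.
rows = client.lam.extract(
    query="Extract the top laptop listings",
    raccoon_passcode="<end-user passcode>",
    max_count=10,
    schema={"title": "Listing title", "price": "Listed price"},
)

The hunks that follow show where the same extraction-style parameters reappear on `run`.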
@@ -437,6 +232,9 @@ class LamResource(SyncAPIResource):
         advanced: Optional[lam_run_params.Advanced] | NotGiven = NOT_GIVEN,
         app_url: Optional[str] | NotGiven = NOT_GIVEN,
         chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
+        max_count: Optional[int] | NotGiven = NOT_GIVEN,
+        mode: Optional[Literal["deepsearch", "default"]] | NotGiven = NOT_GIVEN,
+        schema: object | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -463,6 +261,13 @@ class LamResource(SyncAPIResource):
          chat_history: The history of the conversation as a list of messages or objects you might use
              while building a chat app to give the model context of the past conversation.
 
+          max_count: The maximum number of results to extract.
+
+          mode: Mode of execution.
+
+          schema: The expected schema for the response. This is a dictionary where the keys
+              describe the fields and the values describe their purposes.
+
           stream: Whether the response should be streamed back or not.
 
           extra_headers: Send extra headers
@@ -485,6 +290,9 @@ class LamResource(SyncAPIResource):
         advanced: Optional[lam_run_params.Advanced] | NotGiven = NOT_GIVEN,
         app_url: Optional[str] | NotGiven = NOT_GIVEN,
         chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
+        max_count: Optional[int] | NotGiven = NOT_GIVEN,
+        mode: Optional[Literal["deepsearch", "default"]] | NotGiven = NOT_GIVEN,
+        schema: object | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -512,6 +320,13 @@ class LamResource(SyncAPIResource):
          chat_history: The history of the conversation as a list of messages or objects you might use
              while building a chat app to give the model context of the past conversation.
 
+          max_count: The maximum number of results to extract.
+
+          mode: Mode of execution.
+
+          schema: The expected schema for the response. This is a dictionary where the keys
+              describe the fields and the values describe their purposes.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -532,6 +347,9 @@ class LamResource(SyncAPIResource):
         advanced: Optional[lam_run_params.Advanced] | NotGiven = NOT_GIVEN,
         app_url: Optional[str] | NotGiven = NOT_GIVEN,
         chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
+        max_count: Optional[int] | NotGiven = NOT_GIVEN,
+        mode: Optional[Literal["deepsearch", "default"]] | NotGiven = NOT_GIVEN,
+        schema: object | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -559,6 +377,13 @@ class LamResource(SyncAPIResource):
          chat_history: The history of the conversation as a list of messages or objects you might use
              while building a chat app to give the model context of the past conversation.
 
+          max_count: The maximum number of results to extract.
+
+          mode: Mode of execution.
+
+          schema: The expected schema for the response. This is a dictionary where the keys
+              describe the fields and the values describe their purposes.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -578,6 +403,9 @@ class LamResource(SyncAPIResource):
         advanced: Optional[lam_run_params.Advanced] | NotGiven = NOT_GIVEN,
         app_url: Optional[str] | NotGiven = NOT_GIVEN,
         chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
+        max_count: Optional[int] | NotGiven = NOT_GIVEN,
+        mode: Optional[Literal["deepsearch", "default"]] | NotGiven = NOT_GIVEN,
+        schema: object | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -595,6 +423,9 @@ class LamResource(SyncAPIResource):
                     "advanced": advanced,
                     "app_url": app_url,
                     "chat_history": chat_history,
+                    "max_count": max_count,
+                    "mode": mode,
+                    "schema": schema,
                     "stream": stream,
                 },
                 lam_run_params.LamRunParams,
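Taken together, the eight hunks above thread three new keyword arguments through every `run` overload and into the request body: `max_count`, `mode` (literal "deepsearch" or "default"), and `schema`. A hedged usage sketch, again assuming the client class name and that `run` keeps its existing `query` and `raccoon_passcode` arguments (neither appears in the changed lines):

from raccoonai import RaccoonAI  # assumed client class name

client = RaccoonAI()

response = client.lam.run(
    query="Find wireless earbuds under $100",
    raccoon_passcode="<end-user passcode>",  # assumed to remain a run() argument
    mode="deepsearch",                       # new in 0.1.0a7: "deepsearch" or "default"
    max_count=5,                             # new in 0.1.0a7: cap on extracted results
    schema={                                 # new in 0.1.0a7: keys name fields, values describe them
        "name": "Product name",
        "price": "Listed price",
    },
)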
@@ -628,210 +459,6 @@ class AsyncLamResource(AsyncAPIResource):
         """
         return AsyncLamResourceWithStreamingResponse(self)
 
-    @overload
-    async def extract(
-        self,
-        *,
-        query: str,
-        raccoon_passcode: str,
-        advanced: Optional[lam_extract_params.Advanced] | NotGiven = NOT_GIVEN,
-        app_url: Optional[str] | NotGiven = NOT_GIVEN,
-        chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
-        max_count: Optional[int] | NotGiven = NOT_GIVEN,
-        schema: object | NotGiven = NOT_GIVEN,
-        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> LamExtractResponse:
-        """Lam Extract Endpoint
-
-        Args:
-          query: The input query string for the request.
-
-              This is typically the main prompt.
-
-          raccoon_passcode: The raccoon passcode associated with the end user on behalf of which the call is
-              being made.
-
-          advanced: Advanced configuration options for the session, such as ad-blocking and CAPTCHA
-              solving.
-
-          app_url: This is the entrypoint URL for the web agent.
-
-          chat_history: The history of the conversation as a list of messages or objects you might use
-              while building a chat app to give the model context of the past conversation.
-
-          max_count: The maximum number of results to extract.
-
-          schema: The expected schema for the response. This is a dictionary where the keys
-              describe the fields and the values describe their purposes.
-
-          stream: Whether the response should be streamed back or not.
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        ...
-
-    @overload
-    async def extract(
-        self,
-        *,
-        query: str,
-        raccoon_passcode: str,
-        stream: Literal[True],
-        advanced: Optional[lam_extract_params.Advanced] | NotGiven = NOT_GIVEN,
-        app_url: Optional[str] | NotGiven = NOT_GIVEN,
-        chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
-        max_count: Optional[int] | NotGiven = NOT_GIVEN,
-        schema: object | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AsyncStream[LamExtractResponse]:
-        """Lam Extract Endpoint
-
-        Args:
-          query: The input query string for the request.
-
-              This is typically the main prompt.
-
-          raccoon_passcode: The raccoon passcode associated with the end user on behalf of which the call is
-              being made.
-
-          stream: Whether the response should be streamed back or not.
-
-          advanced: Advanced configuration options for the session, such as ad-blocking and CAPTCHA
-              solving.
-
-          app_url: This is the entrypoint URL for the web agent.
-
-          chat_history: The history of the conversation as a list of messages or objects you might use
-              while building a chat app to give the model context of the past conversation.
-
-          max_count: The maximum number of results to extract.
-
-          schema: The expected schema for the response. This is a dictionary where the keys
-              describe the fields and the values describe their purposes.
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        ...
-
-    @overload
-    async def extract(
-        self,
-        *,
-        query: str,
-        raccoon_passcode: str,
-        stream: bool,
-        advanced: Optional[lam_extract_params.Advanced] | NotGiven = NOT_GIVEN,
-        app_url: Optional[str] | NotGiven = NOT_GIVEN,
-        chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
-        max_count: Optional[int] | NotGiven = NOT_GIVEN,
-        schema: object | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> LamExtractResponse | AsyncStream[LamExtractResponse]:
-        """Lam Extract Endpoint
-
-        Args:
-          query: The input query string for the request.
-
-              This is typically the main prompt.
-
-          raccoon_passcode: The raccoon passcode associated with the end user on behalf of which the call is
-              being made.
-
-          stream: Whether the response should be streamed back or not.
-
-          advanced: Advanced configuration options for the session, such as ad-blocking and CAPTCHA
-              solving.
-
-          app_url: This is the entrypoint URL for the web agent.
-
-          chat_history: The history of the conversation as a list of messages or objects you might use
-              while building a chat app to give the model context of the past conversation.
-
-          max_count: The maximum number of results to extract.
-
-          schema: The expected schema for the response. This is a dictionary where the keys
-              describe the fields and the values describe their purposes.
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        ...
-
-    @required_args(["query", "raccoon_passcode"], ["query", "raccoon_passcode", "stream"])
-    async def extract(
-        self,
-        *,
-        query: str,
-        raccoon_passcode: str,
-        advanced: Optional[lam_extract_params.Advanced] | NotGiven = NOT_GIVEN,
-        app_url: Optional[str] | NotGiven = NOT_GIVEN,
-        chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
-        max_count: Optional[int] | NotGiven = NOT_GIVEN,
-        schema: object | NotGiven = NOT_GIVEN,
-        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> LamExtractResponse | AsyncStream[LamExtractResponse]:
-        return await self._post(
-            "/lam/extract",
-            body=await async_maybe_transform(
-                {
-                    "query": query,
-                    "raccoon_passcode": raccoon_passcode,
-                    "advanced": advanced,
-                    "app_url": app_url,
-                    "chat_history": chat_history,
-                    "max_count": max_count,
-                    "schema": schema,
-                    "stream": stream,
-                },
-                lam_extract_params.LamExtractParams,
-            ),
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=LamExtractResponse,
-            stream=stream or False,
-            stream_cls=AsyncStream[LamExtractResponse],
-        )
-
     @overload
     async def integration_run(
         self,
@@ -1014,6 +641,9 @@ class AsyncLamResource(AsyncAPIResource):
         advanced: Optional[lam_run_params.Advanced] | NotGiven = NOT_GIVEN,
         app_url: Optional[str] | NotGiven = NOT_GIVEN,
         chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
+        max_count: Optional[int] | NotGiven = NOT_GIVEN,
+        mode: Optional[Literal["deepsearch", "default"]] | NotGiven = NOT_GIVEN,
+        schema: object | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1040,6 +670,13 @@ class AsyncLamResource(AsyncAPIResource):
          chat_history: The history of the conversation as a list of messages or objects you might use
              while building a chat app to give the model context of the past conversation.
 
+          max_count: The maximum number of results to extract.
+
+          mode: Mode of execution.
+
+          schema: The expected schema for the response. This is a dictionary where the keys
+              describe the fields and the values describe their purposes.
+
           stream: Whether the response should be streamed back or not.
 
           extra_headers: Send extra headers
@@ -1062,6 +699,9 @@ class AsyncLamResource(AsyncAPIResource):
         advanced: Optional[lam_run_params.Advanced] | NotGiven = NOT_GIVEN,
         app_url: Optional[str] | NotGiven = NOT_GIVEN,
         chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
+        max_count: Optional[int] | NotGiven = NOT_GIVEN,
+        mode: Optional[Literal["deepsearch", "default"]] | NotGiven = NOT_GIVEN,
+        schema: object | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1089,6 +729,13 @@ class AsyncLamResource(AsyncAPIResource):
          chat_history: The history of the conversation as a list of messages or objects you might use
              while building a chat app to give the model context of the past conversation.
 
+          max_count: The maximum number of results to extract.
+
+          mode: Mode of execution.
+
+          schema: The expected schema for the response. This is a dictionary where the keys
+              describe the fields and the values describe their purposes.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -1109,6 +756,9 @@ class AsyncLamResource(AsyncAPIResource):
         advanced: Optional[lam_run_params.Advanced] | NotGiven = NOT_GIVEN,
         app_url: Optional[str] | NotGiven = NOT_GIVEN,
         chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
+        max_count: Optional[int] | NotGiven = NOT_GIVEN,
+        mode: Optional[Literal["deepsearch", "default"]] | NotGiven = NOT_GIVEN,
+        schema: object | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1136,6 +786,13 @@ class AsyncLamResource(AsyncAPIResource):
          chat_history: The history of the conversation as a list of messages or objects you might use
              while building a chat app to give the model context of the past conversation.
 
+          max_count: The maximum number of results to extract.
+
+          mode: Mode of execution.
+
+          schema: The expected schema for the response. This is a dictionary where the keys
+              describe the fields and the values describe their purposes.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -1155,6 +812,9 @@ class AsyncLamResource(AsyncAPIResource):
         advanced: Optional[lam_run_params.Advanced] | NotGiven = NOT_GIVEN,
         app_url: Optional[str] | NotGiven = NOT_GIVEN,
         chat_history: Optional[Iterable[object]] | NotGiven = NOT_GIVEN,
+        max_count: Optional[int] | NotGiven = NOT_GIVEN,
+        mode: Optional[Literal["deepsearch", "default"]] | NotGiven = NOT_GIVEN,
+        schema: object | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1172,6 +832,9 @@ class AsyncLamResource(AsyncAPIResource):
                     "advanced": advanced,
                     "app_url": app_url,
                     "chat_history": chat_history,
+                    "max_count": max_count,
+                    "mode": mode,
+                    "schema": schema,
                     "stream": stream,
                 },
                 lam_run_params.LamRunParams,
@@ -1189,9 +852,6 @@ class LamResourceWithRawResponse:
     def __init__(self, lam: LamResource) -> None:
         self._lam = lam
 
-        self.extract = to_raw_response_wrapper(
-            lam.extract,
-        )
         self.integration_run = to_raw_response_wrapper(
             lam.integration_run,
         )
@@ -1204,9 +864,6 @@ class AsyncLamResourceWithRawResponse:
    def __init__(self, lam: AsyncLamResource) -> None:
         self._lam = lam
 
-        self.extract = async_to_raw_response_wrapper(
-            lam.extract,
-        )
         self.integration_run = async_to_raw_response_wrapper(
             lam.integration_run,
         )
@@ -1219,9 +876,6 @@ class LamResourceWithStreamingResponse:
     def __init__(self, lam: LamResource) -> None:
         self._lam = lam
 
-        self.extract = to_streamed_response_wrapper(
-            lam.extract,
-        )
         self.integration_run = to_streamed_response_wrapper(
             lam.integration_run,
         )
@@ -1234,9 +888,6 @@ class AsyncLamResourceWithStreamingResponse:
     def __init__(self, lam: AsyncLamResource) -> None:
         self._lam = lam
 
-        self.extract = async_to_streamed_response_wrapper(
-            lam.extract,
-        )
         self.integration_run = async_to_streamed_response_wrapper(
             lam.integration_run,
         )
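The async side mirrors the sync side one-for-one: `AsyncLamResource.extract` and its raw/streaming response wrappers are removed, and `AsyncLamResource.run` gains the same `max_count`, `mode`, and `schema` parameters. A sketch of the async call, assuming the async client is exported as `AsyncRaccoonAI` (the name is not part of this diff):

import asyncio

from raccoonai import AsyncRaccoonAI  # assumed async client class name


async def main() -> None:
    client = AsyncRaccoonAI()
    response = await client.lam.run(
        query="List trending Python packages",
        raccoon_passcode="<end-user passcode>",  # assumed to remain a run() argument
        mode="default",
        max_count=3,
        schema={"name": "Package name", "summary": "One-line description"},
    )
    print(response)


asyncio.run(main())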
@@ -4,10 +4,8 @@ from __future__ import annotations
 
 from .lam_run_params import LamRunParams as LamRunParams
 from .lam_run_response import LamRunResponse as LamRunResponse
-from .lam_extract_params import LamExtractParams as LamExtractParams
 from .fleet_create_params import FleetCreateParams as FleetCreateParams
 from .fleet_logs_response import FleetLogsResponse as FleetLogsResponse
-from .lam_extract_response import LamExtractResponse as LamExtractResponse
 from .fleet_create_response import FleetCreateResponse as FleetCreateResponse
 from .fleet_status_response import FleetStatusResponse as FleetStatusResponse
 from .fleet_terminate_response import FleetTerminateResponse as FleetTerminateResponse
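Finally, the `raccoonai.types` re-exports for `LamExtractParams` and `LamExtractResponse` disappear along with the endpoint, so imports of those names fail on 0.1.0a7. A small compatibility guard for code that has to run against both releases; the `None` fallback is just one possible choice:

try:
    # Exported by 0.1.0a5; removed from the public exports in 0.1.0a7.
    from raccoonai.types import LamExtractParams, LamExtractResponse
except ImportError:
    LamExtractParams = None
    LamExtractResponse = None  # callers should switch to lam.run(..., schema=..., max_count=...)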