isaacus-0.1.0a1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,263 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from __future__ import annotations
+
+ from typing import Optional
+ from typing_extensions import Literal
+
+ import httpx
+
+ from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+ from ..._utils import (
+     maybe_transform,
+     async_maybe_transform,
+ )
+ from ..._compat import cached_property
+ from ..._resource import SyncAPIResource, AsyncAPIResource
+ from ..._response import (
+     to_raw_response_wrapper,
+     to_streamed_response_wrapper,
+     async_to_raw_response_wrapper,
+     async_to_streamed_response_wrapper,
+ )
+ from ..._base_client import make_request_options
+ from ...types.classifications import universal_create_params
+ from ...types.classifications.universal_classification import UniversalClassification
+
+ __all__ = ["UniversalResource", "AsyncUniversalResource"]
+
+
+ class UniversalResource(SyncAPIResource):
+     @cached_property
+     def with_raw_response(self) -> UniversalResourceWithRawResponse:
+         """
+         This property can be used as a prefix for any HTTP method call to return
+         the raw response object instead of the parsed content.
+
+         For more information, see https://www.github.com/isaacus-dev/isaacus-python#accessing-raw-response-data-eg-headers
+         """
+         return UniversalResourceWithRawResponse(self)
+
+     @cached_property
+     def with_streaming_response(self) -> UniversalResourceWithStreamingResponse:
+         """
+         An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+         For more information, see https://www.github.com/isaacus-dev/isaacus-python#with_streaming_response
+         """
+         return UniversalResourceWithStreamingResponse(self)
+
+     def create(
+         self,
+         *,
+         model: Literal["kanon-universal-classifier", "kanon-universal-classifier-mini"],
+         query: str,
+         text: str,
+         chunking_options: Optional[universal_create_params.ChunkingOptions] | NotGiven = NOT_GIVEN,
+         is_iql: bool | NotGiven = NOT_GIVEN,
+         scoring_method: Literal["auto", "chunk_max", "chunk_avg", "chunk_min"] | NotGiven = NOT_GIVEN,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+     ) -> UniversalClassification:
+         """
+         Classify the relevance of a legal document to a query using an Isaacus universal
+         legal AI classifier.
+
+         Args:
+           model: The ID of the model to use for universal classification.
+
+           query: The Isaacus Query Language (IQL) query or, if IQL is disabled, the statement, to
+               evaluate the text against.
+
+               The query must contain at least one non-whitespace character.
+
+               Unlike the text being classified, the query cannot be so long that it exceeds
+               the maximum input length of the universal classifier.
+
+           text: The text to classify.
+
+               The text must contain at least one non-whitespace character.
+
+           chunking_options: Options for how to split text into smaller chunks.
+
+           is_iql: Whether the query should be interpreted as an Isaacus Query Language (IQL) query
+               or else as a statement.
+
+           scoring_method: The method to use for producing an overall confidence score.
+
+               `auto` is the default scoring method and is recommended for most use cases.
+               Currently, it is equivalent to `chunk_max`. In the future, it will automatically
+               select the best method based on the model and input.
+
+               `chunk_max` uses the highest confidence score of all of the text's chunks.
+
+               `chunk_avg` averages the confidence scores of all of the text's chunks.
+
+               `chunk_min` uses the lowest confidence score of all of the text's chunks.
+
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         return self._post(
+             "/classifications/universal",
+             body=maybe_transform(
+                 {
+                     "model": model,
+                     "query": query,
+                     "text": text,
+                     "chunking_options": chunking_options,
+                     "is_iql": is_iql,
+                     "scoring_method": scoring_method,
+                 },
+                 universal_create_params.UniversalCreateParams,
+             ),
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+             ),
+             cast_to=UniversalClassification,
+         )
+
+
+ class AsyncUniversalResource(AsyncAPIResource):
+     @cached_property
+     def with_raw_response(self) -> AsyncUniversalResourceWithRawResponse:
+         """
+         This property can be used as a prefix for any HTTP method call to return
+         the raw response object instead of the parsed content.
+
+         For more information, see https://www.github.com/isaacus-dev/isaacus-python#accessing-raw-response-data-eg-headers
+         """
+         return AsyncUniversalResourceWithRawResponse(self)
+
+     @cached_property
+     def with_streaming_response(self) -> AsyncUniversalResourceWithStreamingResponse:
+         """
+         An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+         For more information, see https://www.github.com/isaacus-dev/isaacus-python#with_streaming_response
+         """
+         return AsyncUniversalResourceWithStreamingResponse(self)
+
+     async def create(
+         self,
+         *,
+         model: Literal["kanon-universal-classifier", "kanon-universal-classifier-mini"],
+         query: str,
+         text: str,
+         chunking_options: Optional[universal_create_params.ChunkingOptions] | NotGiven = NOT_GIVEN,
+         is_iql: bool | NotGiven = NOT_GIVEN,
+         scoring_method: Literal["auto", "chunk_max", "chunk_avg", "chunk_min"] | NotGiven = NOT_GIVEN,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+     ) -> UniversalClassification:
+         """
+         Classify the relevance of a legal document to a query using an Isaacus universal
+         legal AI classifier.
+
+         Args:
+           model: The ID of the model to use for universal classification.
+
+           query: The Isaacus Query Language (IQL) query or, if IQL is disabled, the statement, to
+               evaluate the text against.
+
+               The query must contain at least one non-whitespace character.
+
+               Unlike the text being classified, the query cannot be so long that it exceeds
+               the maximum input length of the universal classifier.
+
+           text: The text to classify.
+
+               The text must contain at least one non-whitespace character.
+
+           chunking_options: Options for how to split text into smaller chunks.
+
+           is_iql: Whether the query should be interpreted as an Isaacus Query Language (IQL) query
+               or else as a statement.
+
+           scoring_method: The method to use for producing an overall confidence score.
+
+               `auto` is the default scoring method and is recommended for most use cases.
+               Currently, it is equivalent to `chunk_max`. In the future, it will automatically
+               select the best method based on the model and input.
+
+               `chunk_max` uses the highest confidence score of all of the text's chunks.
+
+               `chunk_avg` averages the confidence scores of all of the text's chunks.
+
+               `chunk_min` uses the lowest confidence score of all of the text's chunks.
+
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         return await self._post(
+             "/classifications/universal",
+             body=await async_maybe_transform(
+                 {
+                     "model": model,
+                     "query": query,
+                     "text": text,
+                     "chunking_options": chunking_options,
+                     "is_iql": is_iql,
+                     "scoring_method": scoring_method,
+                 },
+                 universal_create_params.UniversalCreateParams,
+             ),
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+             ),
+             cast_to=UniversalClassification,
+         )
+
+
+ class UniversalResourceWithRawResponse:
+     def __init__(self, universal: UniversalResource) -> None:
+         self._universal = universal
+
+         self.create = to_raw_response_wrapper(
+             universal.create,
+         )
+
+
+ class AsyncUniversalResourceWithRawResponse:
+     def __init__(self, universal: AsyncUniversalResource) -> None:
+         self._universal = universal
+
+         self.create = async_to_raw_response_wrapper(
+             universal.create,
+         )
+
+
+ class UniversalResourceWithStreamingResponse:
+     def __init__(self, universal: UniversalResource) -> None:
+         self._universal = universal
+
+         self.create = to_streamed_response_wrapper(
+             universal.create,
+         )
+
+
+ class AsyncUniversalResourceWithStreamingResponse:
+     def __init__(self, universal: AsyncUniversalResource) -> None:
+         self._universal = universal
+
+         self.create = async_to_streamed_response_wrapper(
+             universal.create,
+         )
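A minimal usage sketch of the synchronous resource above. The `Isaacus` client class, the `client.classifications.universal` attribute path, and the `ISAACUS_API_KEY` environment variable are assumptions based on how Stainless-generated SDKs are typically laid out; they are not confirmed by this diff, and the query and text are illustrative only.

from isaacus import Isaacus  # assumed client class name; not shown in this diff

# Assumed: the client reads its API key from the ISAACUS_API_KEY environment variable.
client = Isaacus()

# Classify how strongly a clause supports a plain-language statement (IQL disabled).
classification = client.classifications.universal.create(
    model="kanon-universal-classifier-mini",
    query="This clause imposes a confidentiality obligation on the receiving party.",
    text="The Receiving Party shall keep the Disclosing Party's information strictly confidential.",
    is_iql=False,
)
print(classification.score)

# Per the docstrings above, the raw HTTP response (including headers) is available through
# client.classifications.universal.with_raw_response.create(...), and a variant that does not
# eagerly read the body through .with_streaming_response; AsyncUniversalResource mirrors all
# of this behind an async client.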
@@ -0,0 +1,3 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from __future__ import annotations
@@ -0,0 +1,6 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from __future__ import annotations
+
+ from .universal_create_params import UniversalCreateParams as UniversalCreateParams
+ from .universal_classification import UniversalClassification as UniversalClassification
@@ -0,0 +1,55 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing import List, Optional
+
+ from ..._models import BaseModel
+
+ __all__ = ["UniversalClassification", "Chunk", "Usage"]
+
+
+ class Chunk(BaseModel):
+     end: int
+     """The end index of the chunk in the original text."""
+
+     score: float
+     """
+     The model's score of the likelihood that the query expressed about the chunk is
+     supported by the chunk.
+
+     A score greater than `0.5` indicates that the chunk supports the query, while a
+     score less than `0.5` indicates that the chunk does not support the query.
+     """
+
+     start: int
+     """The start index of the chunk in the original text."""
+
+     text: str
+     """The text of the chunk."""
+
+
+ class Usage(BaseModel):
+     input_tokens: int
+     """The number of tokens inputted to the model."""
+
+
+ class UniversalClassification(BaseModel):
+     chunks: Optional[List[Chunk]] = None
+     """
+     The text as broken into chunks by
+     [semchunk](https://github.com/isaacus-dev/semchunk), each chunk with its own
+     confidence score.
+
+     If no chunking occurred, this will be `null`.
+     """
+
+     score: float
+     """
+     A score of the likelihood that the query expressed about the text is supported
+     by the text.
+
+     A score greater than `0.5` indicates that the text supports the query, while a
+     score less than `0.5` indicates that the text does not support the query.
+     """
+
+     usage: Usage
+     """Statistics about the usage of resources in the process of classifying the text."""
@@ -0,0 +1,64 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from __future__ import annotations
+
+ from typing import Optional
+ from typing_extensions import Literal, Required, TypedDict
+
+ __all__ = ["UniversalCreateParams", "ChunkingOptions"]
+
+
+ class UniversalCreateParams(TypedDict, total=False):
+     model: Required[Literal["kanon-universal-classifier", "kanon-universal-classifier-mini"]]
+     """The ID of the model to use for universal classification."""
+
+     query: Required[str]
+     """
+     The Isaacus Query Language (IQL) query or, if IQL is disabled, the statement, to
+     evaluate the text against.
+
+     The query must contain at least one non-whitespace character.
+
+     Unlike the text being classified, the query cannot be so long that it exceeds
+     the maximum input length of the universal classifier.
+     """
+
+     text: Required[str]
+     """The text to classify.
+
+     The text must contain at least one non-whitespace character.
+     """
+
+     chunking_options: Optional[ChunkingOptions]
+     """Options for how to split text into smaller chunks."""
+
+     is_iql: bool
+     """
+     Whether the query should be interpreted as an Isaacus Query Language (IQL) query
+     or else as a statement.
+     """
+
+     scoring_method: Literal["auto", "chunk_max", "chunk_avg", "chunk_min"]
+     """The method to use for producing an overall confidence score.
+
+     `auto` is the default scoring method and is recommended for most use cases.
+     Currently, it is equivalent to `chunk_max`. In the future, it will automatically
+     select the best method based on the model and input.
+
+     `chunk_max` uses the highest confidence score of all of the text's chunks.
+
+     `chunk_avg` averages the confidence scores of all of the text's chunks.
+
+     `chunk_min` uses the lowest confidence score of all of the text's chunks.
+     """
+
+
+ class ChunkingOptions(TypedDict, total=False):
+     overlap_ratio: Optional[float]
+     """A number greater than or equal to 0 and less than 1."""
+
+     overlap_tokens: Optional[int]
+     """A whole number greater than -1."""
+
+     size: Optional[int]
+     """A whole number greater than or equal to 1."""