isaacus 0.7.0__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. isaacus/__init__.py +5 -2
  2. isaacus/_base_client.py +86 -15
  3. isaacus/_client.py +17 -9
  4. isaacus/_compat.py +48 -48
  5. isaacus/_files.py +4 -4
  6. isaacus/_models.py +80 -50
  7. isaacus/_qs.py +7 -7
  8. isaacus/_types.py +53 -12
  9. isaacus/_utils/__init__.py +9 -2
  10. isaacus/_utils/_compat.py +45 -0
  11. isaacus/_utils/_datetime_parse.py +136 -0
  12. isaacus/_utils/_transform.py +13 -3
  13. isaacus/_utils/_typing.py +6 -1
  14. isaacus/_utils/_utils.py +4 -5
  15. isaacus/_version.py +1 -1
  16. isaacus/resources/__init__.py +14 -0
  17. isaacus/resources/classifications/universal.py +17 -17
  18. isaacus/resources/embeddings.py +246 -0
  19. isaacus/resources/extractions/qa.py +23 -21
  20. isaacus/resources/rerankings.py +19 -19
  21. isaacus/types/__init__.py +3 -1
  22. isaacus/types/classifications/__init__.py +1 -1
  23. isaacus/types/classifications/{universal_classification.py → universal_classification_response.py} +2 -2
  24. isaacus/types/classifications/universal_create_params.py +4 -2
  25. isaacus/types/embedding_create_params.py +49 -0
  26. isaacus/types/embedding_response.py +31 -0
  27. isaacus/types/extractions/__init__.py +1 -1
  28. isaacus/types/extractions/{answer_extraction.py → answer_extraction_response.py} +2 -2
  29. isaacus/types/extractions/qa_create_params.py +7 -4
  30. isaacus/types/reranking_create_params.py +4 -2
  31. isaacus/types/{reranking.py → reranking_response.py} +2 -2
  32. {isaacus-0.7.0.dist-info → isaacus-0.9.0.dist-info}/METADATA +90 -37
  33. isaacus-0.9.0.dist-info/RECORD +52 -0
  34. isaacus-0.7.0.dist-info/RECORD +0 -47
  35. {isaacus-0.7.0.dist-info → isaacus-0.9.0.dist-info}/WHEEL +0 -0
  36. {isaacus-0.7.0.dist-info → isaacus-0.9.0.dist-info}/licenses/LICENSE +0 -0

isaacus/resources/rerankings.py CHANGED
@@ -2,13 +2,13 @@
 
  from __future__ import annotations
 
- from typing import List, Optional
+ from typing import Optional
  from typing_extensions import Literal
 
  import httpx
 
  from ..types import reranking_create_params
- from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+ from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
  from .._utils import maybe_transform, async_maybe_transform
  from .._compat import cached_property
  from .._resource import SyncAPIResource, AsyncAPIResource
@@ -19,7 +19,7 @@ from .._response import (
      async_to_streamed_response_wrapper,
  )
  from .._base_client import make_request_options
- from ..types.reranking import Reranking
+ from ..types.reranking_response import RerankingResponse
 
  __all__ = ["RerankingsResource", "AsyncRerankingsResource"]
 
@@ -49,18 +49,18 @@ class RerankingsResource(SyncAPIResource):
  *,
  model: Literal["kanon-universal-classifier", "kanon-universal-classifier-mini"],
  query: str,
- texts: List[str],
- chunking_options: Optional[reranking_create_params.ChunkingOptions] | NotGiven = NOT_GIVEN,
- is_iql: bool | NotGiven = NOT_GIVEN,
- scoring_method: Literal["auto", "chunk_max", "chunk_avg", "chunk_min"] | NotGiven = NOT_GIVEN,
- top_n: Optional[int] | NotGiven = NOT_GIVEN,
+ texts: SequenceNotStr[str],
+ chunking_options: Optional[reranking_create_params.ChunkingOptions] | Omit = omit,
+ is_iql: bool | Omit = omit,
+ scoring_method: Literal["auto", "chunk_max", "chunk_avg", "chunk_min"] | Omit = omit,
+ top_n: Optional[int] | Omit = omit,
  # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
  # The extra values given here take precedence over values defined on the client or passed to this method.
  extra_headers: Headers | None = None,
  extra_query: Query | None = None,
  extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Reranking:
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RerankingResponse:
  """
  Rerank legal documents by their relevance to a query with an Isaacus legal AI
  reranker.
@@ -131,7 +131,7 @@ class RerankingsResource(SyncAPIResource):
  options=make_request_options(
      extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
  ),
- cast_to=Reranking,
+ cast_to=RerankingResponse,
  )
 
 
@@ -160,18 +160,18 @@ class AsyncRerankingsResource(AsyncAPIResource):
  *,
  model: Literal["kanon-universal-classifier", "kanon-universal-classifier-mini"],
  query: str,
- texts: List[str],
- chunking_options: Optional[reranking_create_params.ChunkingOptions] | NotGiven = NOT_GIVEN,
- is_iql: bool | NotGiven = NOT_GIVEN,
- scoring_method: Literal["auto", "chunk_max", "chunk_avg", "chunk_min"] | NotGiven = NOT_GIVEN,
- top_n: Optional[int] | NotGiven = NOT_GIVEN,
+ texts: SequenceNotStr[str],
+ chunking_options: Optional[reranking_create_params.ChunkingOptions] | Omit = omit,
+ is_iql: bool | Omit = omit,
+ scoring_method: Literal["auto", "chunk_max", "chunk_avg", "chunk_min"] | Omit = omit,
+ top_n: Optional[int] | Omit = omit,
  # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
  # The extra values given here take precedence over values defined on the client or passed to this method.
  extra_headers: Headers | None = None,
  extra_query: Query | None = None,
  extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Reranking:
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RerankingResponse:
  """
  Rerank legal documents by their relevance to a query with an Isaacus legal AI
  reranker.
@@ -242,7 +242,7 @@ class AsyncRerankingsResource(AsyncAPIResource):
  options=make_request_options(
      extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
  ),
- cast_to=Reranking,
+ cast_to=RerankingResponse,
  )
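
For context on the signature change above, here is a minimal sketch of a call against the 0.9.0 surface. It assumes the resource is exposed on the client as `client.rerankings` (the accessor name is not shown in this diff); the query and texts are illustrative, while the parameter names and the `RerankingResponse` return type come from the signature above.

```python
# Hypothetical usage sketch: optional parameters now default to `omit`
# instead of `NOT_GIVEN`, and the call returns a `RerankingResponse`.
from isaacus import Isaacus

client = Isaacus()  # reads ISAACUS_API_KEY from the environment

response = client.rerankings.create(
    model="kanon-universal-classifier",
    query="Which clause deals with confidentiality?",
    texts=[
        "I agree not to tell anyone about the document.",
        "This agreement is governed by English law.",
    ],
    top_n=1,  # optional; leaving it out falls back to the `omit` default
)
print(response.results)  # a RerankingResponse with its ordered `results`
```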
isaacus/types/__init__.py CHANGED
@@ -2,5 +2,7 @@
 
  from __future__ import annotations
 
- from .reranking import Reranking as Reranking
+ from .embedding_response import EmbeddingResponse as EmbeddingResponse
+ from .reranking_response import RerankingResponse as RerankingResponse
+ from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
  from .reranking_create_params import RerankingCreateParams as RerankingCreateParams
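
The re-exports above make the new names importable directly from `isaacus.types`; a short illustrative sketch (the aliases are from the diff, the helper function is hypothetical):

```python
from __future__ import annotations

from isaacus.types import EmbeddingResponse, RerankingResponse


def summarize(embeddings: EmbeddingResponse, rerankings: RerankingResponse) -> str:
    # EmbeddingResponse exposes `embeddings`; RerankingResponse exposes `results`.
    return f"{len(embeddings.embeddings)} embeddings, {len(rerankings.results)} reranked texts"
```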

isaacus/types/classifications/__init__.py CHANGED
@@ -3,4 +3,4 @@
  from __future__ import annotations
 
  from .universal_create_params import UniversalCreateParams as UniversalCreateParams
- from .universal_classification import UniversalClassification as UniversalClassification
+ from .universal_classification_response import UniversalClassificationResponse as UniversalClassificationResponse

isaacus/types/classifications/{universal_classification.py → universal_classification_response.py} RENAMED
@@ -4,7 +4,7 @@ from typing import List, Optional
 
  from ..._models import BaseModel
 
- __all__ = ["UniversalClassification", "Classification", "ClassificationChunk", "Usage"]
+ __all__ = ["UniversalClassificationResponse", "Classification", "ClassificationChunk", "Usage"]
 
 
  class ClassificationChunk(BaseModel):
@@ -72,7 +72,7 @@ class Usage(BaseModel):
  """The number of tokens inputted to the model."""
 
 
- class UniversalClassification(BaseModel):
+ class UniversalClassificationResponse(BaseModel):
  classifications: List[Classification]
  """
  The classifications of the texts, by relevance to the query, in order from
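
Because the public model is renamed (the file list shows `universal_classification.py → universal_classification_response.py`), downstream imports need a one-line update. An illustrative before/after, with a hypothetical helper for context:

```python
# Before (isaacus 0.7.0):
# from isaacus.types.classifications import UniversalClassification

# After (isaacus 0.9.0), the same model is exposed under the new name:
from isaacus.types.classifications import UniversalClassificationResponse


def classification_count(response: UniversalClassificationResponse) -> int:
    # `classifications` is ordered from most to least relevant, per the docstring above.
    return len(response.classifications)
```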

isaacus/types/classifications/universal_create_params.py CHANGED
@@ -2,9 +2,11 @@
 
  from __future__ import annotations
 
- from typing import List, Optional
+ from typing import Optional
  from typing_extensions import Literal, Required, TypedDict
 
+ from ..._types import SequenceNotStr
+
  __all__ = ["UniversalCreateParams", "ChunkingOptions"]
 
 
@@ -26,7 +28,7 @@ class UniversalCreateParams(TypedDict, total=False):
  the maximum input length of the universal classifier.
  """
 
- texts: Required[List[str]]
+ texts: Required[SequenceNotStr[str]]
  """The texts to classify.
 
  Each text must contain at least one non-whitespace character.
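
The switch from `List[str]` to `SequenceNotStr[str]` widens the accepted input: any non-`str` sequence of strings (a tuple, for instance) should now type-check, while a bare string is still rejected. A small sketch based on the `classifications.universal.create(...)` call shown in this package's README; the query and first text mirror the README example, the second text is illustrative:

```python
from isaacus import Isaacus

client = Isaacus()

# Any non-str sequence of strings is now accepted for `texts`, e.g. a tuple.
texts = (
    "I agree not to tell anyone about the document.",
    "This agreement shall be governed by the laws of England and Wales.",
)

universal_classification_response = client.classifications.universal.create(
    model="kanon-universal-classifier",
    query="This is a confidentiality clause.",
    texts=texts,
)
print(universal_classification_response.classifications)
```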

isaacus/types/embedding_create_params.py ADDED
@@ -0,0 +1,49 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from __future__ import annotations
+
+ from typing import Union, Optional
+ from typing_extensions import Literal, Required, TypedDict
+
+ from .._types import SequenceNotStr
+
+ __all__ = ["EmbeddingCreateParams"]
+
+
+ class EmbeddingCreateParams(TypedDict, total=False):
+ model: Required[Literal["kanon-2-embedder"]]
+ """
+ The ID of the [model](https://docs.isaacus.com/models#embedding) to use for
+ embedding.
+ """
+
+ texts: Required[Union[SequenceNotStr[str], str]]
+ """The text or array of texts to embed.
+
+ Each text must contain at least one non-whitespace character.
+
+ No more than 128 texts can be embedded in a single request.
+ """
+
+ dimensions: Optional[int]
+ """A whole number greater than or equal to 1."""
+
+ overflow_strategy: Optional[Literal["drop_end"]]
+ """The strategy to employ when content exceeds the model's maximum input length.
+
+ `drop_end`, which is the default setting, drops tokens from the end of the
+ content exceeding the limit.
+
+ If `null`, an error will be raised if any content exceeds the model's maximum
+ input length.
+ """
+
+ task: Optional[Literal["retrieval/query", "retrieval/document"]]
+ """The task the embeddings will be used for.
+
+ `retrieval/query` is meant for queries and statements, and `retrieval/document`
+ is meant for anything to be retrieved using query embeddings.
+
+ If `null`, which is the default setting, embeddings will not be optimized for
+ any particular task.
+ """
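
A minimal sketch of how these parameters map onto a request, modeled on the `client.embeddings.create(...)` call in the updated README. It assumes the generated resource accepts `dimensions` and `task` as keyword arguments, as the params above suggest; the specific values are illustrative:

```python
from isaacus import Isaacus

client = Isaacus()

embedding_response = client.embeddings.create(
    model="kanon-2-embedder",
    # `texts` accepts a single string or a sequence of strings (up to 128 per request).
    texts=["Are restraints of trade enforceable under English law?"],
    dimensions=1024,  # illustrative; any whole number >= 1 per the docstring above
    task="retrieval/query",  # optimize the embedding for query-side retrieval
)
print(len(embedding_response.embeddings[0].embedding))
```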

isaacus/types/embedding_response.py ADDED
@@ -0,0 +1,31 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing import List
+
+ from .._models import BaseModel
+
+ __all__ = ["EmbeddingResponse", "Embedding", "Usage"]
+
+
+ class Embedding(BaseModel):
+ embedding: List[float]
+ """The embedding of the content represented as an array of floating point numbers."""
+
+ index: int
+ """
+ The position of the content in the input array of contents, starting from `0`
+ (and, therefore, ending at the number of contents minus `1`).
+ """
+
+
+ class Usage(BaseModel):
+ input_tokens: int
+ """The number of tokens inputted to the model."""
+
+
+ class EmbeddingResponse(BaseModel):
+ embeddings: List[Embedding]
+ """The embeddings of the inputs."""
+
+ usage: Usage
+ """Statistics about the usage of resources in the process of embedding the inputs."""
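
Each returned `Embedding` pairs a vector with the index of the input it came from. A short consumption sketch; the cosine-similarity helper is illustrative and not part of the SDK:

```python
import math
from typing import List

from isaacus import Isaacus

client = Isaacus()

response = client.embeddings.create(
    model="kanon-2-embedder",
    texts=[
        "Are restraints of trade enforceable under English law?",
        "What is a non-compete clause?",
    ],
)

# `index` ties each vector back to its position in the input list.
vectors = {e.index: e.embedding for e in response.embeddings}


def cosine_similarity(a: List[float], b: List[float]) -> float:
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(y * y for y in b))
    return dot / (norm_a * norm_b)


print(cosine_similarity(vectors[0], vectors[1]))
print(response.usage.input_tokens)  # tokens consumed by the request
```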

isaacus/types/extractions/__init__.py CHANGED
@@ -3,4 +3,4 @@
  from __future__ import annotations
 
  from .qa_create_params import QaCreateParams as QaCreateParams
- from .answer_extraction import AnswerExtraction as AnswerExtraction
+ from .answer_extraction_response import AnswerExtractionResponse as AnswerExtractionResponse

isaacus/types/extractions/{answer_extraction.py → answer_extraction_response.py} RENAMED
@@ -4,7 +4,7 @@ from typing import List
 
  from ..._models import BaseModel
 
- __all__ = ["AnswerExtraction", "Extraction", "ExtractionAnswer", "Usage"]
+ __all__ = ["AnswerExtractionResponse", "Extraction", "ExtractionAnswer", "Usage"]
 
 
  class ExtractionAnswer(BaseModel):
@@ -57,7 +57,7 @@ class Usage(BaseModel):
  """The number of tokens inputted to the model."""
 
 
- class AnswerExtraction(BaseModel):
+ class AnswerExtractionResponse(BaseModel):
  extractions: List[Extraction]
  """
  The results of extracting answers from the texts, ordered from highest to lowest

isaacus/types/extractions/qa_create_params.py CHANGED
@@ -2,17 +2,20 @@
 
  from __future__ import annotations
 
- from typing import List, Optional
+ from typing import Optional
  from typing_extensions import Literal, Required, TypedDict
 
+ from ..._types import SequenceNotStr
+
  __all__ = ["QaCreateParams", "ChunkingOptions"]
 
 
  class QaCreateParams(TypedDict, total=False):
  model: Required[Literal["kanon-answer-extractor", "kanon-answer-extractor-mini"]]
  """
- The ID of the [model](https://docs.isaacus.com/models#extractive-qa) to use for
- extractive question answering.
+ The ID of the
+ [model](https://docs.isaacus.com/models#extractive-question-answering) to use
+ for extractive question answering.
  """
 
  query: Required[str]
@@ -24,7 +27,7 @@ class QaCreateParams(TypedDict, total=False):
  long that it exceeds the maximum input length of the model.
  """
 
- texts: Required[List[str]]
+ texts: Required[SequenceNotStr[str]]
  """The texts to search for the answer in and extract the answer from.
 
  There must be at least one text.
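
For orientation, a sketch of how these params are typically passed through the generated resource. It assumes the extractive QA endpoint is exposed as `client.extractions.qa.create(...)` (the accessor name is inferred from the `isaacus/resources/extractions/qa.py` layout and is not shown in this diff); the query and texts are illustrative:

```python
from isaacus import Isaacus

client = Isaacus()

# Assumed accessor path; the parameter names come from QaCreateParams above.
answer_extraction_response = client.extractions.qa.create(
    model="kanon-answer-extractor",
    query="What is the governing law of this agreement?",
    texts=[
        "This agreement is governed by the laws of England and Wales.",
        "Either party may terminate this agreement with 30 days' notice.",
    ],
)
print(answer_extraction_response.extractions)
```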

isaacus/types/reranking_create_params.py CHANGED
@@ -2,9 +2,11 @@
 
  from __future__ import annotations
 
- from typing import List, Optional
+ from typing import Optional
  from typing_extensions import Literal, Required, TypedDict
 
+ from .._types import SequenceNotStr
+
  __all__ = ["RerankingCreateParams", "ChunkingOptions"]
 
 
@@ -24,7 +26,7 @@ class RerankingCreateParams(TypedDict, total=False):
  maximum input length of the reranker.
  """
 
- texts: Required[List[str]]
+ texts: Required[SequenceNotStr[str]]
  """The texts to rerank.
 
  There must be at least one text.

isaacus/types/{reranking.py → reranking_response.py} RENAMED
@@ -4,7 +4,7 @@ from typing import List
 
  from .._models import BaseModel
 
- __all__ = ["Reranking", "Result", "Usage"]
+ __all__ = ["RerankingResponse", "Result", "Usage"]
 
 
  class Result(BaseModel):
@@ -26,7 +26,7 @@ class Usage(BaseModel):
  """The number of tokens inputted to the model."""
 
 
- class Reranking(BaseModel):
+ class RerankingResponse(BaseModel):
  results: List[Result]
  """
  The rerankings of the texts, by relevance to the query, in order from highest to

{isaacus-0.7.0.dist-info → isaacus-0.9.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: isaacus
- Version: 0.7.0
+ Version: 0.9.0
  Summary: The official Python library for the isaacus API
  Project-URL: Homepage, https://github.com/isaacus-dev/isaacus-python
  Project-URL: Repository, https://github.com/isaacus-dev/isaacus-python
@@ -18,6 +18,7 @@ Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Classifier: Typing :: Typed
  Requires-Python: >=3.8
@@ -27,11 +28,15 @@ Requires-Dist: httpx<1,>=0.23.0
  Requires-Dist: pydantic<3,>=1.9.0
  Requires-Dist: sniffio
  Requires-Dist: typing-extensions<5,>=4.10
+ Provides-Extra: aiohttp
+ Requires-Dist: aiohttp; extra == 'aiohttp'
+ Requires-Dist: httpx-aiohttp>=0.1.8; extra == 'aiohttp'
  Description-Content-Type: text/markdown
 
  # Isaacus Python API library
 
- [![PyPI version](https://img.shields.io/pypi/v/isaacus.svg)](https://pypi.org/project/isaacus/)
+ <!-- prettier-ignore -->
+ [![PyPI version](https://img.shields.io/pypi/v/isaacus.svg?label=pypi%20(stable))](https://pypi.org/project/isaacus/)
 
  The Isaacus Python library provides convenient access to the Isaacus REST API from any Python 3.8+
  application. The library includes type definitions for all request params and response fields,
@@ -62,12 +67,14 @@ client = Isaacus(
  api_key=os.environ.get("ISAACUS_API_KEY"), # This is the default and can be omitted
  )
 
- universal_classification = client.classifications.universal.create(
- model="kanon-universal-classifier",
- query="This is a confidentiality clause.",
- texts=["I agree not to tell anyone about the document."],
+ embedding_response = client.embeddings.create(
+ model="kanon-2-embedder",
+ texts=[
+ "Are restraints of trade enforceable under English law?",
+ "What is a non-compete clause?",
+ ],
  )
- print(universal_classification.classifications)
+ print(embedding_response.embeddings)
  ```
 
  While you can provide an `api_key` keyword argument,
@@ -90,12 +97,14 @@ client = AsyncIsaacus(
 
 
  async def main() -> None:
- universal_classification = await client.classifications.universal.create(
- model="kanon-universal-classifier",
- query="This is a confidentiality clause.",
- texts=["I agree not to tell anyone about the document."],
+ embedding_response = await client.embeddings.create(
+ model="kanon-2-embedder",
+ texts=[
+ "Are restraints of trade enforceable under English law?",
+ "What is a non-compete clause?",
+ ],
  )
- print(universal_classification.classifications)
+ print(embedding_response.embeddings)
 
 
  asyncio.run(main())
@@ -103,6 +112,43 @@ asyncio.run(main())
 
  Functionality between the synchronous and asynchronous clients is otherwise identical.
 
+ ### With aiohttp
+
+ By default, the async client uses `httpx` for HTTP requests. However, for improved concurrency performance you may also use `aiohttp` as the HTTP backend.
+
+ You can enable this by installing `aiohttp`:
+
+ ```sh
+ # install from PyPI
+ pip install isaacus[aiohttp]
+ ```
+
+ Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`:
+
+ ```python
+ import asyncio
+ from isaacus import DefaultAioHttpClient
+ from isaacus import AsyncIsaacus
+
+
+ async def main() -> None:
+ async with AsyncIsaacus(
+ api_key="My API Key",
+ http_client=DefaultAioHttpClient(),
+ ) as client:
+ embedding_response = await client.embeddings.create(
+ model="kanon-2-embedder",
+ texts=[
+ "Are restraints of trade enforceable under English law?",
+ "What is a non-compete clause?",
+ ],
+ )
+ print(embedding_response.embeddings)
+
+
+ asyncio.run(main())
+ ```
+
  ## Using types
 
  Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like:
@@ -121,7 +167,7 @@ from isaacus import Isaacus
 
  client = Isaacus()
 
- universal_classification = client.classifications.universal.create(
+ universal_classification_response = client.classifications.universal.create(
  model="kanon-universal-classifier",
  query="This is a confidentiality clause.",
  texts=["I agree not to tell anyone about the document."],
@@ -131,7 +177,7 @@ universal_classification = client.classifications.universal.create(
  "size": 512,
  },
  )
- print(universal_classification.classifications)
+ print(universal_classification_response.classifications)
  ```
 
  ## Handling errors
@@ -150,10 +196,12 @@ from isaacus import Isaacus
  client = Isaacus()
 
  try:
- client.classifications.universal.create(
- model="kanon-universal-classifier",
- query="This is a confidentiality clause.",
- texts=["I agree not to tell anyone about the document."],
+ client.embeddings.create(
+ model="kanon-2-embedder",
+ texts=[
+ "Are restraints of trade enforceable under English law?",
+ "What is a non-compete clause?",
+ ],
  )
  except isaacus.APIConnectionError as e:
  print("The server could not be reached")
@@ -197,17 +245,19 @@ client = Isaacus(
  )
 
  # Or, configure per-request:
- client.with_options(max_retries=5).classifications.universal.create(
- model="kanon-universal-classifier",
- query="This is a confidentiality clause.",
- texts=["I agree not to tell anyone about the document."],
+ client.with_options(max_retries=5).embeddings.create(
+ model="kanon-2-embedder",
+ texts=[
+ "Are restraints of trade enforceable under English law?",
+ "What is a non-compete clause?",
+ ],
  )
  ```
 
  ### Timeouts
 
  By default requests time out after 1 minute. You can configure this with a `timeout` option,
- which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object:
+ which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object:
 
  ```python
  from isaacus import Isaacus
@@ -224,10 +274,12 @@ client = Isaacus(
  )
 
  # Override per-request:
- client.with_options(timeout=5.0).classifications.universal.create(
- model="kanon-universal-classifier",
- query="This is a confidentiality clause.",
- texts=["I agree not to tell anyone about the document."],
+ client.with_options(timeout=5.0).embeddings.create(
+ model="kanon-2-embedder",
+ texts=[
+ "Are restraints of trade enforceable under English law?",
+ "What is a non-compete clause?",
+ ],
  )
  ```
 
@@ -269,15 +321,14 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
  from isaacus import Isaacus
 
  client = Isaacus()
- response = client.classifications.universal.with_raw_response.create(
- model="kanon-universal-classifier",
- query="This is a confidentiality clause.",
- texts=["I agree not to tell anyone about the document."],
+ response = client.embeddings.with_raw_response.create(
+ model="kanon-2-embedder",
+ texts=["Are restraints of trade enforceable under English law?", "What is a non-compete clause?"],
  )
  print(response.headers.get('X-My-Header'))
 
- universal = response.parse() # get the object that `classifications.universal.create()` would have returned
- print(universal.classifications)
+ embedding = response.parse() # get the object that `embeddings.create()` would have returned
+ print(embedding.embeddings)
  ```
 
  These methods return an [`APIResponse`](https://github.com/isaacus-dev/isaacus-python/tree/main/src/isaacus/_response.py) object.
@@ -291,10 +342,12 @@ The above interface eagerly reads the full response body when you make the reque
  To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.
 
  ```python
- with client.classifications.universal.with_streaming_response.create(
- model="kanon-universal-classifier",
- query="This is a confidentiality clause.",
- texts=["I agree not to tell anyone about the document."],
+ with client.embeddings.with_streaming_response.create(
+ model="kanon-2-embedder",
+ texts=[
+ "Are restraints of trade enforceable under English law?",
+ "What is a non-compete clause?",
+ ],
  ) as response:
  print(response.headers.get("X-My-Header"))
 

isaacus-0.9.0.dist-info/RECORD ADDED
@@ -0,0 +1,52 @@
+ isaacus/__init__.py,sha256=wtI0vXNsVgND6Lmq0G6l2iQALnXyM7HqyL9C6gOiaFE,2633
+ isaacus/_base_client.py,sha256=Az9XYe7zI4rpf8OKfALV4Yd1yFTPbx7jm_Fv-me8_w4,67048
+ isaacus/_client.py,sha256=EOZacvJHWvcvEyvpjPGFiwr3egLvFQEwWDiVnJY_veA,17149
+ isaacus/_compat.py,sha256=DQBVORjFb33zch24jzkhM14msvnzY7mmSmgDLaVFUM8,6562
+ isaacus/_constants.py,sha256=S14PFzyN9-I31wiV7SmIlL5Ga0MLHxdvegInGdXH7tM,462
+ isaacus/_exceptions.py,sha256=L82uluhizzc94VydHIaJkNxkcG-2DAe74tNhrE2eN2A,3222
+ isaacus/_files.py,sha256=KnEzGi_O756MvKyJ4fOCW_u3JhOeWPQ4RsmDvqihDQU,3545
+ isaacus/_models.py,sha256=lKnskYPONAWDvWo8tmbbVk7HmG7UOsI0Nve0vSMmkRc,30452
+ isaacus/_qs.py,sha256=craIKyvPktJ94cvf9zn8j8ekG9dWJzhWv0ob34lIOv4,4828
+ isaacus/_resource.py,sha256=iP_oYhz5enCI58mK7hlwLoPMPh4Q5s8-KBv-jGfv2aM,1106
+ isaacus/_response.py,sha256=aXLF5ia58bjjQXTxY574lh7JfKXiGL2tDTX09klm8lw,28794
+ isaacus/_streaming.py,sha256=tMBfwrfEFWm0v7vWFgjn_lizsoD70lPkYigIBuADaCM,10104
+ isaacus/_types.py,sha256=Bc2gbOMQA8Un1hMX-lkCISJ0_WFoL7iXMs9l9siVwEg,7237
+ isaacus/_version.py,sha256=ZkqD0WJK4erB_yaQT_LZ-xcMaownzF36jZcBFywvSn4,159
+ isaacus/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ isaacus/_utils/__init__.py,sha256=7fch0GT9zpNnErbciSpUNa-SjTxxjY6kxHxKMOM4AGs,2305
+ isaacus/_utils/_compat.py,sha256=D8gtAvjJQrDWt9upS0XaG9Rr5l1QhiAx_I_1utT_tt0,1195
+ isaacus/_utils/_datetime_parse.py,sha256=bABTs0Bc6rabdFvnIwXjEhWL15TcRgWZ_6XGTqN8xUk,4204
+ isaacus/_utils/_logs.py,sha256=rwa1Yzjbs2JaFn9KQ06rH5c_GSNa--BVwWnWhvvT1tY,777
+ isaacus/_utils/_proxy.py,sha256=aglnj2yBTDyGX9Akk2crZHrl10oqRmceUy2Zp008XEs,1975
+ isaacus/_utils/_reflection.py,sha256=ZmGkIgT_PuwedyNBrrKGbxoWtkpytJNU1uU4QHnmEMU,1364
+ isaacus/_utils/_resources_proxy.py,sha256=vW2q6wobLs4JH9DnlVsdaotKEzn5bWqqe8WhNTAOv_k,594
+ isaacus/_utils/_streams.py,sha256=SMC90diFFecpEg_zgDRVbdR3hSEIgVVij4taD-noMLM,289
+ isaacus/_utils/_sync.py,sha256=TpGLrrhRNWTJtODNE6Fup3_k7zrWm1j2RlirzBwre-0,2862
+ isaacus/_utils/_transform.py,sha256=NjCzmnfqYrsAikUHQig6N9QfuTVbKipuP3ur9mcNF-E,15951
+ isaacus/_utils/_typing.py,sha256=N_5PPuFNsaygbtA_npZd98SVN1LQQvFTKL6bkWPBZGU,4786
+ isaacus/_utils/_utils.py,sha256=0dDqauUbVZEXV0NVl7Bwu904Wwo5eyFCZpQThhFNhyA,12253
+ isaacus/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224
+ isaacus/resources/__init__.py,sha256=hYuuyfGpHUmFIXOnO_qcjbgyyoaSm4yt6EhDL_NXUoU,2188
+ isaacus/resources/embeddings.py,sha256=Ol-K756e376CnzdoEBuuRRIIdbNpYUOLD0bUN7o6ffA,9470
+ isaacus/resources/rerankings.py,sha256=6mfn-T7BKBD0gJcjqOoPhUJsHr7nb2QW0ZYuRND5p8k,11193
+ isaacus/resources/classifications/__init__.py,sha256=tYSnDm-o0CVuTC95VoNJzOqHsb8jTzYmW8hdwW14K60,1158
+ isaacus/resources/classifications/classifications.py,sha256=Td5Gscg1PNJJeobxow_hJq_RicpFe3ibEYN0Gh3Kpsg,4018
+ isaacus/resources/classifications/universal.py,sha256=TuTkM2d0bC5A3Eo_1km06IISu2nctEGpWzKHKHnW2IE,10714
+ isaacus/resources/extractions/__init__.py,sha256=24ccXv3kRlfXwnZJ4572kWNjJKiJ0Cd5vWeRkKCuMyY,1015
+ isaacus/resources/extractions/extractions.py,sha256=RaUnv1OG4i5J3JhpBNfpnxELpSHvmkqZmS2_DVL9Wvw,3671
+ isaacus/resources/extractions/qa.py,sha256=5KgPXamGlkR4qTglw0LBsIiytxGwdALQ-RBV7hS5kGo,10029
+ isaacus/types/__init__.py,sha256=rJe7gBVr0PgUiBlmSw0LoKhUsBVGsj2NoSSMieJGEpM,433
+ isaacus/types/embedding_create_params.py,sha256=Vcqa_CSnj_rXl8CWyGzBtjJKin18dRmy-q8N0eNFYQ8,1575
+ isaacus/types/embedding_response.py,sha256=LMBDLmYDu8oMoFH-Ov1_N4LovyUPNOeDg8aOHmzjykw,846
+ isaacus/types/reranking_create_params.py,sha256=T6N23D1N6zE8rC_wMh1xylj3XSOSqd1-truPNrxZwlE,2522
+ isaacus/types/reranking_response.py,sha256=uZDUZrYYbgygj2GpyV2seUWaCiXAzCZBQxmLxV_hEoU,927
+ isaacus/types/classifications/__init__.py,sha256=5wz2ChA8Ld8Yfx-7z7PShbfeyvE3wXRfpkctjS27t10,321
+ isaacus/types/classifications/universal_classification_response.py,sha256=C-dPzlM4WAHO6ylPcUXZSY2rzjHkwnX77u4LQrHrbSY,2454
+ isaacus/types/classifications/universal_create_params.py,sha256=ygK0Ge6PH92c_SyZB94zNGrvwxt9lskza8UTO_RFoDc,2319
+ isaacus/types/extractions/__init__.py,sha256=em0yfSoMG1XqO_LfmqveKlKh03y5x6g7hbaUn7ck21c,279
+ isaacus/types/extractions/answer_extraction_response.py,sha256=2-EHPss-y5s519W3-PpWyJzjLjUl4OkUfr9teWIN02o,2168
+ isaacus/types/extractions/qa_create_params.py,sha256=2ethTpU4W2aOkDO6SZfd4TBkk4aVWeK38nrIzO3EXjg,2127
+ isaacus-0.9.0.dist-info/METADATA,sha256=FPzfRdjxMpqKuOuUi9uU9yI5UulsLYaiu49MJn5Ec98,15438
+ isaacus-0.9.0.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+ isaacus-0.9.0.dist-info/licenses/LICENSE,sha256=lUen4LYVFVGEVXBsntBAPsQsOWgMkno1e9WfgWkpZ-k,11337
+ isaacus-0.9.0.dist-info/RECORD,,