c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python 0.1.0a8__py3-none-any.whl → 0.1.0a10__py3-none-any.whl

This diff compares the contents of two publicly available package versions as published to their public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python
- Version: 0.1.0a8
+ Version: 0.1.0a10
  Summary: The official Python library for GradientAI
  Project-URL: Homepage, https://github.com/digitalocean/gradientai-python
  Project-URL: Repository, https://github.com/digitalocean/gradientai-python
@@ -61,26 +61,33 @@ The full API of this library can be found in [api.md](https://github.com/digital
  import os
  from gradientai import GradientAI

- client = GradientAI(
+ api_client = GradientAI(
      api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted
  )
+ inference_client = GradientAI(
+     inference_key=os.environ.get("GRADIENTAI_INFERENCE_KEY"), # This is the default and can be omitted
+ )

- completion = client.agents.chat.completions.create(
+ print(api_client.agents.list())
+
+ completion = inference_client.chat.completions.create(
      messages=[
          {
-             "content": "string",
-             "role": "system",
+             "role": "user",
+             "content": "What is the capital of France?",
          }
      ],
-     model="llama3-8b-instruct",
+     model="llama3.3-70b-instruct",
  )
- print(completion.id)
+
+ print(completion.choices[0].message)
+
  ```

- While you can provide an `api_key` keyword argument,
+ While you can provide an `api_key`, `inference_key` keyword argument,
  we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/)
- to add `GRADIENTAI_API_KEY="My API Key"` to your `.env` file
- so that your API Key is not stored in source control.
+ to add `GRADIENTAI_API_KEY="My API Key"`, `GRADIENTAI_INFERENCE_KEY="My INFERENCE Key"` to your `.env` file
+ so that your keys are not stored in source control.
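To make the dotenv recommendation above concrete: a minimal sketch of that setup, assuming python-dotenv's standard `load_dotenv()` and the two environment variable names from the README. With the variables loaded, the clients need no explicit key arguments.

```python
# .env -- keep this file out of source control (e.g. list it in .gitignore):
#   GRADIENTAI_API_KEY="My API Key"
#   GRADIENTAI_INFERENCE_KEY="My INFERENCE Key"

from dotenv import load_dotenv  # pip install python-dotenv

from gradientai import GradientAI

load_dotenv()  # copies the .env entries into os.environ

# Both keys are read from the environment by default, per the README above.
api_client = GradientAI()
inference_client = GradientAI()
```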

  ## Async usage

@@ -100,13 +107,13 @@ async def main() -> None:
      completion = await client.agents.chat.completions.create(
          messages=[
              {
-                 "content": "string",
-                 "role": "system",
+                 "role": "user",
+                 "content": "What is the capital of France?",
              }
          ],
-         model="llama3-8b-instruct",
+         model="llama3.3-70b-instruct",
      )
-     print(completion.id)
+     print(completion.choices)


  asyncio.run(main())
@@ -142,18 +149,62 @@ async def main() -> None:
      completion = await client.agents.chat.completions.create(
          messages=[
              {
-                 "content": "string",
-                 "role": "system",
+                 "role": "user",
+                 "content": "What is the capital of France?",
              }
          ],
-         model="llama3-8b-instruct",
+         model="llama3.3-70b-instruct",
      )
-     print(completion.id)
+     print(completion.choices)


  asyncio.run(main())
  ```

+ ## Streaming responses
+
+ We provide support for streaming responses using Server Side Events (SSE).
+
+ ```python
+ from gradientai import GradientAI
+
+ client = GradientAI()
+
+ stream = client.agents.chat.completions.create(
+     messages=[
+         {
+             "role": "user",
+             "content": "What is the capital of France?",
+         }
+     ],
+     model="llama3.3-70b-instruct",
+     stream=True,
+ )
+ for completion in stream:
+     print(completion.choices)
+ ```
+
+ The async client uses the exact same interface.
+
+ ```python
+ from gradientai import AsyncGradientAI
+
+ client = AsyncGradientAI()
+
+ stream = await client.agents.chat.completions.create(
+     messages=[
+         {
+             "role": "user",
+             "content": "What is the capital of France?",
+         }
+     ],
+     model="llama3.3-70b-instruct",
+     stream=True,
+ )
+ async for completion in stream:
+     print(completion.choices)
+ ```
+
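The streamed examples added above only print `completion.choices` per chunk. If the full reply is also needed, the chunks can be accumulated client-side — a sketch, assuming the chunks expose the OpenAI-style `choices[0].delta.content` shape (an assumption; the diff itself only shows `choices`):

```python
from gradientai import GradientAI

client = GradientAI()

stream = client.agents.chat.completions.create(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    model="llama3.3-70b-instruct",
    stream=True,
)

# Print tokens as they arrive and keep them for the final answer.
parts: list[str] = []
for chunk in stream:
    delta = chunk.choices[0].delta.content  # assumed chunk field
    if delta:
        print(delta, end="", flush=True)
        parts.append(delta)

full_reply = "".join(parts)
```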
  ## Using types

  Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like:
@@ -201,8 +252,14 @@ from gradientai import GradientAI
  client = GradientAI()

  try:
-     client.agents.versions.list(
-         uuid="REPLACE_ME",
+     client.agents.chat.completions.create(
+         messages=[
+             {
+                 "role": "user",
+                 "content": "What is the capital of France?",
+             }
+         ],
+         model="llama3.3-70b-instruct",
      )
  except gradientai.APIConnectionError as e:
      print("The server could not be reached")
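The hunk above only exercises `APIConnectionError`. Since the wheel ships `gradientai/_exceptions.py`, a Stainless-style status hierarchy is plausible; a hedged sketch of broader handling, assuming an `APIStatusError` with `.status_code` and `.response` (not shown in this diff):

```python
import gradientai
from gradientai import GradientAI

client = GradientAI()

try:
    completion = client.agents.chat.completions.create(
        messages=[{"role": "user", "content": "What is the capital of France?"}],
        model="llama3.3-70b-instruct",
    )
except gradientai.APIConnectionError as e:
    print("The server could not be reached")
    print(e.__cause__)  # the underlying exception, likely from httpx
except gradientai.APIStatusError as e:  # assumed: raised for 4xx/5xx replies
    print(f"Non-success status code: {e.status_code}")
    print(e.response)
```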
@@ -246,8 +303,14 @@ client = GradientAI(
  )

  # Or, configure per-request:
- client.with_options(max_retries=5).agents.versions.list(
-     uuid="REPLACE_ME",
+ client.with_options(max_retries=5).agents.chat.completions.create(
+     messages=[
+         {
+             "role": "user",
+             "content": "What is the capital of France?",
+         }
+     ],
+     model="llama3.3-70b-instruct",
  )
  ```

@@ -271,8 +334,14 @@ client = GradientAI(
  )

  # Override per-request:
- client.with_options(timeout=5.0).agents.versions.list(
-     uuid="REPLACE_ME",
+ client.with_options(timeout=5.0).agents.chat.completions.create(
+     messages=[
+         {
+             "role": "user",
+             "content": "What is the capital of France?",
+         }
+     ],
+     model="llama3.3-70b-instruct",
  )
  ```
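Because the `timeout` parameter is typed `float | httpx.Timeout` in the completions resource further down this diff, granular timeouts should also be expressible with `httpx.Timeout` — a sketch, assuming the client constructor accepts the same type:

```python
import httpx

from gradientai import GradientAI

# Cap the whole request at 20 seconds, but connection setup at 5.
client = GradientAI(
    timeout=httpx.Timeout(20.0, connect=5.0),
)
```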
 
@@ -314,13 +383,17 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
  from gradientai import GradientAI

  client = GradientAI()
- response = client.agents.versions.with_raw_response.list(
-     uuid="REPLACE_ME",
+ response = client.agents.chat.completions.with_raw_response.create(
+     messages=[{
+         "role": "user",
+         "content": "What is the capital of France?",
+     }],
+     model="llama3.3-70b-instruct",
  )
  print(response.headers.get('X-My-Header'))

- version = response.parse() # get the object that `agents.versions.list()` would have returned
- print(version.agent_versions)
+ completion = response.parse() # get the object that `agents.chat.completions.create()` would have returned
+ print(completion.choices)
  ```

  These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object.
@@ -334,8 +407,14 @@ The above interface eagerly reads the full response body when you make the reque
  To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.

  ```python
- with client.agents.versions.with_streaming_response.list(
-     uuid="REPLACE_ME",
+ with client.agents.chat.completions.with_streaming_response.create(
+     messages=[
+         {
+             "role": "user",
+             "content": "What is the capital of France?",
+         }
+     ],
+     model="llama3.3-70b-instruct",
  ) as response:
      print(response.headers.get("X-My-Header"))
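The hunk is cut off before the body is consumed. Continuing with the `client` from the examples above, the rest presumably uses one of the iterator methods named in the paragraph — a sketch with `.iter_lines()`:

```python
with client.agents.chat.completions.with_streaming_response.create(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    model="llama3.3-70b-instruct",
) as response:
    print(response.headers.get("X-My-Header"))

    # The body is only read here, lazily, line by line as it arrives.
    for line in response.iter_lines():
        print(line)
```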
 
@@ -1,6 +1,6 @@
  gradientai/__init__.py,sha256=yqy3yZnX0JVUS-H01MAvroChzqS67Qf072OrPfNckjI,2655
  gradientai/_base_client.py,sha256=TADFnPHK7WpsNuJUY76SxMFf2IVoOdz_tlloQEXkutk,66719
- gradientai/_client.py,sha256=SwygmkQnjfqCEKgMA7rovhfWiEOk3BjyenunyiDS0F8,27425
+ gradientai/_client.py,sha256=17o0JpmlQLZAdCzYpeGfRKOFf_pKT1-YmJi9_z-EFC4,27425
  gradientai/_compat.py,sha256=VWemUKbj6DDkQ-O4baSpHVLJafotzeXmCQGJugfVTIw,6580
  gradientai/_constants.py,sha256=S14PFzyN9-I31wiV7SmIlL5Ga0MLHxdvegInGdXH7tM,462
  gradientai/_exceptions.py,sha256=o1GvaW36c7_LMj5WasVKUBOpae8tzETBJsfbVphb3Vk,3228
@@ -11,7 +11,7 @@ gradientai/_resource.py,sha256=4NZbH2h8dQ-t-DQPida4VANJ_oZJNA7qxV84mwJT8oM,1124
  gradientai/_response.py,sha256=RhlDdupxTcKNyDDj045MZD3-a_lsEc3yjiOzxWg0cDc,28842
  gradientai/_streaming.py,sha256=3KH-GBmqhoS1KAOhecADOsbW9WuzhIi8wSdmrEj5PPA,11404
  gradientai/_types.py,sha256=22gBoIuoGJ1R6l5nPwquWCRzJodKhO-3e7k22-h37JQ,6201
- gradientai/_version.py,sha256=vl2rc2vAkezojVTxFct2GJxzipzCGzHOxgS9Ld5ASmM,170
+ gradientai/_version.py,sha256=zEAgnL7ofmga7omAv5szOmrwUcIVNZOhqxHF85-0A04,171
  gradientai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  gradientai/_utils/__init__.py,sha256=PNZ_QJuzZEgyYXqkO1HVhGkj5IU9bglVUcw7H-Knjzw,2062
  gradientai/_utils/_logs.py,sha256=1QuZcxzSKHcqNFbPoz_pvfLD5eDfOMBzGMyanAm_2gw,787
@@ -24,7 +24,7 @@ gradientai/_utils/_transform.py,sha256=n7kskEWz6o__aoNvhFoGVyDoalNe6mJwp-g7BWkdj
  gradientai/_utils/_typing.py,sha256=D0DbbNu8GnYQTSICnTSHDGsYXj8TcAKyhejb0XcnjtY,4602
  gradientai/_utils/_utils.py,sha256=ts4CiiuNpFiGB6YMdkQRh2SZvYvsl7mAF-JWHCcLDf4,12312
  gradientai/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224
- gradientai/resources/__init__.py,sha256=Sej63-wOANl_4F78LH6m2Zx2Qu_-wGPHirIiq_20gwk,3488
+ gradientai/resources/__init__.py,sha256=S-pc73Saio0NXtBI11LNt3WM3_mDJ9IYZyoAHDyW2iQ,3488
  gradientai/resources/models.py,sha256=SKdBMKr6NP1ldD_xzbr-gUL8hdo33omUAz5UPAJ0KWQ,9434
  gradientai/resources/regions.py,sha256=c4KX_qFTu83Svil1CO8CP4XdkEIfNQD3zwU_E6w734A,7172
  gradientai/resources/agents/__init__.py,sha256=2LkcBGQQJzBhZhVaeAy_IiqAPu28nV-d3_S-b__lTmk,5787
@@ -39,7 +39,7 @@ gradientai/resources/agents/routes.py,sha256=pDoK5hUgluhz5awJ9FR_krF8DIbkRzLX__M
  gradientai/resources/agents/versions.py,sha256=hmHDaawFkpqoUEYI4Vi5jckH3mamkPKupXO3cZccyKE,11546
  gradientai/resources/agents/chat/__init__.py,sha256=BVAfz9TM3DT5W9f_mt0P9YRxL_MsUxKCWAH6u1iogmA,1041
  gradientai/resources/agents/chat/chat.py,sha256=nt97777qa-xM71JQBKDFG_x7fUDJRvy6rF5LoBiMOKE,3698
- gradientai/resources/agents/chat/completions.py,sha256=jq62v8gN3hJ7POxBCHMcUJUi6Zj4IgvNGlr7D36W1M8,46188
+ gradientai/resources/agents/chat/completions.py,sha256=uYlI68LprqXL-W1hVRjd0tb3jutXYosKKMOEecuzpV8,46239
  gradientai/resources/agents/evaluation_metrics/__init__.py,sha256=qUCsT_vI2TrZrUBPe8h-VMNBE4ytcoe0RXshDctV0g0,1198
  gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py,sha256=BueqcWT0lqbElo-lgntkr1s7J0-qi2KqEZV2qzc7dcI,7089
  gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py,sha256=Zf3wa7WSVOKyG1x9Fppny38_ewQCbdghxbptqGFVfOM,1054
@@ -47,7 +47,7 @@ gradientai/resources/agents/evaluation_metrics/workspaces/agents.py,sha256=AVgDN
  gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py,sha256=iSQS3IW5xjOX5EsyJraoS5enwzrBARUhm6Xl1EgiICE,27079
  gradientai/resources/chat/__init__.py,sha256=BVAfz9TM3DT5W9f_mt0P9YRxL_MsUxKCWAH6u1iogmA,1041
  gradientai/resources/chat/chat.py,sha256=ANOEzzDATXj-onkkdtvVad9O-Dfwe7Uza7yfA_3bU4U,3696
- gradientai/resources/chat/completions.py,sha256=KWtB8ro2ClciI4NhbXzgXL-bIE7fcInDdGR7EElA5nU,19108
+ gradientai/resources/chat/completions.py,sha256=u_TvPvwNU79e3I5-v1P384LE5ysVz9gjJpYOKxnkl7E,47059
  gradientai/resources/inference/__init__.py,sha256=5Yk9bdOpUJPTero0_CvA-GJvcU5_gVlN1jf5r2GGGPY,1055
  gradientai/resources/inference/api_keys.py,sha256=xgWTFTp8IVRkw2nvXnfjBZPOTJyLUDIKlE7fSmb1y2I,22021
  gradientai/resources/inference/inference.py,sha256=jBimuBx3kKsuwC3pgy-PPrWg1TryO_a108CC_xLS2-Y,3771
@@ -150,8 +150,8 @@ gradientai/types/agents/version_list_params.py,sha256=0_3DhUbFDRyjUnn2G4saFOvuot
  gradientai/types/agents/version_list_response.py,sha256=Y0Y8CSPUPVHKRA3zTRRfQ8gC2aFyBacSCaGz-D5k8nk,2725
  gradientai/types/agents/version_update_params.py,sha256=j9tOda5wXmSOHsmcxQONo2mM-hEtrYi5-19HfGU_XnI,379
  gradientai/types/agents/version_update_response.py,sha256=nspPIkxQskT82tcW0JyG7bBVlXq_KU6CZzodTd9jfkQ,709
- gradientai/types/agents/chat/__init__.py,sha256=c-PmEwuvWZQ4CRBTs9gzbKAq2sxL7V7JlVxddeoaGl0,381
- gradientai/types/agents/chat/chat_completion_chunk.py,sha256=1K-F0JdUmQ_4idDk3oUGUB_mhxAxCzjq0C8hMhGtDuY,3048
+ gradientai/types/agents/chat/__init__.py,sha256=VGhGOtQE4pcOIHKsOeLDYDpixc_5ExOfIJcR0q6AqvY,397
+ gradientai/types/agents/chat/agent_chat_completion_chunk.py,sha256=bktrs-Ao6uta3k4PVzBrreMKbArztBcsHIEa8uzIK9s,3058
  gradientai/types/agents/chat/completion_create_params.py,sha256=ADEJ0N3MMsouT9AqBLE1-rho4FVVhlp9U9E-buqIAYs,7165
  gradientai/types/agents/chat/completion_create_response.py,sha256=9uKS3memEoV0_Xd1CZwI0jQGsQyfVkhXRPGlRO3rUIc,2415
  gradientai/types/agents/evaluation_metrics/__init__.py,sha256=XWH_utxMx-JwArRpr-rHQfmoxQRGK6GciKOllbkqg40,894
@@ -168,8 +168,9 @@ gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py,sha25
  gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py,sha256=W4O5v1LoWh2hQJTpUm5SfQCYcv6Q9Yz1Id5Pm1sPQNA,503
  gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py,sha256=4INiLEvgT9UDqFbrGwp3nuWOzFhwv7sX_YCr1Um1RaQ,422
  gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py,sha256=j2uoTmFwnLNU7kGI5LZdPpMUxWmUk-HdQIVVDOwSy4Y,350
- gradientai/types/chat/__init__.py,sha256=A5VCUPqJZydjjOqEXC01GXmcDkKM3bq6zuCu9lmi5Es,303
- gradientai/types/chat/completion_create_params.py,sha256=F4Dcrt5aYC_GEWUSN2OA3Zm5ImevZ7tFuTxBH7RyooM,6635
+ gradientai/types/chat/__init__.py,sha256=c-PmEwuvWZQ4CRBTs9gzbKAq2sxL7V7JlVxddeoaGl0,381
+ gradientai/types/chat/chat_completion_chunk.py,sha256=o1gDgYtzM477RmKWg-q5CE0tP3p0J7YKlZWaoqjCJOU,3046
+ gradientai/types/chat/completion_create_params.py,sha256=ADEJ0N3MMsouT9AqBLE1-rho4FVVhlp9U9E-buqIAYs,7165
  gradientai/types/chat/completion_create_response.py,sha256=nNPWSXZYbyYLjT_ikVvDcjRw3f9eRGHFsUrLKtQHYGI,2413
  gradientai/types/inference/__init__.py,sha256=4Dt7-03NeP9ehdHLkLsZMiL_YLQwZsl92D0mMoDQ5g0,857
  gradientai/types/inference/api_key_create_params.py,sha256=MOy5Bdr1wNBqCvqzyZ0FLfFY2a97q6eXCzgCR1wcLAE,263
@@ -232,7 +233,7 @@ gradientai/types/shared/__init__.py,sha256=YA2_qLkZLySOac1HrqOfCTEz6GeipnjIJh1mK
  gradientai/types/shared/api_links.py,sha256=Iq5iQwOkRYuwLcuDLk54dUfrq0f2ZVEOXSpF744gYgA,403
  gradientai/types/shared/api_meta.py,sha256=-KyinzQqM5GSjD7E5xm7A4UALXAvLOyVNR1SYVOUFJM,297
  gradientai/types/shared/chat_completion_token_logprob.py,sha256=6-ipUFfsXMf5L7FDFi127NaVkDtmEooVgGBF6Ts965A,1769
- c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a8.dist-info/METADATA,sha256=xwI-Z-rvG_Z_-L5Xu3x_NW2F7CauiUQmYG8wtunHk-Q,15049
- c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a8.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
- c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a8.dist-info/licenses/LICENSE,sha256=AzxEF8mEks6hu5V_87CXF8gLdL875WeO8FmQtEZTFok,11341
- c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a8.dist-info/RECORD,,
+ c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a10.dist-info/METADATA,sha256=lysDQ9wBs7xwfexNXqIaZ8CwjQCmy2eF-gnMHIdlv8s,17031
+ c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a10.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+ c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a10.dist-info/licenses/LICENSE,sha256=AzxEF8mEks6hu5V_87CXF8gLdL875WeO8FmQtEZTFok,11341
+ c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a10.dist-info/RECORD,,
gradientai/_client.py CHANGED
@@ -125,6 +125,12 @@ class GradientAI(SyncAPIClient):

          return AgentsResource(self)

+     @cached_property
+     def chat(self) -> ChatResource:
+         from .resources.chat import ChatResource
+
+         return ChatResource(self)
+
      @cached_property
      def model_providers(self) -> ModelProvidersResource:
          from .resources.model_providers import ModelProvidersResource
@@ -143,12 +149,6 @@ class GradientAI(SyncAPIClient):

          return KnowledgeBasesResource(self)

-     @cached_property
-     def chat(self) -> ChatResource:
-         from .resources.chat import ChatResource
-
-         return ChatResource(self)
-
      @cached_property
      def inference(self) -> InferenceResource:
          from .resources.inference import InferenceResource
@@ -365,6 +365,12 @@ class AsyncGradientAI(AsyncAPIClient):

          return AsyncAgentsResource(self)

+     @cached_property
+     def chat(self) -> AsyncChatResource:
+         from .resources.chat import AsyncChatResource
+
+         return AsyncChatResource(self)
+
      @cached_property
      def model_providers(self) -> AsyncModelProvidersResource:
          from .resources.model_providers import AsyncModelProvidersResource
@@ -383,12 +389,6 @@ class AsyncGradientAI(AsyncAPIClient):

          return AsyncKnowledgeBasesResource(self)

-     @cached_property
-     def chat(self) -> AsyncChatResource:
-         from .resources.chat import AsyncChatResource
-
-         return AsyncChatResource(self)
-
      @cached_property
      def inference(self) -> AsyncInferenceResource:
          from .resources.inference import AsyncInferenceResource
@@ -545,6 +545,12 @@ class GradientAIWithRawResponse:

          return AgentsResourceWithRawResponse(self._client.agents)

+     @cached_property
+     def chat(self) -> chat.ChatResourceWithRawResponse:
+         from .resources.chat import ChatResourceWithRawResponse
+
+         return ChatResourceWithRawResponse(self._client.chat)
+
      @cached_property
      def model_providers(self) -> model_providers.ModelProvidersResourceWithRawResponse:
          from .resources.model_providers import ModelProvidersResourceWithRawResponse
@@ -563,12 +569,6 @@ class GradientAIWithRawResponse:

          return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases)

-     @cached_property
-     def chat(self) -> chat.ChatResourceWithRawResponse:
-         from .resources.chat import ChatResourceWithRawResponse
-
-         return ChatResourceWithRawResponse(self._client.chat)
-
      @cached_property
      def inference(self) -> inference.InferenceResourceWithRawResponse:
          from .resources.inference import InferenceResourceWithRawResponse
@@ -594,6 +594,12 @@ class AsyncGradientAIWithRawResponse:

          return AsyncAgentsResourceWithRawResponse(self._client.agents)

+     @cached_property
+     def chat(self) -> chat.AsyncChatResourceWithRawResponse:
+         from .resources.chat import AsyncChatResourceWithRawResponse
+
+         return AsyncChatResourceWithRawResponse(self._client.chat)
+
      @cached_property
      def model_providers(self) -> model_providers.AsyncModelProvidersResourceWithRawResponse:
          from .resources.model_providers import AsyncModelProvidersResourceWithRawResponse
@@ -612,12 +618,6 @@ class AsyncGradientAIWithRawResponse:

          return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases)

-     @cached_property
-     def chat(self) -> chat.AsyncChatResourceWithRawResponse:
-         from .resources.chat import AsyncChatResourceWithRawResponse
-
-         return AsyncChatResourceWithRawResponse(self._client.chat)
-
      @cached_property
      def inference(self) -> inference.AsyncInferenceResourceWithRawResponse:
          from .resources.inference import AsyncInferenceResourceWithRawResponse
@@ -643,6 +643,12 @@ class GradientAIWithStreamedResponse:

          return AgentsResourceWithStreamingResponse(self._client.agents)

+     @cached_property
+     def chat(self) -> chat.ChatResourceWithStreamingResponse:
+         from .resources.chat import ChatResourceWithStreamingResponse
+
+         return ChatResourceWithStreamingResponse(self._client.chat)
+
      @cached_property
      def model_providers(self) -> model_providers.ModelProvidersResourceWithStreamingResponse:
          from .resources.model_providers import ModelProvidersResourceWithStreamingResponse
@@ -661,12 +667,6 @@ class GradientAIWithStreamedResponse:

          return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases)

-     @cached_property
-     def chat(self) -> chat.ChatResourceWithStreamingResponse:
-         from .resources.chat import ChatResourceWithStreamingResponse
-
-         return ChatResourceWithStreamingResponse(self._client.chat)
-
      @cached_property
      def inference(self) -> inference.InferenceResourceWithStreamingResponse:
          from .resources.inference import InferenceResourceWithStreamingResponse
@@ -692,6 +692,12 @@ class AsyncGradientAIWithStreamedResponse:

          return AsyncAgentsResourceWithStreamingResponse(self._client.agents)

+     @cached_property
+     def chat(self) -> chat.AsyncChatResourceWithStreamingResponse:
+         from .resources.chat import AsyncChatResourceWithStreamingResponse
+
+         return AsyncChatResourceWithStreamingResponse(self._client.chat)
+
      @cached_property
      def model_providers(self) -> model_providers.AsyncModelProvidersResourceWithStreamingResponse:
          from .resources.model_providers import AsyncModelProvidersResourceWithStreamingResponse
@@ -710,12 +716,6 @@ class AsyncGradientAIWithStreamedResponse:

          return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases)

-     @cached_property
-     def chat(self) -> chat.AsyncChatResourceWithStreamingResponse:
-         from .resources.chat import AsyncChatResourceWithStreamingResponse
-
-         return AsyncChatResourceWithStreamingResponse(self._client.chat)
-
      @cached_property
      def inference(self) -> inference.AsyncInferenceResourceWithStreamingResponse:
          from .resources.inference import AsyncInferenceResourceWithStreamingResponse
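Every hunk in `_client.py` is the same mechanical move: the `chat` accessor relocates above `model_providers` with its body unchanged. The accessor pattern itself — a lazily imported, per-instance-cached resource — is worth seeing in isolation; a self-contained sketch with stand-in names (not the library's actual classes):

```python
from functools import cached_property


class ChatResource:
    """Stand-in for gradientai's ChatResource."""

    def __init__(self, client: "DemoClient") -> None:
        self._client = client


class DemoClient:
    @cached_property
    def chat(self) -> ChatResource:
        # The real client performs the resource import inside the property so
        # that importing the package stays cheap; @cached_property then ensures
        # the resource is constructed at most once per client instance.
        return ChatResource(self)


client = DemoClient()
assert client.chat is client.chat  # cached: the same instance on every access
```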
gradientai/_version.py CHANGED
@@ -1,4 +1,4 @@
  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

  __title__ = "gradientai"
- __version__ = "0.1.0-alpha.8" # x-release-please-version
+ __version__ = "0.1.0-alpha.10" # x-release-please-version
@@ -64,6 +64,12 @@ __all__ = [
      "AsyncAgentsResourceWithRawResponse",
      "AgentsResourceWithStreamingResponse",
      "AsyncAgentsResourceWithStreamingResponse",
+     "ChatResource",
+     "AsyncChatResource",
+     "ChatResourceWithRawResponse",
+     "AsyncChatResourceWithRawResponse",
+     "ChatResourceWithStreamingResponse",
+     "AsyncChatResourceWithStreamingResponse",
      "ModelProvidersResource",
      "AsyncModelProvidersResource",
      "ModelProvidersResourceWithRawResponse",
@@ -82,12 +88,6 @@ __all__ = [
      "AsyncKnowledgeBasesResourceWithRawResponse",
      "KnowledgeBasesResourceWithStreamingResponse",
      "AsyncKnowledgeBasesResourceWithStreamingResponse",
-     "ChatResource",
-     "AsyncChatResource",
-     "ChatResourceWithRawResponse",
-     "AsyncChatResourceWithRawResponse",
-     "ChatResourceWithStreamingResponse",
-     "AsyncChatResourceWithStreamingResponse",
      "InferenceResource",
      "AsyncInferenceResource",
      "InferenceResourceWithRawResponse",
@@ -20,8 +20,8 @@ from ...._response import (
  from ...._streaming import Stream, AsyncStream
  from ...._base_client import make_request_options
  from ....types.agents.chat import completion_create_params
- from ....types.agents.chat.chat_completion_chunk import ChatCompletionChunk
  from ....types.agents.chat.completion_create_response import CompletionCreateResponse
+ from ....types.agents.chat.agent_chat_completion_chunk import AgentChatCompletionChunk

  __all__ = ["CompletionsResource", "AsyncCompletionsResource"]

@@ -186,7 +186,7 @@ class CompletionsResource(SyncAPIResource):
          extra_query: Query | None = None,
          extra_body: Body | None = None,
          timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-     ) -> Stream[ChatCompletionChunk]:
+     ) -> Stream[AgentChatCompletionChunk]:
          """
          Creates a model response for the given chat conversation.

@@ -299,7 +299,7 @@ class CompletionsResource(SyncAPIResource):
          extra_query: Query | None = None,
          extra_body: Body | None = None,
          timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-     ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]:
+     ) -> CompletionCreateResponse | Stream[AgentChatCompletionChunk]:
          """
          Creates a model response for the given chat conversation.

@@ -412,7 +412,7 @@ class CompletionsResource(SyncAPIResource):
          extra_query: Query | None = None,
          extra_body: Body | None = None,
          timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-     ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]:
+     ) -> CompletionCreateResponse | Stream[AgentChatCompletionChunk]:
          return self._post(
              "/chat/completions"
              if self._client._base_url_overridden
@@ -446,7 +446,7 @@ class CompletionsResource(SyncAPIResource):
              ),
              cast_to=CompletionCreateResponse,
              stream=stream or False,
-             stream_cls=Stream[ChatCompletionChunk],
+             stream_cls=Stream[AgentChatCompletionChunk],
          )


@@ -610,7 +610,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
          extra_query: Query | None = None,
          extra_body: Body | None = None,
          timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-     ) -> AsyncStream[ChatCompletionChunk]:
+     ) -> AsyncStream[AgentChatCompletionChunk]:
          """
          Creates a model response for the given chat conversation.

@@ -723,7 +723,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
          extra_query: Query | None = None,
          extra_body: Body | None = None,
          timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-     ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]:
+     ) -> CompletionCreateResponse | AsyncStream[AgentChatCompletionChunk]:
          """
          Creates a model response for the given chat conversation.

@@ -836,7 +836,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
          extra_query: Query | None = None,
          extra_body: Body | None = None,
          timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-     ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]:
+     ) -> CompletionCreateResponse | AsyncStream[AgentChatCompletionChunk]:
          return await self._post(
              "/chat/completions"
              if self._client._base_url_overridden
@@ -870,7 +870,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
              ),
              cast_to=CompletionCreateResponse,
              stream=stream or False,
-             stream_cls=AsyncStream[ChatCompletionChunk],
+             stream_cls=AsyncStream[AgentChatCompletionChunk],
          )

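All eight signature hunks above are the same one-token rename (`ChatCompletionChunk` → `AgentChatCompletionChunk`) across the `typing.overload` set that gives `create()` a stream-dependent return type. A minimal, self-contained sketch of that pattern with illustrative stand-in classes (not the library's code):

```python
from typing import Literal, Union, overload


class Stream:
    """Stand-in for gradientai's Stream[AgentChatCompletionChunk]."""


class CompletionCreateResponse:
    """Stand-in for the non-streaming response model."""


@overload
def create(*, stream: Literal[True]) -> Stream: ...
@overload
def create(*, stream: Literal[False] = False) -> CompletionCreateResponse: ...
@overload
def create(*, stream: bool) -> Union[CompletionCreateResponse, Stream]: ...


def create(*, stream: bool = False) -> Union[CompletionCreateResponse, Stream]:
    # The real implementation POSTs to /chat/completions and passes
    # stream_cls=Stream[AgentChatCompletionChunk] when stream=True, so the
    # type checker narrows the return type from the literal value of `stream`.
    return Stream() if stream else CompletionCreateResponse()
```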