c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python 0.1.0a9__py3-none-any.whl → 0.1.0a10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python
- Version: 0.1.0a9
+ Version: 0.1.0a10
  Summary: The official Python library for GradientAI
  Project-URL: Homepage, https://github.com/digitalocean/gradientai-python
  Project-URL: Repository, https://github.com/digitalocean/gradientai-python
@@ -61,26 +61,33 @@ The full API of this library can be found in [api.md](https://github.com/digital
  import os
  from gradientai import GradientAI

- client = GradientAI(
+ api_client = GradientAI(
      api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted
  )
+ inference_client = GradientAI(
+     inference_key=os.environ.get("GRADIENTAI_INFERENCE_KEY"), # This is the default and can be omitted
+ )

- completion = client.agents.chat.completions.create(
+ print(api_client.agents.list())
+
+ completion = inference_client.chat.completions.create(
      messages=[
          {
-             "content": "string",
-             "role": "system",
+             "role": "user",
+             "content": "What is the capital of France?",
          }
      ],
-     model="llama3-8b-instruct",
+     model="llama3.3-70b-instruct",
  )
- print(completion.id)
+
+ print(completion.choices[0].message)
+
  ```

- While you can provide an `api_key` keyword argument,
+ While you can provide an `api_key`, `inference_key` keyword argument,
  we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/)
- to add `GRADIENTAI_API_KEY="My API Key"` to your `.env` file
- so that your API Key is not stored in source control.
+ to add `GRADIENTAI_API_KEY="My API Key"`, `GRADIENTAI_INFERENCE_KEY="My INFERENCE Key"` to your `.env` file
+ so that your keys are not stored in source control.

  ## Async usage

@@ -100,13 +107,13 @@ async def main() -> None:
      completion = await client.agents.chat.completions.create(
          messages=[
              {
-                 "content": "string",
-                 "role": "system",
+                 "role": "user",
+                 "content": "What is the capital of France?",
              }
          ],
-         model="llama3-8b-instruct",
+         model="llama3.3-70b-instruct",
      )
-     print(completion.id)
+     print(completion.choices)


  asyncio.run(main())
@@ -142,18 +149,62 @@ async def main() -> None:
      completion = await client.agents.chat.completions.create(
          messages=[
              {
-                 "content": "string",
-                 "role": "system",
+                 "role": "user",
+                 "content": "What is the capital of France?",
              }
          ],
-         model="llama3-8b-instruct",
+         model="llama3.3-70b-instruct",
      )
-     print(completion.id)
+     print(completion.choices)


  asyncio.run(main())
  ```

+ ## Streaming responses
+
+ We provide support for streaming responses using Server Side Events (SSE).
+
+ ```python
+ from gradientai import GradientAI
+
+ client = GradientAI()
+
+ stream = client.agents.chat.completions.create(
+     messages=[
+         {
+             "role": "user",
+             "content": "What is the capital of France?",
+         }
+     ],
+     model="llama3.3-70b-instruct",
+     stream=True,
+ )
+ for completion in stream:
+     print(completion.choices)
+ ```
+
+ The async client uses the exact same interface.
+
+ ```python
+ from gradientai import AsyncGradientAI
+
+ client = AsyncGradientAI()
+
+ stream = await client.agents.chat.completions.create(
+     messages=[
+         {
+             "role": "user",
+             "content": "What is the capital of France?",
+         }
+     ],
+     model="llama3.3-70b-instruct",
+     stream=True,
+ )
+ async for completion in stream:
+     print(completion.choices)
+ ```
+
  ## Using types

  Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like:
@@ -201,8 +252,14 @@ from gradientai import GradientAI
  client = GradientAI()

  try:
-     client.agents.versions.list(
-         uuid="REPLACE_ME",
+     client.agents.chat.completions.create(
+         messages=[
+             {
+                 "role": "user",
+                 "content": "What is the capital of France?",
+             }
+         ],
+         model="llama3.3-70b-instruct",
      )
  except gradientai.APIConnectionError as e:
      print("The server could not be reached")
@@ -246,8 +303,14 @@ client = GradientAI(
  )

  # Or, configure per-request:
- client.with_options(max_retries=5).agents.versions.list(
-     uuid="REPLACE_ME",
+ client.with_options(max_retries=5).agents.chat.completions.create(
+     messages=[
+         {
+             "role": "user",
+             "content": "What is the capital of France?",
+         }
+     ],
+     model="llama3.3-70b-instruct",
  )
  ```

@@ -271,8 +334,14 @@ client = GradientAI(
  )

  # Override per-request:
- client.with_options(timeout=5.0).agents.versions.list(
-     uuid="REPLACE_ME",
+ client.with_options(timeout=5.0).agents.chat.completions.create(
+     messages=[
+         {
+             "role": "user",
+             "content": "What is the capital of France?",
+         }
+     ],
+     model="llama3.3-70b-instruct",
  )
  ```

@@ -314,13 +383,17 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
  from gradientai import GradientAI

  client = GradientAI()
- response = client.agents.versions.with_raw_response.list(
-     uuid="REPLACE_ME",
+ response = client.agents.chat.completions.with_raw_response.create(
+     messages=[{
+         "role": "user",
+         "content": "What is the capital of France?",
+     }],
+     model="llama3.3-70b-instruct",
  )
  print(response.headers.get('X-My-Header'))

- version = response.parse() # get the object that `agents.versions.list()` would have returned
- print(version.agent_versions)
+ completion = response.parse() # get the object that `agents.chat.completions.create()` would have returned
+ print(completion.choices)
  ```

  These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object.
@@ -334,8 +407,14 @@ The above interface eagerly reads the full response body when you make the reque
  To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.

  ```python
- with client.agents.versions.with_streaming_response.list(
-     uuid="REPLACE_ME",
+ with client.agents.chat.completions.with_streaming_response.create(
+     messages=[
+         {
+             "role": "user",
+             "content": "What is the capital of France?",
+         }
+     ],
+     model="llama3.3-70b-instruct",
  ) as response:
      print(response.headers.get("X-My-Header"))

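The README hunks above add up to a new split-key workflow in 0.1.0a10: an API key for control-plane calls and a separate inference key for chat completions. The sketch below simply assembles the diff's own examples into one script; the only assumption beyond the diff is that the two environment variables it references are set.

```python
# Assembled from the README examples in this diff (0.1.0a10).
import os

from gradientai import GradientAI

# Control-plane client, authenticated with the API key.
api_client = GradientAI(api_key=os.environ.get("GRADIENTAI_API_KEY"))

# Inference client, authenticated with the new inference key.
inference_client = GradientAI(
    inference_key=os.environ.get("GRADIENTAI_INFERENCE_KEY"),
)

# Control-plane call: list agents.
print(api_client.agents.list())

# Inference call: run a chat completion.
completion = inference_client.chat.completions.create(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    model="llama3.3-70b-instruct",
)
print(completion.choices[0].message)
```
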
@@ -1,6 +1,6 @@
  gradientai/__init__.py,sha256=yqy3yZnX0JVUS-H01MAvroChzqS67Qf072OrPfNckjI,2655
  gradientai/_base_client.py,sha256=TADFnPHK7WpsNuJUY76SxMFf2IVoOdz_tlloQEXkutk,66719
- gradientai/_client.py,sha256=SwygmkQnjfqCEKgMA7rovhfWiEOk3BjyenunyiDS0F8,27425
+ gradientai/_client.py,sha256=17o0JpmlQLZAdCzYpeGfRKOFf_pKT1-YmJi9_z-EFC4,27425
  gradientai/_compat.py,sha256=VWemUKbj6DDkQ-O4baSpHVLJafotzeXmCQGJugfVTIw,6580
  gradientai/_constants.py,sha256=S14PFzyN9-I31wiV7SmIlL5Ga0MLHxdvegInGdXH7tM,462
  gradientai/_exceptions.py,sha256=o1GvaW36c7_LMj5WasVKUBOpae8tzETBJsfbVphb3Vk,3228
@@ -11,7 +11,7 @@ gradientai/_resource.py,sha256=4NZbH2h8dQ-t-DQPida4VANJ_oZJNA7qxV84mwJT8oM,1124
  gradientai/_response.py,sha256=RhlDdupxTcKNyDDj045MZD3-a_lsEc3yjiOzxWg0cDc,28842
  gradientai/_streaming.py,sha256=3KH-GBmqhoS1KAOhecADOsbW9WuzhIi8wSdmrEj5PPA,11404
  gradientai/_types.py,sha256=22gBoIuoGJ1R6l5nPwquWCRzJodKhO-3e7k22-h37JQ,6201
- gradientai/_version.py,sha256=aATDhXxfQvFSrVaXTkz4f895b3kBEp1bAJcYPj23INc,170
+ gradientai/_version.py,sha256=zEAgnL7ofmga7omAv5szOmrwUcIVNZOhqxHF85-0A04,171
  gradientai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  gradientai/_utils/__init__.py,sha256=PNZ_QJuzZEgyYXqkO1HVhGkj5IU9bglVUcw7H-Knjzw,2062
  gradientai/_utils/_logs.py,sha256=1QuZcxzSKHcqNFbPoz_pvfLD5eDfOMBzGMyanAm_2gw,787
@@ -24,7 +24,7 @@ gradientai/_utils/_transform.py,sha256=n7kskEWz6o__aoNvhFoGVyDoalNe6mJwp-g7BWkdj
  gradientai/_utils/_typing.py,sha256=D0DbbNu8GnYQTSICnTSHDGsYXj8TcAKyhejb0XcnjtY,4602
  gradientai/_utils/_utils.py,sha256=ts4CiiuNpFiGB6YMdkQRh2SZvYvsl7mAF-JWHCcLDf4,12312
  gradientai/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224
- gradientai/resources/__init__.py,sha256=Sej63-wOANl_4F78LH6m2Zx2Qu_-wGPHirIiq_20gwk,3488
+ gradientai/resources/__init__.py,sha256=S-pc73Saio0NXtBI11LNt3WM3_mDJ9IYZyoAHDyW2iQ,3488
  gradientai/resources/models.py,sha256=SKdBMKr6NP1ldD_xzbr-gUL8hdo33omUAz5UPAJ0KWQ,9434
  gradientai/resources/regions.py,sha256=c4KX_qFTu83Svil1CO8CP4XdkEIfNQD3zwU_E6w734A,7172
  gradientai/resources/agents/__init__.py,sha256=2LkcBGQQJzBhZhVaeAy_IiqAPu28nV-d3_S-b__lTmk,5787
@@ -233,7 +233,7 @@ gradientai/types/shared/__init__.py,sha256=YA2_qLkZLySOac1HrqOfCTEz6GeipnjIJh1mK
  gradientai/types/shared/api_links.py,sha256=Iq5iQwOkRYuwLcuDLk54dUfrq0f2ZVEOXSpF744gYgA,403
  gradientai/types/shared/api_meta.py,sha256=-KyinzQqM5GSjD7E5xm7A4UALXAvLOyVNR1SYVOUFJM,297
  gradientai/types/shared/chat_completion_token_logprob.py,sha256=6-ipUFfsXMf5L7FDFi127NaVkDtmEooVgGBF6Ts965A,1769
- c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a9.dist-info/METADATA,sha256=knvT5XUviLOr5lmtTU4M4N1a1_opUZDVrfNKXZM1nqM,15049
- c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a9.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
- c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a9.dist-info/licenses/LICENSE,sha256=AzxEF8mEks6hu5V_87CXF8gLdL875WeO8FmQtEZTFok,11341
- c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a9.dist-info/RECORD,,
+ c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a10.dist-info/METADATA,sha256=lysDQ9wBs7xwfexNXqIaZ8CwjQCmy2eF-gnMHIdlv8s,17031
+ c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a10.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+ c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a10.dist-info/licenses/LICENSE,sha256=AzxEF8mEks6hu5V_87CXF8gLdL875WeO8FmQtEZTFok,11341
+ c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a10.dist-info/RECORD,,
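
A note on the RECORD entries above: each line is `path,sha256=<digest>,<size>`, where the digest is the unpadded urlsafe-base64 SHA-256 of the file, per the wheel RECORD format. A minimal sketch of reproducing an entry from a local file (the path is illustrative):

```python
# Recompute a wheel RECORD entry for a file on disk.
import base64
import hashlib
from pathlib import Path


def record_entry(path: str) -> str:
    data = Path(path).read_bytes()
    # RECORD uses urlsafe base64 with the trailing "=" padding stripped.
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"


# e.g. the empty py.typed file yields
# "gradientai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0"
print(record_entry("gradientai/py.typed"))
```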
gradientai/_client.py CHANGED
@@ -125,6 +125,12 @@ class GradientAI(SyncAPIClient):

          return AgentsResource(self)

+     @cached_property
+     def chat(self) -> ChatResource:
+         from .resources.chat import ChatResource
+
+         return ChatResource(self)
+
      @cached_property
      def model_providers(self) -> ModelProvidersResource:
          from .resources.model_providers import ModelProvidersResource
@@ -143,12 +149,6 @@ class GradientAI(SyncAPIClient):

          return KnowledgeBasesResource(self)

-     @cached_property
-     def chat(self) -> ChatResource:
-         from .resources.chat import ChatResource
-
-         return ChatResource(self)
-
      @cached_property
      def inference(self) -> InferenceResource:
          from .resources.inference import InferenceResource
@@ -365,6 +365,12 @@ class AsyncGradientAI(AsyncAPIClient):

          return AsyncAgentsResource(self)

+     @cached_property
+     def chat(self) -> AsyncChatResource:
+         from .resources.chat import AsyncChatResource
+
+         return AsyncChatResource(self)
+
      @cached_property
      def model_providers(self) -> AsyncModelProvidersResource:
          from .resources.model_providers import AsyncModelProvidersResource
@@ -383,12 +389,6 @@ class AsyncGradientAI(AsyncAPIClient):

          return AsyncKnowledgeBasesResource(self)

-     @cached_property
-     def chat(self) -> AsyncChatResource:
-         from .resources.chat import AsyncChatResource
-
-         return AsyncChatResource(self)
-
      @cached_property
      def inference(self) -> AsyncInferenceResource:
          from .resources.inference import AsyncInferenceResource
@@ -545,6 +545,12 @@ class GradientAIWithRawResponse:

          return AgentsResourceWithRawResponse(self._client.agents)

+     @cached_property
+     def chat(self) -> chat.ChatResourceWithRawResponse:
+         from .resources.chat import ChatResourceWithRawResponse
+
+         return ChatResourceWithRawResponse(self._client.chat)
+
      @cached_property
      def model_providers(self) -> model_providers.ModelProvidersResourceWithRawResponse:
          from .resources.model_providers import ModelProvidersResourceWithRawResponse
@@ -563,12 +569,6 @@ class GradientAIWithRawResponse:

          return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases)

-     @cached_property
-     def chat(self) -> chat.ChatResourceWithRawResponse:
-         from .resources.chat import ChatResourceWithRawResponse
-
-         return ChatResourceWithRawResponse(self._client.chat)
-
      @cached_property
      def inference(self) -> inference.InferenceResourceWithRawResponse:
          from .resources.inference import InferenceResourceWithRawResponse
@@ -594,6 +594,12 @@ class AsyncGradientAIWithRawResponse:

          return AsyncAgentsResourceWithRawResponse(self._client.agents)

+     @cached_property
+     def chat(self) -> chat.AsyncChatResourceWithRawResponse:
+         from .resources.chat import AsyncChatResourceWithRawResponse
+
+         return AsyncChatResourceWithRawResponse(self._client.chat)
+
      @cached_property
      def model_providers(self) -> model_providers.AsyncModelProvidersResourceWithRawResponse:
          from .resources.model_providers import AsyncModelProvidersResourceWithRawResponse
@@ -612,12 +618,6 @@ class AsyncGradientAIWithRawResponse:

          return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases)

-     @cached_property
-     def chat(self) -> chat.AsyncChatResourceWithRawResponse:
-         from .resources.chat import AsyncChatResourceWithRawResponse
-
-         return AsyncChatResourceWithRawResponse(self._client.chat)
-
      @cached_property
      def inference(self) -> inference.AsyncInferenceResourceWithRawResponse:
          from .resources.inference import AsyncInferenceResourceWithRawResponse
@@ -643,6 +643,12 @@ class GradientAIWithStreamedResponse:

          return AgentsResourceWithStreamingResponse(self._client.agents)

+     @cached_property
+     def chat(self) -> chat.ChatResourceWithStreamingResponse:
+         from .resources.chat import ChatResourceWithStreamingResponse
+
+         return ChatResourceWithStreamingResponse(self._client.chat)
+
      @cached_property
      def model_providers(self) -> model_providers.ModelProvidersResourceWithStreamingResponse:
          from .resources.model_providers import ModelProvidersResourceWithStreamingResponse
@@ -661,12 +667,6 @@ class GradientAIWithStreamedResponse:

          return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases)

-     @cached_property
-     def chat(self) -> chat.ChatResourceWithStreamingResponse:
-         from .resources.chat import ChatResourceWithStreamingResponse
-
-         return ChatResourceWithStreamingResponse(self._client.chat)
-
      @cached_property
      def inference(self) -> inference.InferenceResourceWithStreamingResponse:
          from .resources.inference import InferenceResourceWithStreamingResponse
@@ -692,6 +692,12 @@ class AsyncGradientAIWithStreamedResponse:

          return AsyncAgentsResourceWithStreamingResponse(self._client.agents)

+     @cached_property
+     def chat(self) -> chat.AsyncChatResourceWithStreamingResponse:
+         from .resources.chat import AsyncChatResourceWithStreamingResponse
+
+         return AsyncChatResourceWithStreamingResponse(self._client.chat)
+
      @cached_property
      def model_providers(self) -> model_providers.AsyncModelProvidersResourceWithStreamingResponse:
          from .resources.model_providers import AsyncModelProvidersResourceWithStreamingResponse
@@ -710,12 +716,6 @@ class AsyncGradientAIWithStreamedResponse:

          return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases)

-     @cached_property
-     def chat(self) -> chat.AsyncChatResourceWithStreamingResponse:
-         from .resources.chat import AsyncChatResourceWithStreamingResponse
-
-         return AsyncChatResourceWithStreamingResponse(self._client.chat)
-
      @cached_property
      def inference(self) -> inference.AsyncInferenceResourceWithStreamingResponse:
          from .resources.inference import AsyncInferenceResourceWithStreamingResponse
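
All twelve hunks in `_client.py` move the same shape of block: a `cached_property` that lazily imports and constructs a resource, so the `chat` accessor now sits between `agents` and `model_providers` on every client variant. For orientation, here is that pattern in isolation; `MyClient` and `WidgetsResource` are hypothetical stand-ins, not part of the gradientai API:

```python
from functools import cached_property


class WidgetsResource:
    def __init__(self, client: "MyClient") -> None:
        self._client = client


class MyClient:
    @cached_property
    def widgets(self) -> WidgetsResource:
        # The real client defers the resource import to this point to keep
        # import time low; cached_property builds the resource once per client.
        return WidgetsResource(self)


client = MyClient()
assert client.widgets is client.widgets  # memoized on first access
```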
gradientai/_version.py CHANGED
@@ -1,4 +1,4 @@
  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

  __title__ = "gradientai"
- __version__ = "0.1.0-alpha.9" # x-release-please-version
+ __version__ = "0.1.0-alpha.10" # x-release-please-version
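
Note the two spellings of the version: the wheel filename says `0.1.0a10` while `_version.py` carries `0.1.0-alpha.10`. Under PEP 440 normalization these are the same version, which can be checked with the third-party `packaging` library (an assumption of this sketch, not a dependency of the package):

```python
# PEP 440 normalizes the "alpha" spelling and drops the separator.
from packaging.version import Version

assert str(Version("0.1.0-alpha.10")) == "0.1.0a10"
assert Version("0.1.0-alpha.10") == Version("0.1.0a10")
```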
gradientai/resources/__init__.py CHANGED
@@ -64,6 +64,12 @@ __all__ = [
      "AsyncAgentsResourceWithRawResponse",
      "AgentsResourceWithStreamingResponse",
      "AsyncAgentsResourceWithStreamingResponse",
+     "ChatResource",
+     "AsyncChatResource",
+     "ChatResourceWithRawResponse",
+     "AsyncChatResourceWithRawResponse",
+     "ChatResourceWithStreamingResponse",
+     "AsyncChatResourceWithStreamingResponse",
      "ModelProvidersResource",
      "AsyncModelProvidersResource",
      "ModelProvidersResourceWithRawResponse",
@@ -82,12 +88,6 @@ __all__ = [
      "AsyncKnowledgeBasesResourceWithRawResponse",
      "KnowledgeBasesResourceWithStreamingResponse",
      "AsyncKnowledgeBasesResourceWithStreamingResponse",
-     "ChatResource",
-     "AsyncChatResource",
-     "ChatResourceWithRawResponse",
-     "AsyncChatResourceWithRawResponse",
-     "ChatResourceWithStreamingResponse",
-     "AsyncChatResourceWithStreamingResponse",
      "InferenceResource",
      "AsyncInferenceResource",
      "InferenceResourceWithRawResponse",