c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python 0.1.0a4__py3-none-any.whl → 0.1.0a6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (143)
  1. {c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a4.dist-info → c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a6.dist-info}/METADATA +41 -16
  2. c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a6.dist-info/RECORD +230 -0
  3. gradientai/_client.py +73 -71
  4. gradientai/_version.py +1 -1
  5. gradientai/resources/__init__.py +14 -28
  6. gradientai/resources/agents/__init__.py +69 -13
  7. gradientai/resources/agents/agents.py +148 -20
  8. gradientai/resources/{regions → agents}/evaluation_datasets.py +3 -3
  9. gradientai/resources/agents/evaluation_metrics/__init__.py +33 -0
  10. gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py +177 -0
  11. gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py +33 -0
  12. gradientai/resources/agents/evaluation_metrics/workspaces/agents.py +324 -0
  13. gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py +654 -0
  14. gradientai/resources/{regions/evaluation_runs → agents}/evaluation_runs.py +197 -47
  15. gradientai/resources/{regions → agents}/evaluation_test_cases.py +30 -9
  16. gradientai/resources/agents/{child_agents.py → routes.py} +64 -64
  17. gradientai/resources/chat/completions.py +20 -2
  18. gradientai/resources/inference/__init__.py +0 -14
  19. gradientai/resources/inference/inference.py +0 -32
  20. gradientai/resources/knowledge_bases/__init__.py +14 -0
  21. gradientai/resources/{indexing_jobs.py → knowledge_bases/indexing_jobs.py} +16 -12
  22. gradientai/resources/knowledge_bases/knowledge_bases.py +32 -0
  23. gradientai/resources/{providers → model_providers}/__init__.py +13 -13
  24. gradientai/resources/{providers → model_providers}/anthropic/keys.py +12 -7
  25. gradientai/resources/{providers/providers.py → model_providers/model_providers.py} +31 -31
  26. gradientai/resources/{providers → model_providers}/openai/keys.py +12 -7
  27. gradientai/resources/models.py +77 -105
  28. gradientai/resources/regions.py +195 -0
  29. gradientai/types/__init__.py +2 -17
  30. gradientai/types/agent_list_response.py +2 -2
  31. gradientai/types/agents/__init__.py +44 -8
  32. gradientai/types/{api_evaluation_metric.py → agents/api_evaluation_metric.py} +1 -1
  33. gradientai/types/{regions/evaluation_runs → agents}/api_evaluation_metric_result.py +1 -1
  34. gradientai/types/{regions/evaluation_runs/api_prompt.py → agents/api_evaluation_prompt.py} +3 -3
  35. gradientai/types/{regions/evaluation_runs → agents}/api_evaluation_run.py +13 -1
  36. gradientai/types/{regions → agents}/api_evaluation_test_case.py +1 -1
  37. gradientai/types/agents/api_key_list_response.py +2 -2
  38. gradientai/types/{region_list_evaluation_metrics_response.py → agents/evaluation_metric_list_response.py} +3 -3
  39. gradientai/types/agents/evaluation_metrics/__init__.py +14 -0
  40. gradientai/types/agents/evaluation_metrics/workspace_create_params.py +16 -0
  41. gradientai/types/agents/evaluation_metrics/workspace_create_response.py +16 -0
  42. gradientai/types/agents/evaluation_metrics/workspace_delete_response.py +11 -0
  43. gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py +12 -0
  44. gradientai/types/agents/evaluation_metrics/workspace_list_response.py +16 -0
  45. gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py +16 -0
  46. gradientai/types/agents/evaluation_metrics/workspace_update_params.py +18 -0
  47. gradientai/types/agents/evaluation_metrics/workspace_update_response.py +16 -0
  48. gradientai/types/agents/evaluation_metrics/workspaces/__init__.py +8 -0
  49. gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py +26 -0
  50. gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py +22 -0
  51. gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py +16 -0
  52. gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py +16 -0
  53. gradientai/types/{regions → agents}/evaluation_run_create_params.py +3 -2
  54. gradientai/types/{regions → agents}/evaluation_run_create_response.py +2 -2
  55. gradientai/types/{regions/evaluation_runs/result_retrieve_response.py → agents/evaluation_run_list_results_response.py} +5 -5
  56. gradientai/types/{regions → agents}/evaluation_run_retrieve_response.py +1 -1
  57. gradientai/types/agents/evaluation_run_retrieve_results_response.py +12 -0
  58. gradientai/types/{regions → agents}/evaluation_test_case_list_evaluation_runs_response.py +1 -1
  59. gradientai/types/agents/evaluation_test_case_retrieve_params.py +12 -0
  60. gradientai/types/agents/{child_agent_add_params.py → route_add_params.py} +2 -2
  61. gradientai/types/agents/{child_agent_add_response.py → route_add_response.py} +2 -2
  62. gradientai/types/agents/{child_agent_delete_response.py → route_delete_response.py} +2 -2
  63. gradientai/types/agents/{child_agent_update_params.py → route_update_params.py} +2 -2
  64. gradientai/types/agents/{child_agent_update_response.py → route_update_response.py} +2 -2
  65. gradientai/types/agents/{child_agent_view_response.py → route_view_response.py} +2 -2
  66. gradientai/types/agents/version_list_response.py +2 -2
  67. gradientai/types/api_knowledge_base.py +1 -1
  68. gradientai/types/api_workspace.py +1 -1
  69. gradientai/types/inference/__init__.py +0 -2
  70. gradientai/types/inference/api_key_list_response.py +2 -2
  71. gradientai/types/knowledge_base_list_response.py +2 -2
  72. gradientai/types/knowledge_bases/__init__.py +12 -0
  73. gradientai/types/{indexing_job_retrieve_data_sources_response.py → knowledge_bases/api_indexed_data_source.py} +4 -8
  74. gradientai/types/{api_indexing_job.py → knowledge_bases/api_indexing_job.py} +13 -1
  75. gradientai/types/knowledge_bases/api_knowledge_base_data_source.py +4 -1
  76. gradientai/types/knowledge_bases/data_source_list_response.py +2 -2
  77. gradientai/types/{indexing_job_create_response.py → knowledge_bases/indexing_job_create_response.py} +1 -1
  78. gradientai/types/{indexing_job_list_response.py → knowledge_bases/indexing_job_list_response.py} +3 -3
  79. gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py +12 -0
  80. gradientai/types/{indexing_job_retrieve_response.py → knowledge_bases/indexing_job_retrieve_response.py} +1 -1
  81. gradientai/types/{indexing_job_update_cancel_params.py → knowledge_bases/indexing_job_update_cancel_params.py} +1 -1
  82. gradientai/types/{indexing_job_update_cancel_response.py → knowledge_bases/indexing_job_update_cancel_response.py} +1 -1
  83. gradientai/types/{inference/model.py → model.py} +1 -1
  84. gradientai/types/model_list_response.py +5 -8
  85. gradientai/types/{providers → model_providers}/anthropic/key_list_agents_response.py +2 -2
  86. gradientai/types/{providers → model_providers}/anthropic/key_list_response.py +2 -2
  87. gradientai/types/{providers → model_providers}/openai/key_list_response.py +2 -2
  88. gradientai/types/{providers → model_providers}/openai/key_retrieve_agents_response.py +2 -2
  89. gradientai/types/shared/__init__.py +4 -0
  90. c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a4.dist-info/RECORD +0 -217
  91. gradientai/resources/inference/models.py +0 -226
  92. gradientai/resources/regions/__init__.py +0 -61
  93. gradientai/resources/regions/evaluation_runs/__init__.py +0 -33
  94. gradientai/resources/regions/evaluation_runs/results.py +0 -264
  95. gradientai/resources/regions/regions.py +0 -352
  96. gradientai/types/api_model.py +0 -32
  97. gradientai/types/inference/model_list_response.py +0 -15
  98. gradientai/types/model_list_params.py +0 -42
  99. gradientai/types/regions/__init__.py +0 -32
  100. gradientai/types/regions/evaluation_runs/__init__.py +0 -9
  101. gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py +0 -12
  102. {c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a4.dist-info → c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a6.dist-info}/WHEEL +0 -0
  103. {c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a4.dist-info → c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a6.dist-info}/licenses/LICENSE +0 -0
  104. /gradientai/resources/{providers → model_providers}/anthropic/__init__.py +0 -0
  105. /gradientai/resources/{providers → model_providers}/anthropic/anthropic.py +0 -0
  106. /gradientai/resources/{providers → model_providers}/openai/__init__.py +0 -0
  107. /gradientai/resources/{providers → model_providers}/openai/openai.py +0 -0
  108. /gradientai/types/{regions → agents}/api_star_metric.py +0 -0
  109. /gradientai/types/{regions → agents}/api_star_metric_param.py +0 -0
  110. /gradientai/types/{regions → agents}/evaluation_dataset_create_file_upload_presigned_urls_params.py +0 -0
  111. /gradientai/types/{regions → agents}/evaluation_dataset_create_file_upload_presigned_urls_response.py +0 -0
  112. /gradientai/types/{regions → agents}/evaluation_dataset_create_params.py +0 -0
  113. /gradientai/types/{regions → agents}/evaluation_dataset_create_response.py +0 -0
  114. /gradientai/types/{regions → agents}/evaluation_test_case_create_params.py +0 -0
  115. /gradientai/types/{regions → agents}/evaluation_test_case_create_response.py +0 -0
  116. /gradientai/types/{regions → agents}/evaluation_test_case_list_evaluation_runs_params.py +0 -0
  117. /gradientai/types/{regions → agents}/evaluation_test_case_list_response.py +0 -0
  118. /gradientai/types/{regions → agents}/evaluation_test_case_retrieve_response.py +0 -0
  119. /gradientai/types/{regions → agents}/evaluation_test_case_update_params.py +0 -0
  120. /gradientai/types/{regions → agents}/evaluation_test_case_update_response.py +0 -0
  121. /gradientai/types/{indexing_job_create_params.py → knowledge_bases/indexing_job_create_params.py} +0 -0
  122. /gradientai/types/{indexing_job_list_params.py → knowledge_bases/indexing_job_list_params.py} +0 -0
  123. /gradientai/types/{providers → model_providers}/__init__.py +0 -0
  124. /gradientai/types/{providers → model_providers}/anthropic/__init__.py +0 -0
  125. /gradientai/types/{providers → model_providers}/anthropic/key_create_params.py +0 -0
  126. /gradientai/types/{providers → model_providers}/anthropic/key_create_response.py +0 -0
  127. /gradientai/types/{providers → model_providers}/anthropic/key_delete_response.py +0 -0
  128. /gradientai/types/{providers → model_providers}/anthropic/key_list_agents_params.py +0 -0
  129. /gradientai/types/{providers → model_providers}/anthropic/key_list_params.py +0 -0
  130. /gradientai/types/{providers → model_providers}/anthropic/key_retrieve_response.py +0 -0
  131. /gradientai/types/{providers → model_providers}/anthropic/key_update_params.py +0 -0
  132. /gradientai/types/{providers → model_providers}/anthropic/key_update_response.py +0 -0
  133. /gradientai/types/{providers → model_providers}/openai/__init__.py +0 -0
  134. /gradientai/types/{providers → model_providers}/openai/key_create_params.py +0 -0
  135. /gradientai/types/{providers → model_providers}/openai/key_create_response.py +0 -0
  136. /gradientai/types/{providers → model_providers}/openai/key_delete_response.py +0 -0
  137. /gradientai/types/{providers → model_providers}/openai/key_list_params.py +0 -0
  138. /gradientai/types/{providers → model_providers}/openai/key_retrieve_agents_params.py +0 -0
  139. /gradientai/types/{providers → model_providers}/openai/key_retrieve_response.py +0 -0
  140. /gradientai/types/{providers → model_providers}/openai/key_update_params.py +0 -0
  141. /gradientai/types/{providers → model_providers}/openai/key_update_response.py +0 -0
  142. /gradientai/types/{agents → shared}/api_links.py +0 -0
  143. /gradientai/types/{agents → shared}/api_meta.py +0 -0
@@ -15,13 +15,18 @@ from ...._response import (
     async_to_streamed_response_wrapper,
 )
 from ...._base_client import make_request_options
-from ....types.providers.anthropic import key_list_params, key_create_params, key_update_params, key_list_agents_params
-from ....types.providers.anthropic.key_list_response import KeyListResponse
-from ....types.providers.anthropic.key_create_response import KeyCreateResponse
-from ....types.providers.anthropic.key_delete_response import KeyDeleteResponse
-from ....types.providers.anthropic.key_update_response import KeyUpdateResponse
-from ....types.providers.anthropic.key_retrieve_response import KeyRetrieveResponse
-from ....types.providers.anthropic.key_list_agents_response import KeyListAgentsResponse
+from ....types.model_providers.anthropic import (
+    key_list_params,
+    key_create_params,
+    key_update_params,
+    key_list_agents_params,
+)
+from ....types.model_providers.anthropic.key_list_response import KeyListResponse
+from ....types.model_providers.anthropic.key_create_response import KeyCreateResponse
+from ....types.model_providers.anthropic.key_delete_response import KeyDeleteResponse
+from ....types.model_providers.anthropic.key_update_response import KeyUpdateResponse
+from ....types.model_providers.anthropic.key_retrieve_response import KeyRetrieveResponse
+from ....types.model_providers.anthropic.key_list_agents_response import KeyListAgentsResponse
 
 __all__ = ["KeysResource", "AsyncKeysResource"]
 
@@ -21,10 +21,10 @@ from .anthropic.anthropic import (
     AsyncAnthropicResourceWithStreamingResponse,
 )
 
-__all__ = ["ProvidersResource", "AsyncProvidersResource"]
+__all__ = ["ModelProvidersResource", "AsyncModelProvidersResource"]
 
 
-class ProvidersResource(SyncAPIResource):
+class ModelProvidersResource(SyncAPIResource):
     @cached_property
     def anthropic(self) -> AnthropicResource:
         return AnthropicResource(self._client)
@@ -34,26 +34,26 @@ class ProvidersResource(SyncAPIResource):
         return OpenAIResource(self._client)
 
     @cached_property
-    def with_raw_response(self) -> ProvidersResourceWithRawResponse:
+    def with_raw_response(self) -> ModelProvidersResourceWithRawResponse:
         """
         This property can be used as a prefix for any HTTP method call to return
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
         """
-        return ProvidersResourceWithRawResponse(self)
+        return ModelProvidersResourceWithRawResponse(self)
 
     @cached_property
-    def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse:
+    def with_streaming_response(self) -> ModelProvidersResourceWithStreamingResponse:
         """
         An alternative to `.with_raw_response` that doesn't eagerly read the response body.
 
         For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
         """
-        return ProvidersResourceWithStreamingResponse(self)
+        return ModelProvidersResourceWithStreamingResponse(self)
 
 
-class AsyncProvidersResource(AsyncAPIResource):
+class AsyncModelProvidersResource(AsyncAPIResource):
     @cached_property
     def anthropic(self) -> AsyncAnthropicResource:
         return AsyncAnthropicResource(self._client)
@@ -63,72 +63,72 @@ class AsyncProvidersResource(AsyncAPIResource):
         return AsyncOpenAIResource(self._client)
 
     @cached_property
-    def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse:
+    def with_raw_response(self) -> AsyncModelProvidersResourceWithRawResponse:
         """
         This property can be used as a prefix for any HTTP method call to return
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
         """
-        return AsyncProvidersResourceWithRawResponse(self)
+        return AsyncModelProvidersResourceWithRawResponse(self)
 
     @cached_property
-    def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse:
+    def with_streaming_response(self) -> AsyncModelProvidersResourceWithStreamingResponse:
         """
         An alternative to `.with_raw_response` that doesn't eagerly read the response body.
 
         For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
         """
-        return AsyncProvidersResourceWithStreamingResponse(self)
+        return AsyncModelProvidersResourceWithStreamingResponse(self)
 
 
-class ProvidersResourceWithRawResponse:
-    def __init__(self, providers: ProvidersResource) -> None:
-        self._providers = providers
+class ModelProvidersResourceWithRawResponse:
+    def __init__(self, model_providers: ModelProvidersResource) -> None:
+        self._model_providers = model_providers
 
     @cached_property
     def anthropic(self) -> AnthropicResourceWithRawResponse:
-        return AnthropicResourceWithRawResponse(self._providers.anthropic)
+        return AnthropicResourceWithRawResponse(self._model_providers.anthropic)
 
     @cached_property
     def openai(self) -> OpenAIResourceWithRawResponse:
-        return OpenAIResourceWithRawResponse(self._providers.openai)
+        return OpenAIResourceWithRawResponse(self._model_providers.openai)
 
 
-class AsyncProvidersResourceWithRawResponse:
-    def __init__(self, providers: AsyncProvidersResource) -> None:
-        self._providers = providers
+class AsyncModelProvidersResourceWithRawResponse:
+    def __init__(self, model_providers: AsyncModelProvidersResource) -> None:
+        self._model_providers = model_providers
 
     @cached_property
     def anthropic(self) -> AsyncAnthropicResourceWithRawResponse:
-        return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic)
+        return AsyncAnthropicResourceWithRawResponse(self._model_providers.anthropic)
 
     @cached_property
     def openai(self) -> AsyncOpenAIResourceWithRawResponse:
-        return AsyncOpenAIResourceWithRawResponse(self._providers.openai)
+        return AsyncOpenAIResourceWithRawResponse(self._model_providers.openai)
 
 
-class ProvidersResourceWithStreamingResponse:
-    def __init__(self, providers: ProvidersResource) -> None:
-        self._providers = providers
+class ModelProvidersResourceWithStreamingResponse:
+    def __init__(self, model_providers: ModelProvidersResource) -> None:
+        self._model_providers = model_providers
 
     @cached_property
     def anthropic(self) -> AnthropicResourceWithStreamingResponse:
-        return AnthropicResourceWithStreamingResponse(self._providers.anthropic)
+        return AnthropicResourceWithStreamingResponse(self._model_providers.anthropic)
 
     @cached_property
     def openai(self) -> OpenAIResourceWithStreamingResponse:
-        return OpenAIResourceWithStreamingResponse(self._providers.openai)
+        return OpenAIResourceWithStreamingResponse(self._model_providers.openai)
 
 
-class AsyncProvidersResourceWithStreamingResponse:
-    def __init__(self, providers: AsyncProvidersResource) -> None:
-        self._providers = providers
+class AsyncModelProvidersResourceWithStreamingResponse:
+    def __init__(self, model_providers: AsyncModelProvidersResource) -> None:
+        self._model_providers = model_providers
 
     @cached_property
     def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse:
-        return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic)
+        return AsyncAnthropicResourceWithStreamingResponse(self._model_providers.anthropic)
 
     @cached_property
     def openai(self) -> AsyncOpenAIResourceWithStreamingResponse:
-        return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai)
+        return AsyncOpenAIResourceWithStreamingResponse(self._model_providers.openai)
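
For reference, the rename above changes only attribute and class names, not the endpoints. A minimal before/after sketch in Python, assuming the client class is `GradientAI` and that the anthropic keys sub-resource keeps its `list()` method (consistent with the `key_list_params`/`KeyListResponse` imports, but not shown directly in this diff):

from gradientai import GradientAI

client = GradientAI()  # assumes credentials are read from the environment

# 0.1.0a4 (old attribute path):
# keys = client.providers.anthropic.keys.list()

# 0.1.0a6 (renamed attribute path):
keys = client.model_providers.anthropic.keys.list()

# The raw-response and streaming wrappers follow the same rename, e.g.
# client.model_providers.with_raw_response and client.model_providers.with_streaming_response.
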
@@ -15,13 +15,18 @@ from ...._response import (
     async_to_streamed_response_wrapper,
 )
 from ...._base_client import make_request_options
-from ....types.providers.openai import key_list_params, key_create_params, key_update_params, key_retrieve_agents_params
-from ....types.providers.openai.key_list_response import KeyListResponse
-from ....types.providers.openai.key_create_response import KeyCreateResponse
-from ....types.providers.openai.key_delete_response import KeyDeleteResponse
-from ....types.providers.openai.key_update_response import KeyUpdateResponse
-from ....types.providers.openai.key_retrieve_response import KeyRetrieveResponse
-from ....types.providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse
+from ....types.model_providers.openai import (
+    key_list_params,
+    key_create_params,
+    key_update_params,
+    key_retrieve_agents_params,
+)
+from ....types.model_providers.openai.key_list_response import KeyListResponse
+from ....types.model_providers.openai.key_create_response import KeyCreateResponse
+from ....types.model_providers.openai.key_delete_response import KeyDeleteResponse
+from ....types.model_providers.openai.key_update_response import KeyUpdateResponse
+from ....types.model_providers.openai.key_retrieve_response import KeyRetrieveResponse
+from ....types.model_providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse
 
 __all__ = ["KeysResource", "AsyncKeysResource"]
 
@@ -2,14 +2,9 @@
 
 from __future__ import annotations
 
-from typing import List
-from typing_extensions import Literal
-
 import httpx
 
-from ..types import model_list_params
 from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
 from .._compat import cached_property
 from .._resource import SyncAPIResource, AsyncAPIResource
 from .._response import (
@@ -18,6 +13,7 @@ from .._response import (
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
+from ..types.model import Model
 from .._base_client import make_request_options
 from ..types.model_list_response import ModelListResponse
 
@@ -44,52 +40,22 @@ class ModelsResource(SyncAPIResource):
         """
         return ModelsResourceWithStreamingResponse(self)
 
-    def list(
+    def retrieve(
         self,
+        model: str,
         *,
-        page: int | NotGiven = NOT_GIVEN,
-        per_page: int | NotGiven = NOT_GIVEN,
-        public_only: bool | NotGiven = NOT_GIVEN,
-        usecases: List[
-            Literal[
-                "MODEL_USECASE_UNKNOWN",
-                "MODEL_USECASE_AGENT",
-                "MODEL_USECASE_FINETUNED",
-                "MODEL_USECASE_KNOWLEDGEBASE",
-                "MODEL_USECASE_GUARDRAIL",
-                "MODEL_USECASE_REASONING",
-                "MODEL_USECASE_SERVERLESS",
-            ]
-        ]
-        | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> ModelListResponse:
+    ) -> Model:
         """
-        To list all models, send a GET request to `/v2/gen-ai/models`.
+        Retrieves a model instance, providing basic information about the model such as
+        the owner and permissioning.
 
         Args:
-          page: page number.
-
-          per_page: items per page.
-
-          public_only: only include models that are publicly available.
-
-          usecases: include only models defined for the listed usecases.
-
-              - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
-              - MODEL_USECASE_AGENT: The model maybe used in an agent
-              - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning
-              - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases
-                (embedding models)
-              - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails
-              - MODEL_USECASE_REASONING: The model usecase for reasoning
-              - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference
-
           extra_headers: Send extra headers
 
          extra_query: Add additional query parameters to the request
@@ -98,24 +64,36 @@ class ModelsResource(SyncAPIResource):
 
          timeout: Override the client-level default timeout for this request, in seconds
        """
+        if not model:
+            raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
         return self._get(
-            "/v2/gen-ai/models"
+            f"/models/{model}"
             if self._client._base_url_overridden
-            else "https://api.digitalocean.com/v2/gen-ai/models",
+            else f"https://inference.do-ai.run/v1/models/{model}",
             options=make_request_options(
-                extra_headers=extra_headers,
-                extra_query=extra_query,
-                extra_body=extra_body,
-                timeout=timeout,
-                query=maybe_transform(
-                    {
-                        "page": page,
-                        "per_page": per_page,
-                        "public_only": public_only,
-                        "usecases": usecases,
-                    },
-                    model_list_params.ModelListParams,
-                ),
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Model,
+        )
+
+    def list(
+        self,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ModelListResponse:
+        """
+        Lists the currently available models, and provides basic information about each
+        one such as the owner and availability.
+        """
+        return self._get(
+            "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
             cast_to=ModelListResponse,
         )
@@ -141,52 +119,22 @@ class AsyncModelsResource(AsyncAPIResource):
         """
         return AsyncModelsResourceWithStreamingResponse(self)
 
-    async def list(
+    async def retrieve(
         self,
+        model: str,
         *,
-        page: int | NotGiven = NOT_GIVEN,
-        per_page: int | NotGiven = NOT_GIVEN,
-        public_only: bool | NotGiven = NOT_GIVEN,
-        usecases: List[
-            Literal[
-                "MODEL_USECASE_UNKNOWN",
-                "MODEL_USECASE_AGENT",
-                "MODEL_USECASE_FINETUNED",
-                "MODEL_USECASE_KNOWLEDGEBASE",
-                "MODEL_USECASE_GUARDRAIL",
-                "MODEL_USECASE_REASONING",
-                "MODEL_USECASE_SERVERLESS",
-            ]
-        ]
-        | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> ModelListResponse:
+    ) -> Model:
         """
-        To list all models, send a GET request to `/v2/gen-ai/models`.
+        Retrieves a model instance, providing basic information about the model such as
+        the owner and permissioning.
 
         Args:
-          page: page number.
-
-          per_page: items per page.
-
-          public_only: only include models that are publicly available.
-
-          usecases: include only models defined for the listed usecases.
-
-              - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
-              - MODEL_USECASE_AGENT: The model maybe used in an agent
-              - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning
-              - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases
-                (embedding models)
-              - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails
-              - MODEL_USECASE_REASONING: The model usecase for reasoning
-              - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference
-
          extra_headers: Send extra headers
 
          extra_query: Add additional query parameters to the request
@@ -195,24 +143,36 @@ class AsyncModelsResource(AsyncAPIResource):
 
          timeout: Override the client-level default timeout for this request, in seconds
        """
+        if not model:
+            raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
         return await self._get(
-            "/v2/gen-ai/models"
+            f"/models/{model}"
             if self._client._base_url_overridden
-            else "https://api.digitalocean.com/v2/gen-ai/models",
+            else f"https://inference.do-ai.run/v1/models/{model}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Model,
+        )
+
+    async def list(
+        self,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ModelListResponse:
+        """
+        Lists the currently available models, and provides basic information about each
+        one such as the owner and availability.
+        """
+        return await self._get(
+            "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models",
             options=make_request_options(
-                extra_headers=extra_headers,
-                extra_query=extra_query,
-                extra_body=extra_body,
-                timeout=timeout,
-                query=await async_maybe_transform(
-                    {
-                        "page": page,
-                        "per_page": per_page,
-                        "public_only": public_only,
-                        "usecases": usecases,
-                    },
-                    model_list_params.ModelListParams,
-                ),
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
             cast_to=ModelListResponse,
         )
@@ -222,6 +182,9 @@ class ModelsResourceWithRawResponse:
     def __init__(self, models: ModelsResource) -> None:
         self._models = models
 
+        self.retrieve = to_raw_response_wrapper(
+            models.retrieve,
+        )
         self.list = to_raw_response_wrapper(
             models.list,
         )
@@ -231,6 +194,9 @@ class AsyncModelsResourceWithRawResponse:
     def __init__(self, models: AsyncModelsResource) -> None:
         self._models = models
 
+        self.retrieve = async_to_raw_response_wrapper(
+            models.retrieve,
+        )
         self.list = async_to_raw_response_wrapper(
             models.list,
         )
@@ -240,6 +206,9 @@ class ModelsResourceWithStreamingResponse:
     def __init__(self, models: ModelsResource) -> None:
         self._models = models
 
+        self.retrieve = to_streamed_response_wrapper(
+            models.retrieve,
+        )
         self.list = to_streamed_response_wrapper(
             models.list,
         )
@@ -249,6 +218,9 @@ class AsyncModelsResourceWithStreamingResponse:
     def __init__(self, models: AsyncModelsResource) -> None:
         self._models = models
 
+        self.retrieve = async_to_streamed_response_wrapper(
+            models.retrieve,
+        )
         self.list = async_to_streamed_response_wrapper(
             models.list,
         )
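
The models resource above now targets the serverless inference host (`https://inference.do-ai.run/v1`) instead of `/v2/gen-ai/models`, drops the page/per_page/public_only/usecases filters from `list()`, and adds a `retrieve()` method. A short sketch of the new surface, assuming a `GradientAI` client class; the model id below is illustrative only:

from gradientai import GradientAI

client = GradientAI()

# list() takes no filter arguments anymore and returns a ModelListResponse
# from GET /models on the inference host.
models = client.models.list()

# retrieve() is new in 0.1.0a6: it requires a non-empty model id and returns a Model.
model = client.models.retrieve("llama3.3-70b-instruct")  # illustrative model id
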
@@ -0,0 +1,195 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..types import region_list_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.region_list_response import RegionListResponse
+
+__all__ = ["RegionsResource", "AsyncRegionsResource"]
+
+
+class RegionsResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> RegionsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return RegionsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> RegionsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return RegionsResourceWithStreamingResponse(self)
+
+    def list(
+        self,
+        *,
+        serves_batch: bool | NotGiven = NOT_GIVEN,
+        serves_inference: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> RegionListResponse:
+        """
+        To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`.
+
+        Args:
+          serves_batch: include datacenters that are capable of running batch jobs.
+
+          serves_inference: include datacenters that serve inference.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/v2/gen-ai/regions"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/regions",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "serves_batch": serves_batch,
+                        "serves_inference": serves_inference,
+                    },
+                    region_list_params.RegionListParams,
+                ),
+            ),
+            cast_to=RegionListResponse,
+        )
+
+
+class AsyncRegionsResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncRegionsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return AsyncRegionsResourceWithStreamingResponse(self)
+
+    async def list(
+        self,
+        *,
+        serves_batch: bool | NotGiven = NOT_GIVEN,
+        serves_inference: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> RegionListResponse:
+        """
+        To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`.
+
+        Args:
+          serves_batch: include datacenters that are capable of running batch jobs.
+
+          serves_inference: include datacenters that serve inference.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            "/v2/gen-ai/regions"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/regions",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "serves_batch": serves_batch,
+                        "serves_inference": serves_inference,
+                    },
+                    region_list_params.RegionListParams,
+                ),
+            ),
+            cast_to=RegionListResponse,
+        )
+
+
+class RegionsResourceWithRawResponse:
+    def __init__(self, regions: RegionsResource) -> None:
+        self._regions = regions
+
+        self.list = to_raw_response_wrapper(
+            regions.list,
+        )
+
+
+class AsyncRegionsResourceWithRawResponse:
+    def __init__(self, regions: AsyncRegionsResource) -> None:
+        self._regions = regions
+
+        self.list = async_to_raw_response_wrapper(
+            regions.list,
+        )
+
+
+class RegionsResourceWithStreamingResponse:
+    def __init__(self, regions: RegionsResource) -> None:
+        self._regions = regions
+
+        self.list = to_streamed_response_wrapper(
+            regions.list,
+        )
+
+
+class AsyncRegionsResourceWithStreamingResponse:
+    def __init__(self, regions: AsyncRegionsResource) -> None:
+        self._regions = regions
+
+        self.list = async_to_streamed_response_wrapper(
+            regions.list,
+        )
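
The new `gradientai/resources/regions.py` above replaces the removed `regions/` package (files 92 and 95 in the list) with a single top-level resource that only lists datacenter regions. A brief usage sketch, assuming the client wires it up as `client.regions` (the `_client.py` changes are not shown in this diff):

from gradientai import GradientAI

client = GradientAI()

# Both filters are optional booleans sent as query parameters to GET /v2/gen-ai/regions.
regions = client.regions.list(serves_inference=True)
print(regions)
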