scale-gp-beta 0.1.0a28__py3-none-any.whl → 0.1.0a30__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49) hide show
  1. scale_gp_beta/_base_client.py +4 -1
  2. scale_gp_beta/_client.py +9 -0
  3. scale_gp_beta/_files.py +4 -4
  4. scale_gp_beta/_models.py +24 -3
  5. scale_gp_beta/_version.py +1 -1
  6. scale_gp_beta/lib/CONTRIBUTING.MD +53 -0
  7. scale_gp_beta/lib/tracing/integrations/openai/openai_span_type_map.py +3 -3
  8. scale_gp_beta/lib/tracing/span.py +8 -7
  9. scale_gp_beta/lib/tracing/trace.py +7 -5
  10. scale_gp_beta/lib/tracing/trace_queue_manager.py +14 -0
  11. scale_gp_beta/lib/tracing/tracing.py +7 -5
  12. scale_gp_beta/lib/tracing/types.py +1 -39
  13. scale_gp_beta/resources/__init__.py +14 -0
  14. scale_gp_beta/resources/chat/completions.py +4 -0
  15. scale_gp_beta/resources/responses.py +314 -0
  16. scale_gp_beta/resources/spans.py +28 -144
  17. scale_gp_beta/types/__init__.py +19 -0
  18. scale_gp_beta/types/chat/chat_completion.py +61 -6
  19. scale_gp_beta/types/chat/chat_completion_chunk.py +17 -1
  20. scale_gp_beta/types/chat/completion_models_params.py +2 -0
  21. scale_gp_beta/types/chat/model_definition.py +6 -0
  22. scale_gp_beta/types/completion.py +8 -0
  23. scale_gp_beta/types/container.py +0 -6
  24. scale_gp_beta/types/dataset.py +3 -1
  25. scale_gp_beta/types/dataset_item.py +3 -1
  26. scale_gp_beta/types/evaluation.py +3 -7
  27. scale_gp_beta/types/evaluation_item.py +3 -1
  28. scale_gp_beta/types/evaluation_task.py +31 -55
  29. scale_gp_beta/types/evaluation_task_param.py +28 -1
  30. scale_gp_beta/types/file.py +3 -1
  31. scale_gp_beta/types/inference_model.py +3 -0
  32. scale_gp_beta/types/question.py +11 -10
  33. scale_gp_beta/types/response.py +2852 -0
  34. scale_gp_beta/types/response_create_params.py +817 -0
  35. scale_gp_beta/types/response_create_response.py +20891 -0
  36. scale_gp_beta/types/shared/__init__.py +3 -0
  37. scale_gp_beta/types/shared/identity.py +16 -0
  38. scale_gp_beta/types/span.py +9 -33
  39. scale_gp_beta/types/span_batch_params.py +6 -30
  40. scale_gp_beta/types/span_create_params.py +6 -30
  41. scale_gp_beta/types/span_search_params.py +8 -37
  42. scale_gp_beta/types/span_status.py +7 -0
  43. scale_gp_beta/types/span_type.py +33 -0
  44. scale_gp_beta/types/span_update_params.py +3 -2
  45. scale_gp_beta/types/span_upsert_batch_params.py +6 -30
  46. {scale_gp_beta-0.1.0a28.dist-info → scale_gp_beta-0.1.0a30.dist-info}/METADATA +2 -3
  47. {scale_gp_beta-0.1.0a28.dist-info → scale_gp_beta-0.1.0a30.dist-info}/RECORD +49 -40
  48. {scale_gp_beta-0.1.0a28.dist-info → scale_gp_beta-0.1.0a30.dist-info}/WHEEL +0 -0
  49. {scale_gp_beta-0.1.0a28.dist-info → scale_gp_beta-0.1.0a30.dist-info}/licenses/LICENSE +0 -0
@@ -532,7 +532,10 @@ class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]):
532
532
  is_body_allowed = options.method.lower() != "get"
533
533
 
534
534
  if is_body_allowed:
535
- kwargs["json"] = json_data if is_given(json_data) else None
535
+ if isinstance(json_data, bytes):
536
+ kwargs["content"] = json_data
537
+ else:
538
+ kwargs["json"] = json_data if is_given(json_data) else None
536
539
  kwargs["files"] = files
537
540
  else:
538
541
  headers.pop("Content-Type", None)
scale_gp_beta/_client.py CHANGED
@@ -27,6 +27,7 @@ from .resources import (
27
27
  datasets,
28
28
  inference,
29
29
  questions,
30
+ responses,
30
31
  completions,
31
32
  evaluations,
32
33
  dataset_items,
@@ -61,6 +62,7 @@ ENVIRONMENTS: Dict[str, str] = {
61
62
 
62
63
 
63
64
  class SGPClient(SyncAPIClient):
65
+ responses: responses.ResponsesResource
64
66
  completions: completions.CompletionsResource
65
67
  chat: chat.ChatResource
66
68
  inference: inference.InferenceResource
@@ -165,6 +167,7 @@ class SGPClient(SyncAPIClient):
165
167
  _strict_response_validation=_strict_response_validation,
166
168
  )
167
169
 
170
+ self.responses = responses.ResponsesResource(self)
168
171
  self.completions = completions.CompletionsResource(self)
169
172
  self.chat = chat.ChatResource(self)
170
173
  self.inference = inference.InferenceResource(self)
@@ -290,6 +293,7 @@ class SGPClient(SyncAPIClient):
290
293
 
291
294
 
292
295
  class AsyncSGPClient(AsyncAPIClient):
296
+ responses: responses.AsyncResponsesResource
293
297
  completions: completions.AsyncCompletionsResource
294
298
  chat: chat.AsyncChatResource
295
299
  inference: inference.AsyncInferenceResource
@@ -394,6 +398,7 @@ class AsyncSGPClient(AsyncAPIClient):
394
398
  _strict_response_validation=_strict_response_validation,
395
399
  )
396
400
 
401
+ self.responses = responses.AsyncResponsesResource(self)
397
402
  self.completions = completions.AsyncCompletionsResource(self)
398
403
  self.chat = chat.AsyncChatResource(self)
399
404
  self.inference = inference.AsyncInferenceResource(self)
@@ -520,6 +525,7 @@ class AsyncSGPClient(AsyncAPIClient):
520
525
 
521
526
  class SGPClientWithRawResponse:
522
527
  def __init__(self, client: SGPClient) -> None:
528
+ self.responses = responses.ResponsesResourceWithRawResponse(client.responses)
523
529
  self.completions = completions.CompletionsResourceWithRawResponse(client.completions)
524
530
  self.chat = chat.ChatResourceWithRawResponse(client.chat)
525
531
  self.inference = inference.InferenceResourceWithRawResponse(client.inference)
@@ -535,6 +541,7 @@ class SGPClientWithRawResponse:
535
541
 
536
542
  class AsyncSGPClientWithRawResponse:
537
543
  def __init__(self, client: AsyncSGPClient) -> None:
544
+ self.responses = responses.AsyncResponsesResourceWithRawResponse(client.responses)
538
545
  self.completions = completions.AsyncCompletionsResourceWithRawResponse(client.completions)
539
546
  self.chat = chat.AsyncChatResourceWithRawResponse(client.chat)
540
547
  self.inference = inference.AsyncInferenceResourceWithRawResponse(client.inference)
@@ -550,6 +557,7 @@ class AsyncSGPClientWithRawResponse:
550
557
 
551
558
  class SGPClientWithStreamedResponse:
552
559
  def __init__(self, client: SGPClient) -> None:
560
+ self.responses = responses.ResponsesResourceWithStreamingResponse(client.responses)
553
561
  self.completions = completions.CompletionsResourceWithStreamingResponse(client.completions)
554
562
  self.chat = chat.ChatResourceWithStreamingResponse(client.chat)
555
563
  self.inference = inference.InferenceResourceWithStreamingResponse(client.inference)
@@ -565,6 +573,7 @@ class SGPClientWithStreamedResponse:
565
573
 
566
574
  class AsyncSGPClientWithStreamedResponse:
567
575
  def __init__(self, client: AsyncSGPClient) -> None:
576
+ self.responses = responses.AsyncResponsesResourceWithStreamingResponse(client.responses)
568
577
  self.completions = completions.AsyncCompletionsResourceWithStreamingResponse(client.completions)
569
578
  self.chat = chat.AsyncChatResourceWithStreamingResponse(client.chat)
570
579
  self.inference = inference.AsyncInferenceResourceWithStreamingResponse(client.inference)
scale_gp_beta/_files.py CHANGED
@@ -69,12 +69,12 @@ def _transform_file(file: FileTypes) -> HttpxFileTypes:
69
69
  return file
70
70
 
71
71
  if is_tuple_t(file):
72
- return (file[0], _read_file_content(file[1]), *file[2:])
72
+ return (file[0], read_file_content(file[1]), *file[2:])
73
73
 
74
74
  raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple")
75
75
 
76
76
 
77
- def _read_file_content(file: FileContent) -> HttpxFileContent:
77
+ def read_file_content(file: FileContent) -> HttpxFileContent:
78
78
  if isinstance(file, os.PathLike):
79
79
  return pathlib.Path(file).read_bytes()
80
80
  return file
@@ -111,12 +111,12 @@ async def _async_transform_file(file: FileTypes) -> HttpxFileTypes:
111
111
  return file
112
112
 
113
113
  if is_tuple_t(file):
114
- return (file[0], await _async_read_file_content(file[1]), *file[2:])
114
+ return (file[0], await async_read_file_content(file[1]), *file[2:])
115
115
 
116
116
  raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple")
117
117
 
118
118
 
119
- async def _async_read_file_content(file: FileContent) -> HttpxFileContent:
119
+ async def async_read_file_content(file: FileContent) -> HttpxFileContent:
120
120
  if isinstance(file, os.PathLike):
121
121
  return await anyio.Path(file).read_bytes()
122
122
 
scale_gp_beta/_models.py CHANGED
@@ -208,14 +208,18 @@ class BaseModel(pydantic.BaseModel):
208
208
  else:
209
209
  fields_values[name] = field_get_default(field)
210
210
 
211
+ extra_field_type = _get_extra_fields_type(__cls)
212
+
211
213
  _extra = {}
212
214
  for key, value in values.items():
213
215
  if key not in model_fields:
216
+ parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value
217
+
214
218
  if PYDANTIC_V2:
215
- _extra[key] = value
219
+ _extra[key] = parsed
216
220
  else:
217
221
  _fields_set.add(key)
218
- fields_values[key] = value
222
+ fields_values[key] = parsed
219
223
 
220
224
  object.__setattr__(m, "__dict__", fields_values)
221
225
 
@@ -370,6 +374,23 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object:
370
374
  return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None))
371
375
 
372
376
 
377
+ def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None:
378
+ if not PYDANTIC_V2:
379
+ # TODO
380
+ return None
381
+
382
+ schema = cls.__pydantic_core_schema__
383
+ if schema["type"] == "model":
384
+ fields = schema["schema"]
385
+ if fields["type"] == "model-fields":
386
+ extras = fields.get("extras_schema")
387
+ if extras and "cls" in extras:
388
+ # mypy can't narrow the type
389
+ return extras["cls"] # type: ignore[no-any-return]
390
+
391
+ return None
392
+
393
+
373
394
  def is_basemodel(type_: type) -> bool:
374
395
  """Returns whether or not the given type is either a `BaseModel` or a union of `BaseModel`"""
375
396
  if is_union(type_):
@@ -439,7 +460,7 @@ def construct_type(*, value: object, type_: object, metadata: Optional[List[Any]
439
460
  type_ = type_.__value__ # type: ignore[unreachable]
440
461
 
441
462
  # unwrap `Annotated[T, ...]` -> `T`
442
- if metadata is not None:
463
+ if metadata is not None and len(metadata) > 0:
443
464
  meta: tuple[Any, ...] = tuple(metadata)
444
465
  elif is_annotated_type(type_):
445
466
  meta = get_args(type_)[1:]
scale_gp_beta/_version.py CHANGED
@@ -1,4 +1,4 @@
1
1
  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
2
 
3
3
  __title__ = "scale_gp_beta"
4
- __version__ = "0.1.0-alpha.28" # x-release-please-version
4
+ __version__ = "0.1.0-alpha.30" # x-release-please-version
@@ -0,0 +1,53 @@
1
+ # Custom Code Patch
2
+ You can in theory add custom code patches anywhere in the repo, but at the risk of encountering many merge conflicts with Stainless in the future.
3
+ Stainless will never use the `/lib` and `/examples` directories. When possible try to only modify these directories.
4
+ If you have to add custom code elsewhere, please keep the footprint small and create a library for most of the logic.
5
+
6
+
7
+ For information on custom code patching with Stainless see [here](https://www.stainless.com/docs/guides/add-custom-code).
8
+
9
+ # Process for Adding Features
10
+ Checkout the `next` branch and pull, then create a branch from `next`.
11
+
12
+ > **_NOTE:_** Stainless uses next to "queue up" updates to the SDK.
13
+ >
14
+ > Stainless will update next with their own logic updates to the SDK along with any changes in the OpenAPI spec and changes to the Stainless
15
+ config on their SaaS platform.
16
+
17
+ Make any code changes you need, ensuring all the tests for the library are passing.
18
+
19
+ There is strict linting in this repo. Use the following commands in order.
20
+
21
+ ```bash
22
+ rye lint --fix
23
+ ```
24
+
25
+ ```bash
26
+ rye run lint | grep /specific_file.py
27
+ ```
28
+
29
+ `rye run lint` will not work if there are errors with `rye lint --fix`.
30
+ I am unsure why, but I get many errors which are ignorable in the rest of the repo when running `rye run lint`, so I usually use it
31
+ with grep to target the file I am developing.
32
+
33
+ > **_NOTE:_** The strict linting requires all types to be strictly typed. This can be a pain but is worth considering before developing any new solution.
34
+ > Try and avoid using ignore commands when possible, but sometimes it is unavoidable (see OpenAI tracing Plugin).
35
+
36
+ When committing, use the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) spec. Usually this is something like:
37
+
38
+ ```bash
39
+ git commit -m "feat: Added OpenAI tracing processor"
40
+ ```
41
+ This allows Stainless to update the release doc with more useful info.
42
+
43
+ **When creating a PR, you must manually change the destination from `main` to `next`.**
44
+
45
+ Once merged, Stainless should pick up the changes and update or create a new PR for `main`.
46
+ You will need to accept changes on this PR; Stainless should then auto-merge. Note: on occasion I have had to manually merge.
47
+ There was no consequence to this.
48
+
49
+ These PRs Stainless makes have 4 types of changes; all are merged to main via these automatic PR requests:
50
+ 1. Custom Code Changes
51
+ 2. Stainless SDK logic changes
52
+ 3. Changes to the Stainless Config in the SaaS platform
53
+ 4. Changes to the OpenAPI schema
@@ -1,8 +1,8 @@
1
1
  from typing import Dict, Optional
2
2
 
3
- from scale_gp_beta.lib.tracing.types import SpanTypeLiterals
3
+ from scale_gp_beta.types import SpanType
4
4
 
5
- OPENAI_SPAN_TYPE_MAP: Dict[str, SpanTypeLiterals] = {
5
+ OPENAI_SPAN_TYPE_MAP: Dict[str, SpanType] = {
6
6
  "generation": "COMPLETION",
7
7
  "agent": "AGENT_WORKFLOW",
8
8
  "function": "CODE_EXECUTION",
@@ -17,7 +17,7 @@ OPENAI_SPAN_TYPE_MAP: Dict[str, SpanTypeLiterals] = {
17
17
  }
18
18
 
19
19
 
20
- def openai_span_type_map(span_type: Optional[str]) -> SpanTypeLiterals:
20
+ def openai_span_type_map(span_type: Optional[str]) -> SpanType:
21
21
  """
22
22
  Maps an OpenAI span type string to its corresponding SGP SpanTypeLiteral.
23
23
 
@@ -6,11 +6,12 @@ from typing import TYPE_CHECKING, Type, Optional
6
6
  from threading import RLock
7
7
  from typing_extensions import override
8
8
 
9
+ from scale_gp_beta.types import SpanType, SpanStatus
9
10
  from scale_gp_beta.types.span_upsert_batch_params import Item as SpanCreateRequest
10
11
 
11
12
  from .util import iso_timestamp, generate_span_id
12
13
  from .scope import Scope
13
- from .types import SpanInputParam, SpanOutputParam, SpanTypeLiterals, SpanMetadataParam, SpanStatusLiterals
14
+ from .types import SpanInputParam, SpanOutputParam, SpanMetadataParam
14
15
  from .exceptions import ParamsCreationError
15
16
 
16
17
  if TYPE_CHECKING:
@@ -59,7 +60,7 @@ class BaseSpan:
59
60
  input: Optional[SpanInputParam] = None,
60
61
  output: Optional[SpanOutputParam] = None,
61
62
  metadata: Optional[SpanMetadataParam] = None,
62
- span_type: SpanTypeLiterals = "STANDALONE"
63
+ span_type: SpanType = "STANDALONE"
63
64
  ):
64
65
  self._name = name
65
66
  self._trace_id: str = trace_id or "no_trace_id"
@@ -71,8 +72,8 @@ class BaseSpan:
71
72
  self._input: SpanInputParam = input or {}
72
73
  self._output: SpanOutputParam = output or {}
73
74
  self._metadata: SpanMetadataParam = metadata or {}
74
- self._span_type: SpanTypeLiterals = span_type
75
- self._status: SpanStatusLiterals = "SUCCESS"
75
+ self._span_type: SpanType = span_type
76
+ self._status: SpanStatus = "SUCCESS"
76
77
  self._queue_manager = queue_manager
77
78
 
78
79
  self._contextvar_token: Optional[contextvars.Token[Optional[BaseSpan]]] = None
@@ -108,11 +109,11 @@ class BaseSpan:
108
109
  return self._parent_span_id
109
110
 
110
111
  @property
111
- def status(self) -> SpanStatusLiterals:
112
+ def status(self) -> SpanStatus:
112
113
  return self._status
113
114
 
114
115
  @property
115
- def span_type(self) -> SpanTypeLiterals:
116
+ def span_type(self) -> SpanType:
116
117
  return self._span_type
117
118
 
118
119
  # with setters
@@ -291,7 +292,7 @@ class Span(BaseSpan):
291
292
  input: Optional[SpanInputParam] = None,
292
293
  output: Optional[SpanOutputParam] = None,
293
294
  metadata: Optional[SpanMetadataParam] = None,
294
- span_type: SpanTypeLiterals = "STANDALONE",
295
+ span_type: SpanType = "STANDALONE",
295
296
  ):
296
297
  super().__init__(name, trace_id, queue_manager, span_id, parent_span_id, group_id, input, output, metadata, span_type)
297
298
  self._queue_manager: TraceQueueManager = queue_manager
@@ -4,10 +4,12 @@ from types import TracebackType
4
4
  from typing import Type, Optional
5
5
  from typing_extensions import override
6
6
 
7
+ from scale_gp_beta.types import SpanType, SpanStatus
8
+
7
9
  from .span import Span, BaseSpan, NoOpSpan
8
10
  from .util import generate_trace_id
9
11
  from .scope import Scope
10
- from .types import SpanInputParam, SpanOutputParam, SpanTypeLiterals, SpanMetadataParam, SpanStatusLiterals
12
+ from .types import SpanInputParam, SpanOutputParam, SpanMetadataParam
11
13
  from .trace_queue_manager import TraceQueueManager
12
14
 
13
15
  log: logging.Logger = logging.getLogger(__name__)
@@ -79,11 +81,11 @@ class BaseTrace:
79
81
  return self.root_span.group_id
80
82
 
81
83
  @property
82
- def span_type(self) -> SpanTypeLiterals:
84
+ def span_type(self) -> SpanType:
83
85
  return self.root_span.span_type
84
86
 
85
87
  @property
86
- def status(self) -> SpanStatusLiterals:
88
+ def status(self) -> SpanStatus:
87
89
  return self.root_span.status
88
90
 
89
91
  def set_error(
@@ -128,7 +130,7 @@ class NoOpTrace(BaseTrace):
128
130
  trace_id: Optional[str] = None,
129
131
  span_id: Optional[str] = None,
130
132
  group_id: Optional[str] = None,
131
- span_type: SpanTypeLiterals = "TRACER",
133
+ span_type: SpanType = "TRACER",
132
134
  input: Optional[SpanInputParam] = None,
133
135
  output: Optional[SpanOutputParam] = None,
134
136
  metadata: Optional[SpanMetadataParam] = None,
@@ -164,7 +166,7 @@ class Trace(BaseTrace):
164
166
  trace_id: Optional[str] = None,
165
167
  span_id: Optional[str] = None,
166
168
  group_id: Optional[str] = None,
167
- span_type: SpanTypeLiterals = "TRACER",
169
+ span_type: SpanType = "TRACER",
168
170
  input: Optional[SpanInputParam] = None,
169
171
  output: Optional[SpanOutputParam] = None,
170
172
  metadata: Optional[SpanMetadataParam] = None,
@@ -11,6 +11,8 @@ from .util import configure, is_disabled
11
11
  from .trace_exporter import TraceExporter
12
12
 
13
13
  if TYPE_CHECKING:
14
+ import httpx
15
+
14
16
  from .span import Span
15
17
  from .trace import Trace
16
18
 
@@ -44,6 +46,7 @@ class TraceQueueManager:
44
46
  worker_enabled: Optional[bool] = None,
45
47
  ):
46
48
  self._client = client
49
+ self.register_client(client) if client else None
47
50
  self._attempted_local_client_creation = False
48
51
  self._trigger_queue_size = trigger_queue_size
49
52
  self._trigger_cadence = trigger_cadence
@@ -68,6 +71,17 @@ class TraceQueueManager:
68
71
  log.info("Registering client")
69
72
  self._client = client
70
73
 
74
+ original_prepare_request = self._client._prepare_request
75
+
76
+ def custom_prepare_request(request: "httpx.Request") -> None:
77
+ original_prepare_request(request)
78
+
79
+ # TODO: Hook logic here, we should check to see if we are in the scope of a span, if so we should inject
80
+ # appropriate headers into the request
81
+ # current_span = Scope.get_current_span()
82
+
83
+ self._client._prepare_request = custom_prepare_request # type: ignore
84
+
71
85
  def shutdown(self, timeout: Optional[float] = None) -> None:
72
86
  if not self._worker_enabled:
73
87
  log.debug("No worker to shutdown")
@@ -1,11 +1,13 @@
1
1
  import logging
2
2
  from typing import Optional
3
3
 
4
+ from scale_gp_beta.types import SpanType
5
+
4
6
  from .span import Span, BaseSpan, NoOpSpan
5
7
  from .util import is_disabled
6
8
  from .scope import Scope
7
9
  from .trace import Trace, BaseTrace, NoOpTrace
8
- from .types import SpanInputParam, SpanOutputParam, SpanTypeLiterals, SpanMetadataParam
10
+ from .types import SpanInputParam, SpanOutputParam, SpanMetadataParam
9
11
  from .trace_queue_manager import TraceQueueManager, tracing_queue_manager
10
12
 
11
13
  log: logging.Logger = logging.getLogger(__name__)
@@ -51,7 +53,7 @@ def flush_queue() -> None:
51
53
 
52
54
  def create_trace(
53
55
  name: str,
54
- span_type: SpanTypeLiterals = "TRACER",
56
+ span_type: SpanType = "TRACER",
55
57
  input: Optional[SpanInputParam] = None,
56
58
  output: Optional[SpanOutputParam] = None,
57
59
  metadata: Optional[SpanMetadataParam] = None,
@@ -74,7 +76,7 @@ def create_trace(
74
76
 
75
77
  Args:
76
78
  name: The name of the trace.
77
- span_type (Optional[SpanTypeLiterals]): Type of root span.
79
+ span_type (Optional[SpanType]): Type of root span.
78
80
  input (Optional[SpanInputParam]): Input of root span.
79
81
  output (Optional[SpanOutputParam]): Output of root span.
80
82
  metadata (Optional[SpanMetadataParam]): An optional, user-defined metadata.
@@ -131,7 +133,7 @@ def create_trace(
131
133
 
132
134
  def create_span(
133
135
  name: str,
134
- span_type: SpanTypeLiterals = "STANDALONE",
136
+ span_type: SpanType = "STANDALONE",
135
137
  input: Optional[SpanInputParam] = None,
136
138
  output: Optional[SpanOutputParam] = None,
137
139
  metadata: Optional[SpanMetadataParam] = None,
@@ -160,7 +162,7 @@ def create_span(
160
162
  Args:
161
163
  name (str): A descriptive name for the span (e.g., "database_query",
162
164
  "http_request").
163
- span_type (SpanTypeLiterals): The type of the span.
165
+ span_type (SpanType): The type of the span.
164
166
  input (Optional[SpanInputParam], optional): A dictionary containing
165
167
  input data or parameters relevant to this span's operation. Defaults to None.
166
168
  output (Optional[SpanOutputParam], optional): A dictionary containing
@@ -1,43 +1,5 @@
1
- """
2
- This is necessary, unfortunately. Stainless does not provide SpanStatusLiterals and SpanTypeLiterals as enums, only as
3
- type annotations.
4
-
5
- For strict linting, we need to reference these enums.
6
-
7
- NOTE: These will have to be manually updated to support updated span_types and status.
8
- """
9
-
10
- from typing_extensions import Any, Dict, Literal
1
+ from typing_extensions import Any, Dict
11
2
 
12
3
  SpanInputParam = Dict[str, Any]
13
4
  SpanOutputParam = Dict[str, Any]
14
5
  SpanMetadataParam = Dict[str, Any]
15
-
16
- SpanStatusLiterals = Literal["SUCCESS", "ERROR", "CANCELED"]
17
-
18
- SpanTypeLiterals = Literal[
19
- "TEXT_INPUT",
20
- "TEXT_OUTPUT",
21
- "COMPLETION_INPUT",
22
- "COMPLETION",
23
- "KB_RETRIEVAL",
24
- "KB_INPUT",
25
- "RERANKING",
26
- "EXTERNAL_ENDPOINT",
27
- "PROMPT_ENGINEERING",
28
- "DOCUMENT_INPUT",
29
- "MAP_REDUCE",
30
- "DOCUMENT_SEARCH",
31
- "DOCUMENT_PROMPT",
32
- "CUSTOM",
33
- "CODE_EXECUTION",
34
- "DATA_MANIPULATION",
35
- "EVALUATION",
36
- "FILE_RETRIEVAL",
37
- "KB_ADD_CHUNK",
38
- "KB_MANAGEMENT",
39
- "TRACER",
40
- "AGENT_TRACER",
41
- "AGENT_WORKFLOW",
42
- "STANDALONE",
43
- ]
@@ -56,6 +56,14 @@ from .questions import (
56
56
  QuestionsResourceWithStreamingResponse,
57
57
  AsyncQuestionsResourceWithStreamingResponse,
58
58
  )
59
+ from .responses import (
60
+ ResponsesResource,
61
+ AsyncResponsesResource,
62
+ ResponsesResourceWithRawResponse,
63
+ AsyncResponsesResourceWithRawResponse,
64
+ ResponsesResourceWithStreamingResponse,
65
+ AsyncResponsesResourceWithStreamingResponse,
66
+ )
59
67
  from .completions import (
60
68
  CompletionsResource,
61
69
  AsyncCompletionsResource,
@@ -90,6 +98,12 @@ from .evaluation_items import (
90
98
  )
91
99
 
92
100
  __all__ = [
101
+ "ResponsesResource",
102
+ "AsyncResponsesResource",
103
+ "ResponsesResourceWithRawResponse",
104
+ "AsyncResponsesResourceWithRawResponse",
105
+ "ResponsesResourceWithStreamingResponse",
106
+ "AsyncResponsesResourceWithStreamingResponse",
93
107
  "CompletionsResource",
94
108
  "AsyncCompletionsResource",
95
109
  "CompletionsResourceWithRawResponse",
@@ -522,6 +522,7 @@ class CompletionsResource(SyncAPIResource):
522
522
  def models(
523
523
  self,
524
524
  *,
525
+ check_availability: bool | NotGiven = NOT_GIVEN,
525
526
  ending_before: str | NotGiven = NOT_GIVEN,
526
527
  limit: int | NotGiven = NOT_GIVEN,
527
528
  model_vendor: Literal[
@@ -569,6 +570,7 @@ class CompletionsResource(SyncAPIResource):
569
570
  timeout=timeout,
570
571
  query=maybe_transform(
571
572
  {
573
+ "check_availability": check_availability,
572
574
  "ending_before": ending_before,
573
575
  "limit": limit,
574
576
  "model_vendor": model_vendor,
@@ -1077,6 +1079,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
1077
1079
  async def models(
1078
1080
  self,
1079
1081
  *,
1082
+ check_availability: bool | NotGiven = NOT_GIVEN,
1080
1083
  ending_before: str | NotGiven = NOT_GIVEN,
1081
1084
  limit: int | NotGiven = NOT_GIVEN,
1082
1085
  model_vendor: Literal[
@@ -1124,6 +1127,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
1124
1127
  timeout=timeout,
1125
1128
  query=await async_maybe_transform(
1126
1129
  {
1130
+ "check_availability": check_availability,
1127
1131
  "ending_before": ending_before,
1128
1132
  "limit": limit,
1129
1133
  "model_vendor": model_vendor,