adaptive-sdk 0.1.8__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,6 +12,7 @@ from .cancel_job import CancelJob, CancelJobCancelJob
  from .client import GQLClient
  from .create_ab_campaign import CreateAbCampaign, CreateAbCampaignCreateAbCampaign
  from .create_custom_recipe import CreateCustomRecipe, CreateCustomRecipeCreateCustomRecipe
+ from .create_dataset_from_multipart_upload import CreateDatasetFromMultipartUpload, CreateDatasetFromMultipartUploadCreateDatasetFromMultipartUpload
  from .create_grader import CreateGrader, CreateGraderCreateGrader
  from .create_job import CreateJob, CreateJobCreateJob
  from .create_judge import CreateJudge, CreateJudgeCreateJudge
@@ -21,6 +22,7 @@ from .create_role import CreateRole, CreateRoleCreateRole
  from .create_team import CreateTeam, CreateTeamCreateTeam
  from .create_use_case import CreateUseCase, CreateUseCaseCreateUseCase
  from .create_user import CreateUser, CreateUserCreateUser
+ from .dataset_upload_processing_status import DatasetUploadProcessingStatus, DatasetUploadProcessingStatusDatasetUploadProcessingStatus
  from .delete_custom_recipe import DeleteCustomRecipe
  from .delete_dataset import DeleteDataset
  from .delete_grader import DeleteGrader, DeleteGraderDeleteGrader
@@ -78,4 +80,4 @@ from .update_judge import UpdateJudge, UpdateJudgeUpdateJudge
  from .update_model import UpdateModel, UpdateModelUpdateModelService
  from .update_model_compute_config import UpdateModelComputeConfig, UpdateModelComputeConfigUpdateModelComputeConfig
  from .update_user import UpdateUser, UpdateUserSetTeamMember, UpdateUserSetTeamMemberRole, UpdateUserSetTeamMemberTeam, UpdateUserSetTeamMemberUser
- __all__ = ['AbCampaignCreateData', 'AbCampaignDetailData', 'AbCampaignDetailDataMetric', 'AbCampaignDetailDataModels', 'AbCampaignDetailDataUseCase', 'AbCampaignFilter', 'AbCampaignReportData', 'AbCampaignReportDataVariants', 'AbCampaignReportDataVariantsComparisons', 'AbCampaignReportDataVariantsComparisonsVariant', 'AbCampaignReportDataVariantsInterval', 'AbCampaignReportDataVariantsVariant', 'AbcampaignCreate', 'AbcampaignStatus', 'AddExternalModel', 'AddExternalModelAddExternalModel', 'AddExternalModelInput', 'AddHFModel', 'AddHFModelImportHfModel', 'AddHFModelInput', 'AddModel', 'AddModelAddModel', 'AddModelAddModelBackbone', 'AddModelInput', 'AddRemoteEnv', 'AddRemoteEnvAddRemoteEnv', 'AnthropicProviderDataInput', 'ApiKeyCreate', 'ArtifactFilter', 'AsyncBaseClientOpenTelemetry', 'AsyncGQLClient', 'AttachModel', 'AttachModelToUseCase', 'AttachModelToUseCaseAttachModel', 'AuthProviderKind', 'AzureProviderDataInput', 'BaseClientOpenTelemetry', 'BaseModel', 'BillingCycle', 'CancelABCampaign', 'CancelAllocationInput', 'CancelJob', 'CancelJobCancelJob', 'CapabilityFilter', 'CompletionComparisonFeedbackData', 'CompletionComparisonFeedbackDataModel', 'CompletionComparisonFilterInput', 'CompletionData', 'CompletionDataChatMessages', 'CompletionDataComparisonFeedbacks', 'CompletionDataComparisonFeedbacksMetric', 'CompletionDataComparisonFeedbacksOtherCompletion', 'CompletionDataComparisonFeedbacksPreferedCompletion', 'CompletionDataComparisonFeedbacksUsecase', 'CompletionDataDirectFeedbacks', 'CompletionDataDirectFeedbacksMetric', 'CompletionDataLabels', 'CompletionDataMetadata', 'CompletionDataMetadataUsage', 'CompletionDataModel', 'CompletionFeedbackFilterInput', 'CompletionGroupBy', 'CompletionLabelValue', 'CompletionSource', 'CompletionsByFilters', 'CompletionsById', 'ComputePoolCapability', 'CreateAbCampaign', 'CreateAbCampaignCreateAbCampaign', 'CreateCustomRecipe', 'CreateCustomRecipeCreateCustomRecipe', 'CreateGrader', 'CreateGraderCreateGrader', 'CreateJob', 'CreateJobCreateJob', 'CreateJudge', 'CreateJudgeCreateJudge', 'CreateMetric', 'CreateMetricCreateMetric', 'CreatePrebuiltJudge', 'CreatePrebuiltJudgeCreatePrebuiltJudge', 'CreateRecipeInput', 'CreateRole', 'CreateRoleCreateRole', 'CreateTeam', 'CreateTeamCreateTeam', 'CreateToolProviderInput', 'CreateUseCase', 'CreateUseCaseCreateUseCase', 'CreateUser', 'CreateUserCreateUser', 'CursorPageInput', 'CustomConfigInput', 'CustomRecipeData', 'CustomRecipeDataCreatedBy', 'CustomRecipeDataLabels', 'CustomRecipeFilterInput', 'DatasetCompletionQuery', 'DatasetCreate', 'DatasetCreateFromFilters', 'DatasetCreateFromMultipartUpload', 'DatasetData', 'DatasetDataMetricsUsage', 'DatasetDataMetricsUsageMetric', 'DatasetKind', 'DatasetSource', 'DatasetStatus', 'DatasetUploadProcessingStatusInput', 'DateBucketUnit', 'DeleteCustomRecipe', 'DeleteDataset', 'DeleteGrader', 'DeleteGraderDeleteGrader', 'DeleteJudge', 'DeleteJudgeDeleteJudge', 'DeleteUser', 'DeleteUserDeleteUser', 'DeployModel', 'DescribeAbCampaign', 'DescribeAbCampaignAbCampaign', 'DescribeAbCampaignAbCampaignReport', 'DescribeDataset', 'DescribeDatasetDataset', 'DescribeInteraction', 'DescribeInteractionCompletion', 'DescribeJob', 'DescribeJobJob', 'DescribeMetric', 'DescribeMetricAdmin', 'DescribeMetricAdminMetric', 'DescribeMetricMetric', 'DescribeModel', 'DescribeModelAdmin', 'DescribeModelAdminModel', 'DescribeModelAdminModelBackbone', 'DescribeModelModel', 'DescribeModelModelBackbone', 'DescribeUseCase', 'DescribeUseCaseUseCase', 'EmojiInput', 'ExternalModelProviderName', 
'FeedbackAddInput', 'FeedbackFilterInput', 'FeedbackType', 'FeedbackUpdateInput', 'FromGroupsQuery', 'GQLClient', 'GetCustomRecipe', 'GetCustomRecipeCustomRecipe', 'GetGrader', 'GetGraderGrader', 'GetJudge', 'GetJudgeJudge', 'GlobalUsageFilterInput', 'GoogleProviderDataInput', 'GraderConfigInput', 'GraderCreateInput', 'GraderData', 'GraderDataGraderConfigCustomConfigOutput', 'GraderDataGraderConfigJudgeConfigOutput', 'GraderDataGraderConfigJudgeConfigOutputExamples', 'GraderDataGraderConfigJudgeConfigOutputExamplesInput', 'GraderDataGraderConfigJudgeConfigOutputModel', 'GraderDataGraderConfigPrebuiltConfigOutput', 'GraderDataGraderConfigPrebuiltConfigOutputModel', 'GraderDataGraderConfigPrebuiltConfigOutputPrebuiltCriteria', 'GraderDataGraderConfigRemoteConfigOutput', 'GraderDataMetric', 'GraderDataUseCase', 'GraderTypeEnum', 'GraderUpdateInput', 'GraphQLClientError', 'GraphQLClientGraphQLError', 'GraphQLClientGraphQLMultiError', 'GraphQLClientHttpError', 'GraphQLClientInvalidResponseError', 'GroupSelection', 'GroupSelectionQuery', 'HarmonyGroupData', 'HarmonyGroupDataComputePool', 'HarmonyGroupDataOnlineModels', 'HarmonyStatus', 'JobArtifactFilter', 'JobArtifactKind', 'JobData', 'JobDataCreatedBy', 'JobDataDetails', 'JobDataDetailsArtifacts', 'JobDataDetailsArtifactsByproductsDatasetByproducts', 'JobDataDetailsArtifactsByproductsEvaluationByproducts', 'JobDataDetailsArtifactsByproductsEvaluationByproductsEvalResults', 'JobDataDetailsArtifactsByproductsEvaluationByproductsEvalResultsMetric', 'JobDataDetailsArtifactsByproductsEvaluationByproductsEvalResultsModelService', 'JobDataDetailsArtifactsByproductsModelByproducts', 'JobDataRecipe', 'JobDataStages', 'JobDataStagesInfoBatchInferenceJobStageOutput', 'JobDataStagesInfoEvalJobStageOutput', 'JobDataStagesInfoTrainingJobStageOutput', 'JobDataUseCase', 'JobInput', 'JobKind', 'JobStageOutputData', 'JobStageOutputDataInfoBatchInferenceJobStageOutput', 'JobStageOutputDataInfoEvalJobStageOutput', 'JobStageOutputDataInfoTrainingJobStageOutput', 'JobStatus', 'JobStatusOutput', 'JudgeCapability', 'JudgeConfigInput', 'JudgeCreate', 'JudgeData', 'JudgeDataExamples', 'JudgeDataExamplesInput', 'JudgeDataMetric', 'JudgeDataModel', 'JudgeExampleInput', 'JudgeExampleInputTurnEntry', 'JudgeUpdate', 'LabelFilter', 'LabelInput', 'LinkMetric', 'LinkMetricLinkMetric', 'ListAbCampaigns', 'ListAbCampaignsAbCampaigns', 'ListCompletionsFilterInput', 'ListComputePools', 'ListComputePoolsComputePools', 'ListComputePoolsComputePoolsHarmonyGroups', 'ListCustomRecipes', 'ListCustomRecipesCustomRecipes', 'ListDatasets', 'ListDatasetsDatasets', 'ListGraders', 'ListGradersGraders', 'ListGroupedInteractions', 'ListGroupedInteractionsCompletionsGrouped', 'ListGroupedInteractionsCompletionsGroupedNodes', 'ListGroupedInteractionsCompletionsGroupedNodesCompletions', 'ListGroupedInteractionsCompletionsGroupedNodesCompletionsNodes', 'ListGroupedInteractionsCompletionsGroupedNodesDirectFeedbacksStats', 'ListGroupedInteractionsCompletionsGroupedNodesDirectFeedbacksStatsMetric', 'ListGroupedInteractionsCompletionsGroupedPageInfo', 'ListHarmonyGroups', 'ListHarmonyGroupsHarmonyGroups', 'ListInteractions', 'ListInteractionsCompletions', 'ListInteractionsCompletionsNodes', 'ListInteractionsCompletionsPageInfo', 'ListJobs', 'ListJobsFilterInput', 'ListJobsJobs', 'ListJobsJobsNodes', 'ListJobsJobsPageInfo', 'ListJudgeVersions', 'ListJudgeVersionsJudgeVersions', 'ListJudges', 'ListJudgesJudges', 'ListMetrics', 'ListMetricsMetrics', 'ListModels', 'ListModelsModels', 
'ListModelsModelsBackbone', 'ListPermissions', 'ListRemoteEnvs', 'ListRemoteEnvsRemoteEnvs', 'ListRoles', 'ListRolesRoles', 'ListTeams', 'ListTeamsTeams', 'ListUseCases', 'ListUseCasesUseCases', 'ListUsers', 'ListUsersUsers', 'LoadDataset', 'LoadDatasetCreateDataset', 'LockGrader', 'LockGraderLockGrader', 'Me', 'MeMe', 'MeMeApiKeys', 'MetricAggregation', 'MetricCreate', 'MetricData', 'MetricDataAdmin', 'MetricDataAdminUseCases', 'MetricGetOrCreate', 'MetricKind', 'MetricLink', 'MetricScoringType', 'MetricTrendInput', 'MetricUnlink', 'MetricWithContextData', 'ModelCapabilityFilter', 'ModelComputeConfigInput', 'ModelData', 'ModelDataAdmin', 'ModelDataAdminUseCases', 'ModelDataComputeConfig', 'ModelFilter', 'ModelOnline', 'ModelPlacementInput', 'ModelProviderDataInput', 'ModelServiceData', 'ModelServiceDataModel', 'ModelServiceDataModelBackbone', 'ModelServiceDisconnect', 'ModelServiceFilter', 'ModelserviceStatus', 'NvidiaProviderDataInput', 'OpenAIModel', 'OpenAIProviderDataInput', 'OrderPair', 'PrebuiltConfigInput', 'PrebuiltCriteriaKey', 'PrebuiltJudgeCreate', 'Protocol', 'ProviderName', 'RemoteConfigInput', 'RemoteEnvCreate', 'RemoteEnvData', 'RemoteEnvStatus', 'RemoveRemoteEnv', 'RemoveTeamMember', 'RemoveTeamMemberRemoveTeamMember', 'ResizePartitionInput', 'RoleCreate', 'SampleConfig', 'SelectionTypeInput', 'SessionStatus', 'ShareUseCase', 'ShareUseCaseShareUseCase', 'SortDirection', 'SystemPromptTemplateCreate', 'SystemPromptTemplateUpdate', 'TeamCreate', 'TeamMemberRemove', 'TeamMemberSet', 'TerminateModel', 'TestRemoteEnv', 'TestRemoteEnvTestRemoteEnvRemoteEnvTestOffline', 'TestRemoteEnvTestRemoteEnvRemoteEnvTestOnline', 'TimeRange', 'TimeseriesInput', 'TimeseriesInterval', 'UnitConfigInput', 'UnitPosition', 'UnlinkMetric', 'UpdateCompletion', 'UpdateCustomRecipe', 'UpdateCustomRecipeUpdateCustomRecipe', 'UpdateGrader', 'UpdateGraderUpdateGrader', 'UpdateJudge', 'UpdateJudgeUpdateJudge', 'UpdateModel', 'UpdateModelComputeConfig', 'UpdateModelComputeConfigUpdateModelComputeConfig', 'UpdateModelService', 'UpdateModelUpdateModelService', 'UpdateRecipeInput', 'UpdateToolProviderInput', 'UpdateUser', 'UpdateUserSetTeamMember', 'UpdateUserSetTeamMemberRole', 'UpdateUserSetTeamMemberTeam', 'UpdateUserSetTeamMemberUser', 'Upload', 'UsageFilterInput', 'UsagePerUseCaseFilterInput', 'UseCaseCreate', 'UseCaseData', 'UseCaseDataMetrics', 'UseCaseDataModelServices', 'UseCaseDataShares', 'UseCaseDataSharesRole', 'UseCaseDataSharesTeam', 'UseCaseFilter', 'UseCaseMetadataInput', 'UseCaseSettingsInput', 'UseCaseShareInput', 'UseCaseShares', 'UseCaseUpdate', 'UserCreate', 'UserCreateTeamWithRole', 'UserData', 'UserDataTeams', 'UserDataTeamsRole', 'UserDataTeamsTeam', 'WidgetInput']
+ __all__ = ['AbCampaignCreateData', 'AbCampaignDetailData', 'AbCampaignDetailDataMetric', 'AbCampaignDetailDataModels', 'AbCampaignDetailDataUseCase', 'AbCampaignFilter', 'AbCampaignReportData', 'AbCampaignReportDataVariants', 'AbCampaignReportDataVariantsComparisons', 'AbCampaignReportDataVariantsComparisonsVariant', 'AbCampaignReportDataVariantsInterval', 'AbCampaignReportDataVariantsVariant', 'AbcampaignCreate', 'AbcampaignStatus', 'AddExternalModel', 'AddExternalModelAddExternalModel', 'AddExternalModelInput', 'AddHFModel', 'AddHFModelImportHfModel', 'AddHFModelInput', 'AddModel', 'AddModelAddModel', 'AddModelAddModelBackbone', 'AddModelInput', 'AddRemoteEnv', 'AddRemoteEnvAddRemoteEnv', 'AnthropicProviderDataInput', 'ApiKeyCreate', 'ArtifactFilter', 'AsyncBaseClientOpenTelemetry', 'AsyncGQLClient', 'AttachModel', 'AttachModelToUseCase', 'AttachModelToUseCaseAttachModel', 'AuthProviderKind', 'AzureProviderDataInput', 'BaseClientOpenTelemetry', 'BaseModel', 'BillingCycle', 'CancelABCampaign', 'CancelAllocationInput', 'CancelJob', 'CancelJobCancelJob', 'CapabilityFilter', 'CompletionComparisonFeedbackData', 'CompletionComparisonFeedbackDataModel', 'CompletionComparisonFilterInput', 'CompletionData', 'CompletionDataChatMessages', 'CompletionDataComparisonFeedbacks', 'CompletionDataComparisonFeedbacksMetric', 'CompletionDataComparisonFeedbacksOtherCompletion', 'CompletionDataComparisonFeedbacksPreferedCompletion', 'CompletionDataComparisonFeedbacksUsecase', 'CompletionDataDirectFeedbacks', 'CompletionDataDirectFeedbacksMetric', 'CompletionDataLabels', 'CompletionDataMetadata', 'CompletionDataMetadataUsage', 'CompletionDataModel', 'CompletionFeedbackFilterInput', 'CompletionGroupBy', 'CompletionLabelValue', 'CompletionSource', 'CompletionsByFilters', 'CompletionsById', 'ComputePoolCapability', 'CreateAbCampaign', 'CreateAbCampaignCreateAbCampaign', 'CreateCustomRecipe', 'CreateCustomRecipeCreateCustomRecipe', 'CreateDatasetFromMultipartUpload', 'CreateDatasetFromMultipartUploadCreateDatasetFromMultipartUpload', 'CreateGrader', 'CreateGraderCreateGrader', 'CreateJob', 'CreateJobCreateJob', 'CreateJudge', 'CreateJudgeCreateJudge', 'CreateMetric', 'CreateMetricCreateMetric', 'CreatePrebuiltJudge', 'CreatePrebuiltJudgeCreatePrebuiltJudge', 'CreateRecipeInput', 'CreateRole', 'CreateRoleCreateRole', 'CreateTeam', 'CreateTeamCreateTeam', 'CreateToolProviderInput', 'CreateUseCase', 'CreateUseCaseCreateUseCase', 'CreateUser', 'CreateUserCreateUser', 'CursorPageInput', 'CustomConfigInput', 'CustomRecipeData', 'CustomRecipeDataCreatedBy', 'CustomRecipeDataLabels', 'CustomRecipeFilterInput', 'DatasetCompletionQuery', 'DatasetCreate', 'DatasetCreateFromFilters', 'DatasetCreateFromMultipartUpload', 'DatasetData', 'DatasetDataMetricsUsage', 'DatasetDataMetricsUsageMetric', 'DatasetKind', 'DatasetSource', 'DatasetStatus', 'DatasetUploadProcessingStatus', 'DatasetUploadProcessingStatusDatasetUploadProcessingStatus', 'DatasetUploadProcessingStatusInput', 'DateBucketUnit', 'DeleteCustomRecipe', 'DeleteDataset', 'DeleteGrader', 'DeleteGraderDeleteGrader', 'DeleteJudge', 'DeleteJudgeDeleteJudge', 'DeleteUser', 'DeleteUserDeleteUser', 'DeployModel', 'DescribeAbCampaign', 'DescribeAbCampaignAbCampaign', 'DescribeAbCampaignAbCampaignReport', 'DescribeDataset', 'DescribeDatasetDataset', 'DescribeInteraction', 'DescribeInteractionCompletion', 'DescribeJob', 'DescribeJobJob', 'DescribeMetric', 'DescribeMetricAdmin', 'DescribeMetricAdminMetric', 'DescribeMetricMetric', 'DescribeModel', 'DescribeModelAdmin', 
'DescribeModelAdminModel', 'DescribeModelAdminModelBackbone', 'DescribeModelModel', 'DescribeModelModelBackbone', 'DescribeUseCase', 'DescribeUseCaseUseCase', 'EmojiInput', 'ExternalModelProviderName', 'FeedbackAddInput', 'FeedbackFilterInput', 'FeedbackType', 'FeedbackUpdateInput', 'FromGroupsQuery', 'GQLClient', 'GetCustomRecipe', 'GetCustomRecipeCustomRecipe', 'GetGrader', 'GetGraderGrader', 'GetJudge', 'GetJudgeJudge', 'GlobalUsageFilterInput', 'GoogleProviderDataInput', 'GraderConfigInput', 'GraderCreateInput', 'GraderData', 'GraderDataGraderConfigCustomConfigOutput', 'GraderDataGraderConfigJudgeConfigOutput', 'GraderDataGraderConfigJudgeConfigOutputExamples', 'GraderDataGraderConfigJudgeConfigOutputExamplesInput', 'GraderDataGraderConfigJudgeConfigOutputModel', 'GraderDataGraderConfigPrebuiltConfigOutput', 'GraderDataGraderConfigPrebuiltConfigOutputModel', 'GraderDataGraderConfigPrebuiltConfigOutputPrebuiltCriteria', 'GraderDataGraderConfigRemoteConfigOutput', 'GraderDataMetric', 'GraderDataUseCase', 'GraderTypeEnum', 'GraderUpdateInput', 'GraphQLClientError', 'GraphQLClientGraphQLError', 'GraphQLClientGraphQLMultiError', 'GraphQLClientHttpError', 'GraphQLClientInvalidResponseError', 'GroupSelection', 'GroupSelectionQuery', 'HarmonyGroupData', 'HarmonyGroupDataComputePool', 'HarmonyGroupDataOnlineModels', 'HarmonyStatus', 'JobArtifactFilter', 'JobArtifactKind', 'JobData', 'JobDataCreatedBy', 'JobDataDetails', 'JobDataDetailsArtifacts', 'JobDataDetailsArtifactsByproductsDatasetByproducts', 'JobDataDetailsArtifactsByproductsEvaluationByproducts', 'JobDataDetailsArtifactsByproductsEvaluationByproductsEvalResults', 'JobDataDetailsArtifactsByproductsEvaluationByproductsEvalResultsMetric', 'JobDataDetailsArtifactsByproductsEvaluationByproductsEvalResultsModelService', 'JobDataDetailsArtifactsByproductsModelByproducts', 'JobDataRecipe', 'JobDataStages', 'JobDataStagesInfoBatchInferenceJobStageOutput', 'JobDataStagesInfoEvalJobStageOutput', 'JobDataStagesInfoTrainingJobStageOutput', 'JobDataUseCase', 'JobInput', 'JobKind', 'JobStageOutputData', 'JobStageOutputDataInfoBatchInferenceJobStageOutput', 'JobStageOutputDataInfoEvalJobStageOutput', 'JobStageOutputDataInfoTrainingJobStageOutput', 'JobStatus', 'JobStatusOutput', 'JudgeCapability', 'JudgeConfigInput', 'JudgeCreate', 'JudgeData', 'JudgeDataExamples', 'JudgeDataExamplesInput', 'JudgeDataMetric', 'JudgeDataModel', 'JudgeExampleInput', 'JudgeExampleInputTurnEntry', 'JudgeUpdate', 'LabelFilter', 'LabelInput', 'LinkMetric', 'LinkMetricLinkMetric', 'ListAbCampaigns', 'ListAbCampaignsAbCampaigns', 'ListCompletionsFilterInput', 'ListComputePools', 'ListComputePoolsComputePools', 'ListComputePoolsComputePoolsHarmonyGroups', 'ListCustomRecipes', 'ListCustomRecipesCustomRecipes', 'ListDatasets', 'ListDatasetsDatasets', 'ListGraders', 'ListGradersGraders', 'ListGroupedInteractions', 'ListGroupedInteractionsCompletionsGrouped', 'ListGroupedInteractionsCompletionsGroupedNodes', 'ListGroupedInteractionsCompletionsGroupedNodesCompletions', 'ListGroupedInteractionsCompletionsGroupedNodesCompletionsNodes', 'ListGroupedInteractionsCompletionsGroupedNodesDirectFeedbacksStats', 'ListGroupedInteractionsCompletionsGroupedNodesDirectFeedbacksStatsMetric', 'ListGroupedInteractionsCompletionsGroupedPageInfo', 'ListHarmonyGroups', 'ListHarmonyGroupsHarmonyGroups', 'ListInteractions', 'ListInteractionsCompletions', 'ListInteractionsCompletionsNodes', 'ListInteractionsCompletionsPageInfo', 'ListJobs', 'ListJobsFilterInput', 'ListJobsJobs', 'ListJobsJobsNodes', 
'ListJobsJobsPageInfo', 'ListJudgeVersions', 'ListJudgeVersionsJudgeVersions', 'ListJudges', 'ListJudgesJudges', 'ListMetrics', 'ListMetricsMetrics', 'ListModels', 'ListModelsModels', 'ListModelsModelsBackbone', 'ListPermissions', 'ListRemoteEnvs', 'ListRemoteEnvsRemoteEnvs', 'ListRoles', 'ListRolesRoles', 'ListTeams', 'ListTeamsTeams', 'ListUseCases', 'ListUseCasesUseCases', 'ListUsers', 'ListUsersUsers', 'LoadDataset', 'LoadDatasetCreateDataset', 'LockGrader', 'LockGraderLockGrader', 'Me', 'MeMe', 'MeMeApiKeys', 'MetricAggregation', 'MetricCreate', 'MetricData', 'MetricDataAdmin', 'MetricDataAdminUseCases', 'MetricGetOrCreate', 'MetricKind', 'MetricLink', 'MetricScoringType', 'MetricTrendInput', 'MetricUnlink', 'MetricWithContextData', 'ModelCapabilityFilter', 'ModelComputeConfigInput', 'ModelData', 'ModelDataAdmin', 'ModelDataAdminUseCases', 'ModelDataComputeConfig', 'ModelFilter', 'ModelOnline', 'ModelPlacementInput', 'ModelProviderDataInput', 'ModelServiceData', 'ModelServiceDataModel', 'ModelServiceDataModelBackbone', 'ModelServiceDisconnect', 'ModelServiceFilter', 'ModelserviceStatus', 'NvidiaProviderDataInput', 'OpenAIModel', 'OpenAIProviderDataInput', 'OrderPair', 'PrebuiltConfigInput', 'PrebuiltCriteriaKey', 'PrebuiltJudgeCreate', 'Protocol', 'ProviderName', 'RemoteConfigInput', 'RemoteEnvCreate', 'RemoteEnvData', 'RemoteEnvStatus', 'RemoveRemoteEnv', 'RemoveTeamMember', 'RemoveTeamMemberRemoveTeamMember', 'ResizePartitionInput', 'RoleCreate', 'SampleConfig', 'SelectionTypeInput', 'SessionStatus', 'ShareUseCase', 'ShareUseCaseShareUseCase', 'SortDirection', 'SystemPromptTemplateCreate', 'SystemPromptTemplateUpdate', 'TeamCreate', 'TeamMemberRemove', 'TeamMemberSet', 'TerminateModel', 'TestRemoteEnv', 'TestRemoteEnvTestRemoteEnvRemoteEnvTestOffline', 'TestRemoteEnvTestRemoteEnvRemoteEnvTestOnline', 'TimeRange', 'TimeseriesInput', 'TimeseriesInterval', 'UnitConfigInput', 'UnitPosition', 'UnlinkMetric', 'UpdateCompletion', 'UpdateCustomRecipe', 'UpdateCustomRecipeUpdateCustomRecipe', 'UpdateGrader', 'UpdateGraderUpdateGrader', 'UpdateJudge', 'UpdateJudgeUpdateJudge', 'UpdateModel', 'UpdateModelComputeConfig', 'UpdateModelComputeConfigUpdateModelComputeConfig', 'UpdateModelService', 'UpdateModelUpdateModelService', 'UpdateRecipeInput', 'UpdateToolProviderInput', 'UpdateUser', 'UpdateUserSetTeamMember', 'UpdateUserSetTeamMemberRole', 'UpdateUserSetTeamMemberTeam', 'UpdateUserSetTeamMemberUser', 'Upload', 'UsageFilterInput', 'UsagePerUseCaseFilterInput', 'UseCaseCreate', 'UseCaseData', 'UseCaseDataMetrics', 'UseCaseDataModelServices', 'UseCaseDataShares', 'UseCaseDataSharesRole', 'UseCaseDataSharesTeam', 'UseCaseFilter', 'UseCaseMetadataInput', 'UseCaseSettingsInput', 'UseCaseShareInput', 'UseCaseShares', 'UseCaseUpdate', 'UserCreate', 'UserCreateTeamWithRole', 'UserData', 'UserDataTeams', 'UserDataTeamsRole', 'UserDataTeamsTeam', 'WidgetInput']
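
The `__all__` change above is what makes the new multipart-upload types importable from the top of the generated client package. A minimal import sketch (assuming adaptive-sdk 0.1.9 is installed):

```python
# Names taken from the updated __all__ above; nothing here talks to a server.
from adaptive_sdk.graphql_client import (
    CreateDatasetFromMultipartUpload,    # mutation response model
    DatasetUploadProcessingStatus,       # query response model
    DatasetCreateFromMultipartUpload,    # mutation input type
    DatasetUploadProcessingStatusInput,  # query input type
)
```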
@@ -12,6 +12,7 @@ from .cancel_ab_campaign import CancelABCampaign
  from .cancel_job import CancelJob
  from .create_ab_campaign import CreateAbCampaign
  from .create_custom_recipe import CreateCustomRecipe
+ from .create_dataset_from_multipart_upload import CreateDatasetFromMultipartUpload
  from .create_grader import CreateGrader
  from .create_job import CreateJob
  from .create_judge import CreateJudge
@@ -21,6 +22,7 @@ from .create_role import CreateRole
  from .create_team import CreateTeam
  from .create_use_case import CreateUseCase
  from .create_user import CreateUser
+ from .dataset_upload_processing_status import DatasetUploadProcessingStatus
  from .delete_custom_recipe import DeleteCustomRecipe
  from .delete_dataset import DeleteDataset
  from .delete_grader import DeleteGrader
@@ -40,7 +42,7 @@ from .enums import CompletionGroupBy
  from .get_custom_recipe import GetCustomRecipe
  from .get_grader import GetGrader
  from .get_judge import GetJudge
- from .input_types import AbcampaignCreate, AbCampaignFilter, AddExternalModelInput, AddHFModelInput, AddModelInput, AttachModel, CreateRecipeInput, CursorPageInput, CustomRecipeFilterInput, DatasetCreate, GraderCreateInput, GraderUpdateInput, JobInput, JudgeCreate, JudgeUpdate, ListCompletionsFilterInput, ListJobsFilterInput, MetricCreate, MetricLink, MetricUnlink, ModelComputeConfigInput, ModelFilter, ModelPlacementInput, OrderPair, PrebuiltJudgeCreate, RemoteEnvCreate, RoleCreate, TeamCreate, TeamMemberRemove, TeamMemberSet, UpdateModelService, UpdateRecipeInput, UseCaseCreate, UseCaseShares, UserCreate
+ from .input_types import AbcampaignCreate, AbCampaignFilter, AddExternalModelInput, AddHFModelInput, AddModelInput, AttachModel, CreateRecipeInput, CursorPageInput, CustomRecipeFilterInput, DatasetCreate, DatasetCreateFromMultipartUpload, DatasetUploadProcessingStatusInput, GraderCreateInput, GraderUpdateInput, JobInput, JudgeCreate, JudgeUpdate, ListCompletionsFilterInput, ListJobsFilterInput, MetricCreate, MetricLink, MetricUnlink, ModelComputeConfigInput, ModelFilter, ModelPlacementInput, OrderPair, PrebuiltJudgeCreate, RemoteEnvCreate, RoleCreate, TeamCreate, TeamMemberRemove, TeamMemberSet, UpdateModelService, UpdateRecipeInput, UseCaseCreate, UseCaseShares, UserCreate
  from .link_metric import LinkMetric
  from .list_ab_campaigns import ListAbCampaigns
  from .list_compute_pools import ListComputePools
@@ -356,6 +358,13 @@ class AsyncGQLClient(AsyncBaseClientOpenTelemetry):
  data = self.get_data(response)
  return CancelJob.model_validate(data)
 
+ async def create_dataset_from_multipart_upload(self, input: DatasetCreateFromMultipartUpload, **kwargs: Any) -> CreateDatasetFromMultipartUpload:
+ query = gql('\n mutation CreateDatasetFromMultipartUpload($input: DatasetCreateFromMultipartUpload!) {\n createDatasetFromMultipartUpload(input: $input) {\n datasetId\n status\n totalParts\n processedParts\n progress\n error\n }\n }\n ')
+ variables: Dict[str, object] = {'input': input}
+ response = await self.execute(query=query, operation_name='CreateDatasetFromMultipartUpload', variables=variables, **kwargs)
+ data = self.get_data(response)
+ return CreateDatasetFromMultipartUpload.model_validate(data)
+
  async def list_datasets(self, input: Any, **kwargs: Any) -> ListDatasets:
  query = gql('\n query ListDatasets($input: IdOrKey!) {\n datasets(useCase: $input) {\n ...DatasetData\n }\n }\n\n fragment DatasetData on Dataset {\n id\n key\n name\n createdAt\n kind\n records\n metricsUsage {\n feedbackCount\n comparisonCount\n metric {\n ...MetricData\n }\n }\n }\n\n fragment MetricData on Metric {\n id\n key\n name\n kind\n description\n scoringType\n createdAt\n hasDirectFeedbacks\n hasComparisonFeedbacks\n }\n ')
  variables: Dict[str, object] = {'input': input}
@@ -580,6 +589,13 @@ class AsyncGQLClient(AsyncBaseClientOpenTelemetry):
  data = self.get_data(response)
  return DescribeJob.model_validate(data)
 
+ async def dataset_upload_processing_status(self, input: DatasetUploadProcessingStatusInput, **kwargs: Any) -> DatasetUploadProcessingStatus:
+ query = gql('\n query DatasetUploadProcessingStatus($input: DatasetUploadProcessingStatusInput!) {\n datasetUploadProcessingStatus(input: $input) {\n datasetId\n status\n totalParts\n processedParts\n progress\n error\n }\n }\n ')
+ variables: Dict[str, object] = {'input': input}
+ response = await self.execute(query=query, operation_name='DatasetUploadProcessingStatus', variables=variables, **kwargs)
+ data = self.get_data(response)
+ return DatasetUploadProcessingStatus.model_validate(data)
+
  async def execute_custom_operation(self, *fields: GraphQLField, operation_type: OperationType, operation_name: str) -> Dict[str, Any]:
  selections = self._build_selection_set(fields)
  combined_variables = self._combine_variables(fields)
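
Taken together, the two new `AsyncGQLClient` methods support a create-then-poll flow. The sketch below is illustrative only: it assumes an already-configured `AsyncGQLClient`, a finished upload session id, and a hypothetical use case key `"my-use-case"`. The synchronous `GQLClient` in the hunks further down exposes the same pair of methods without `await`.

```python
import asyncio

from adaptive_sdk.graphql_client import (
    AsyncGQLClient,
    DatasetCreateFromMultipartUpload,
    DatasetUploadProcessingStatusInput,
    SessionStatus,
)


async def create_dataset_and_wait(client: AsyncGQLClient, session_id: str):
    # Finalize a completed multipart upload session into a dataset.
    created = (
        await client.create_dataset_from_multipart_upload(
            input=DatasetCreateFromMultipartUpload(
                useCase="my-use-case",  # hypothetical use case key
                name="my-dataset",
                key="my-dataset",
                uploadSessionId=session_id,
            )
        )
    ).create_dataset_from_multipart_upload

    # Poll server-side processing until DONE, failing fast on ERROR.
    while True:
        status = (
            await client.dataset_upload_processing_status(
                input=DatasetUploadProcessingStatusInput(
                    useCase="my-use-case",
                    datasetId=created.dataset_id,
                )
            )
        ).dataset_upload_processing_status
        if status.status == SessionStatus.DONE:
            return created.dataset_id
        if status.status == SessionStatus.ERROR:
            raise RuntimeError(f"Upload processing failed: {status.error}")
        await asyncio.sleep(2)  # non-blocking pause between polls
```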
@@ -12,6 +12,7 @@ from .cancel_ab_campaign import CancelABCampaign
  from .cancel_job import CancelJob
  from .create_ab_campaign import CreateAbCampaign
  from .create_custom_recipe import CreateCustomRecipe
+ from .create_dataset_from_multipart_upload import CreateDatasetFromMultipartUpload
  from .create_grader import CreateGrader
  from .create_job import CreateJob
  from .create_judge import CreateJudge
@@ -21,6 +22,7 @@ from .create_role import CreateRole
  from .create_team import CreateTeam
  from .create_use_case import CreateUseCase
  from .create_user import CreateUser
+ from .dataset_upload_processing_status import DatasetUploadProcessingStatus
  from .delete_custom_recipe import DeleteCustomRecipe
  from .delete_dataset import DeleteDataset
  from .delete_grader import DeleteGrader
@@ -40,7 +42,7 @@ from .enums import CompletionGroupBy
  from .get_custom_recipe import GetCustomRecipe
  from .get_grader import GetGrader
  from .get_judge import GetJudge
- from .input_types import AbcampaignCreate, AbCampaignFilter, AddExternalModelInput, AddHFModelInput, AddModelInput, AttachModel, CreateRecipeInput, CursorPageInput, CustomRecipeFilterInput, DatasetCreate, GraderCreateInput, GraderUpdateInput, JobInput, JudgeCreate, JudgeUpdate, ListCompletionsFilterInput, ListJobsFilterInput, MetricCreate, MetricLink, MetricUnlink, ModelComputeConfigInput, ModelFilter, ModelPlacementInput, OrderPair, PrebuiltJudgeCreate, RemoteEnvCreate, RoleCreate, TeamCreate, TeamMemberRemove, TeamMemberSet, UpdateModelService, UpdateRecipeInput, UseCaseCreate, UseCaseShares, UserCreate
+ from .input_types import AbcampaignCreate, AbCampaignFilter, AddExternalModelInput, AddHFModelInput, AddModelInput, AttachModel, CreateRecipeInput, CursorPageInput, CustomRecipeFilterInput, DatasetCreate, DatasetCreateFromMultipartUpload, DatasetUploadProcessingStatusInput, GraderCreateInput, GraderUpdateInput, JobInput, JudgeCreate, JudgeUpdate, ListCompletionsFilterInput, ListJobsFilterInput, MetricCreate, MetricLink, MetricUnlink, ModelComputeConfigInput, ModelFilter, ModelPlacementInput, OrderPair, PrebuiltJudgeCreate, RemoteEnvCreate, RoleCreate, TeamCreate, TeamMemberRemove, TeamMemberSet, UpdateModelService, UpdateRecipeInput, UseCaseCreate, UseCaseShares, UserCreate
  from .link_metric import LinkMetric
  from .list_ab_campaigns import ListAbCampaigns
  from .list_compute_pools import ListComputePools
@@ -356,6 +358,13 @@ class GQLClient(BaseClientOpenTelemetry):
  data = self.get_data(response)
  return CancelJob.model_validate(data)
 
+ def create_dataset_from_multipart_upload(self, input: DatasetCreateFromMultipartUpload, **kwargs: Any) -> CreateDatasetFromMultipartUpload:
+ query = gql('\n mutation CreateDatasetFromMultipartUpload($input: DatasetCreateFromMultipartUpload!) {\n createDatasetFromMultipartUpload(input: $input) {\n datasetId\n status\n totalParts\n processedParts\n progress\n error\n }\n }\n ')
+ variables: Dict[str, object] = {'input': input}
+ response = self.execute(query=query, operation_name='CreateDatasetFromMultipartUpload', variables=variables, **kwargs)
+ data = self.get_data(response)
+ return CreateDatasetFromMultipartUpload.model_validate(data)
+
  def list_datasets(self, input: str, **kwargs: Any) -> ListDatasets:
  query = gql('\n query ListDatasets($input: IdOrKey!) {\n datasets(useCase: $input) {\n ...DatasetData\n }\n }\n\n fragment DatasetData on Dataset {\n id\n key\n name\n createdAt\n kind\n records\n metricsUsage {\n feedbackCount\n comparisonCount\n metric {\n ...MetricData\n }\n }\n }\n\n fragment MetricData on Metric {\n id\n key\n name\n kind\n description\n scoringType\n createdAt\n hasDirectFeedbacks\n hasComparisonFeedbacks\n }\n ')
  variables: Dict[str, object] = {'input': input}
@@ -580,6 +589,13 @@ class GQLClient(BaseClientOpenTelemetry):
  data = self.get_data(response)
  return DescribeJob.model_validate(data)
 
+ def dataset_upload_processing_status(self, input: DatasetUploadProcessingStatusInput, **kwargs: Any) -> DatasetUploadProcessingStatus:
+ query = gql('\n query DatasetUploadProcessingStatus($input: DatasetUploadProcessingStatusInput!) {\n datasetUploadProcessingStatus(input: $input) {\n datasetId\n status\n totalParts\n processedParts\n progress\n error\n }\n }\n ')
+ variables: Dict[str, object] = {'input': input}
+ response = self.execute(query=query, operation_name='DatasetUploadProcessingStatus', variables=variables, **kwargs)
+ data = self.get_data(response)
+ return DatasetUploadProcessingStatus.model_validate(data)
+
  def execute_custom_operation(self, *fields: GraphQLField, operation_type: OperationType, operation_name: str) -> Dict[str, Any]:
  selections = self._build_selection_set(fields)
  combined_variables = self._combine_variables(fields)
@@ -0,0 +1,18 @@
+ from typing import Any, Optional
+ from pydantic import Field
+ from .base_model import BaseModel
+ from .enums import SessionStatus
+
+ class CreateDatasetFromMultipartUpload(BaseModel):
+ """@public"""
+ create_dataset_from_multipart_upload: 'CreateDatasetFromMultipartUploadCreateDatasetFromMultipartUpload' = Field(alias='createDatasetFromMultipartUpload')
+
+ class CreateDatasetFromMultipartUploadCreateDatasetFromMultipartUpload(BaseModel):
+ """@public"""
+ dataset_id: Any = Field(alias='datasetId')
+ status: SessionStatus
+ total_parts: int = Field(alias='totalParts')
+ processed_parts: int = Field(alias='processedParts')
+ progress: float
+ error: Optional[str]
+ CreateDatasetFromMultipartUpload.model_rebuild()
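
For reference, the aliases above mean the camelCase GraphQL payload validates straight into snake_case attributes. An illustrative round-trip (payload values are made up, and it assumes `SessionStatus` values mirror the member names used elsewhere in this diff, e.g. `DONE`):

```python
from adaptive_sdk.graphql_client import CreateDatasetFromMultipartUpload

payload = {
    "createDatasetFromMultipartUpload": {
        "datasetId": "ds_123",  # made-up identifier
        "status": "DONE",
        "totalParts": 12,
        "processedParts": 12,
        "progress": 1.0,
        "error": None,
    }
}
result = CreateDatasetFromMultipartUpload.model_validate(payload)
print(result.create_dataset_from_multipart_upload.processed_parts)  # -> 12
```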
@@ -35,6 +35,12 @@ class Query:
  cleared_arguments = {key: value for key, value in arguments.items() if value['value'] is not None}
  return CustomRecipeFields(field_name='customRecipe', arguments=cleared_arguments)
 
+ @classmethod
+ def parse_recipe_schema(cls, recipe_content: str) -> GraphQLField:
+ arguments: Dict[str, Dict[str, Any]] = {'recipeContent': {'type': 'String!', 'value': recipe_content}}
+ cleared_arguments = {key: value for key, value in arguments.items() if value['value'] is not None}
+ return GraphQLField(field_name='parseRecipeSchema', arguments=cleared_arguments)
+
  @classmethod
  def datasets(cls, use_case: str) -> DatasetFields:
  arguments: Dict[str, Dict[str, Any]] = {'useCase': {'type': 'IdOrKey!', 'value': use_case}}
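
`parse_recipe_schema` plugs into the same custom-operations machinery as the other `Query` builders. A hedged sketch of executing it via `execute_custom_operation` (shown in the client hunks above); the `OperationType` import path is an assumption about the generated client's graphql-core dependency, and the recipe content is a placeholder:

```python
from graphql import OperationType  # assumption: graphql-core's enum, as used by the generated client

from adaptive_sdk.graphql_client import GQLClient
from adaptive_sdk.graphql_client.custom_queries import Query


def parse_recipe(client: GQLClient, recipe_content: str) -> dict:
    # Build the ad-hoc field added in 0.1.9 and run it as a custom query.
    field = Query.parse_recipe_schema(recipe_content=recipe_content)
    data = client.execute_custom_operation(
        field, operation_type=OperationType.QUERY, operation_name="ParseRecipeSchema"
    )
    return data["parseRecipeSchema"]
```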
@@ -0,0 +1,18 @@
+ from typing import Any, Optional
+ from pydantic import Field
+ from .base_model import BaseModel
+ from .enums import SessionStatus
+
+ class DatasetUploadProcessingStatus(BaseModel):
+ """@public"""
+ dataset_upload_processing_status: 'DatasetUploadProcessingStatusDatasetUploadProcessingStatus' = Field(alias='datasetUploadProcessingStatus')
+
+ class DatasetUploadProcessingStatusDatasetUploadProcessingStatus(BaseModel):
+ """@public"""
+ dataset_id: Any = Field(alias='datasetId')
+ status: SessionStatus
+ total_parts: int = Field(alias='totalParts')
+ processed_parts: int = Field(alias='processedParts')
+ progress: float
+ error: Optional[str]
+ DatasetUploadProcessingStatus.model_rebuild()
@@ -49,6 +49,7 @@ class DatasetSource(str, Enum):
 
  class DatasetStatus(str, Enum):
  """@public"""
+ PENDING = 'PENDING'
  PROCESSING = 'PROCESSING'
  READY = 'READY'
  FAILED = 'FAILED'
@@ -2,22 +2,20 @@ from __future__ import annotations
  import json
  import math
  import os
+ import time
  from pathlib import Path
- from typing import List, Literal, TYPE_CHECKING
+ from typing import List, TYPE_CHECKING
 
  from adaptive_sdk.graphql_client import (
  DatasetCreate,
  Upload,
- LoadDatasetCreateDataset,
  ListDatasetsDatasets,
  DatasetData,
  DatasetCreateFromMultipartUpload,
+ DatasetUploadProcessingStatusInput,
+ SessionStatus,
  )
- from adaptive_sdk.graphql_client.custom_mutations import Mutation
- from adaptive_sdk.graphql_client.custom_fields import (
- DatasetFields,
- DatasetUploadProcessingStatusFields,
- )
+
  from adaptive_sdk.rest import rest_types
  from adaptive_sdk.error_handling import rest_error_handler
 
@@ -29,23 +27,12 @@ if TYPE_CHECKING:
  MIN_CHUNK_SIZE_BYTES = 5 * 1024 * 1024 # 5MB
  MAX_CHUNK_SIZE_BYTES = 100 * 1024 * 1024 # 100MB
  MAX_PARTS_COUNT = 10000
+ INIT_CHUNKED_UPLOAD_ROUTE = "/upload/init"
+ UPLOAD_PART_ROUTE = "/upload/part"
+ ABORT_CHUNKED_UPLOAD_ROUTE = "/upload/abort"
 
 
  def _calculate_upload_parts(file_size: int) -> tuple[int, int]:
- """
- Calculate optimal number of parts and chunk size for multipart upload.
-
- Strategy: Scale chunk size based on file size for optimal performance
-
- Args:
- file_size: Size of the file in bytes
-
- Returns:
- Tuple of (total_parts, chunk_size_bytes)
-
- Raises:
- ValueError: If file is too large to upload with the given constraints
- """
  if file_size < MIN_CHUNK_SIZE_BYTES:
  raise ValueError(f"File size ({file_size:,} bytes) is too small for chunked upload")
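
The removed docstring is the only description of `_calculate_upload_parts` left in the diff, so the following is an illustrative reconstruction under the constants above, not the package's actual sizing algorithm; it only shows how the 5 MB–100 MB chunk bounds and the 10,000-part cap interact:

```python
import math

MIN_CHUNK_SIZE_BYTES = 5 * 1024 * 1024    # 5MB, as above
MAX_CHUNK_SIZE_BYTES = 100 * 1024 * 1024  # 100MB
MAX_PARTS_COUNT = 10000


def sketch_calculate_upload_parts(file_size: int) -> tuple[int, int]:
    """Pick the smallest chunk size that keeps the part count within bounds."""
    if file_size < MIN_CHUNK_SIZE_BYTES:
        raise ValueError("file too small for chunked upload")
    chunk_size = max(MIN_CHUNK_SIZE_BYTES, math.ceil(file_size / MAX_PARTS_COUNT))
    if chunk_size > MAX_CHUNK_SIZE_BYTES:
        raise ValueError("file too large for chunked upload")
    total_parts = math.ceil(file_size / chunk_size)
    return total_parts, chunk_size


# A 1 GiB file yields 205 parts of 5 MiB; the hard ceiling implied by the
# constants is MAX_PARTS_COUNT * MAX_CHUNK_SIZE_BYTES, roughly 1 TB.
```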
 
@@ -97,19 +84,15 @@ class Datasets(SyncAPIResource, UseCaseResource): # type: ignore[misc]
  dataset_key: str,
  name: str | None = None,
  use_case: str | None = None,
- ) -> LoadDatasetCreateDataset:
+ ) -> DatasetData:
  """
- Upload a dataset from a file. File must be jsonl, where each line should match structure in example below.
+ Upload a dataset from a file. File must be jsonl, where each line should match supported structure.
 
  Args:
  file_path: Path to jsonl file.
  dataset_key: New dataset key.
  name: Optional name to render in UI; if `None`, defaults to same as `dataset_key`.
 
- Example:
- ```
- {"messages": [{"role": "system", "content": "<optional system prompt>"}, {"role": "user", "content": "<user content>"}, {"role": "assistant", "content": "<assistant answer>"}], "completion": "hey"}
- ```
  """
  file_size = os.path.getsize(file_path)
 
@@ -124,12 +107,8 @@ class Datasets(SyncAPIResource, UseCaseResource): # type: ignore[misc]
  )
  filename = Path(file_path).stem
  with open(file_path, "rb") as f:
- file_upload = Upload(
- filename=filename, content=f, content_type="application/jsonl"
- )
- return self._gql_client.load_dataset(
- input=input, file=file_upload
- ).create_dataset
+ file_upload = Upload(filename=filename, content=f, content_type="application/jsonl")
+ return self._gql_client.load_dataset(input=input, file=file_upload).create_dataset
 
  def _chunked_upload(
  self,
@@ -137,78 +116,73 @@ class Datasets(SyncAPIResource, UseCaseResource): # type: ignore[misc]
  dataset_key: str,
  name: str | None = None,
  use_case: str | None = None,
- ) -> LoadDatasetCreateDataset:
+ ) -> DatasetData:
  """Upload large files using chunked upload via REST API."""
  file_size = os.path.getsize(file_path)
  total_parts, chunk_size = _calculate_upload_parts(file_size)
 
- # Step 1: Initialize chunked upload session
  init_request = rest_types.InitChunkedUploadRequest(
  content_type="application/jsonl",
  metadata=None,
  total_parts_count=total_parts,
  )
- response = self._rest_client.post(
- "/upload/init", json=init_request.model_dump()
- )
+ response = self._rest_client.post(INIT_CHUNKED_UPLOAD_ROUTE, json=init_request.model_dump())
  rest_error_handler(response)
- init_response = rest_types.InitChunkedUploadResponse.model_validate(
- response.json()
- )
+ init_response = rest_types.InitChunkedUploadResponse.model_validate(response.json())
  session_id = init_response.session_id
 
  try:
- # Step 2: Upload each part
  with open(file_path, "rb") as f:
  for part_number in range(1, total_parts + 1):
  chunk_data = f.read(chunk_size)
 
  response = self._rest_client.post(
- "/upload/part",
+ UPLOAD_PART_ROUTE,
  params={"session_id": session_id, "part_number": part_number},
  content=chunk_data,
  headers={"Content-Type": "application/octet-stream"},
  )
  rest_error_handler(response)
 
- # Step 3: Finalize upload by creating dataset from multipart upload
  input = DatasetCreateFromMultipartUpload(
  useCase=self.use_case_key(use_case),
  name=name if name else dataset_key,
  key=dataset_key,
  uploadSessionId=session_id,
  )
+ create_dataset_result = self._gql_client.create_dataset_from_multipart_upload(
+ input=input
+ ).create_dataset_from_multipart_upload
+
+ upload_done = False
+ while not upload_done:
+ check_progress_result = self._gql_client.dataset_upload_processing_status(
+ input=DatasetUploadProcessingStatusInput(
+ useCase=self.use_case_key(use_case), datasetId=create_dataset_result.dataset_id
+ )
+ ).dataset_upload_processing_status
+ if check_progress_result.status == SessionStatus.DONE:
+ upload_done = True
+ elif check_progress_result.status == SessionStatus.ERROR:
+ raise Exception(f"Upload failed: {check_progress_result.error}")
+ else:
+ time.sleep(2)
 
- mutation_field = Mutation.create_dataset_from_multipart_upload(input=input)
- mutation_field.fields(
- DatasetUploadProcessingStatusFields.dataset_id,
- DatasetUploadProcessingStatusFields.total_parts,
- DatasetUploadProcessingStatusFields.processed_parts,
- DatasetUploadProcessingStatusFields.progress,
- DatasetUploadProcessingStatusFields.error,
- DatasetUploadProcessingStatusFields.status,
- )
- result = self._gql_client.mutation(
- mutation_field, operation_name="CreateDatasetFromMultipartUpload"
- )
- return LoadDatasetCreateDataset.model_validate(
- result["createDatasetFromMultipartUpload"]
- )
+ dataset_data = self.get(create_dataset_result.dataset_id, use_case=self.use_case_key(use_case))
+ assert dataset_data is not None
+
+ return dataset_data
 
- except Exception as e:
- # Abort the upload session on error
+ except Exception:
  try:
- abort_request = rest_types.AbortChunkedUploadRequest(
- session_id=session_id
- )
- # delete with body works in runtime but not in type checking
- self._rest_client.delete( # type: ignore[call-arg]
- "/upload/abort",
- content=json.dumps(abort_request.model_dump()),
+ abort_request = rest_types.AbortChunkedUploadRequest(session_id=session_id)
+ self._rest_client.delete(
+ ABORT_CHUNKED_UPLOAD_ROUTE,
+ content=json.dumps(abort_request.model_dump()), # type: ignore[call-arg]
  headers={"Content-Type": "application/json"},
  )
  except Exception:
- pass # Best effort cleanup
+ pass
  raise
 
  def list(
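
Outside the SDK, the same wire protocol can be driven by hand: POST `/upload/init` with the part count, stream each chunk to `/upload/part`, then finalize with the `createDatasetFromMultipartUpload` mutation (or DELETE `/upload/abort` on failure). The sketch below mirrors the requests made above; the base URL is hypothetical, authentication is omitted, and the raw JSON key `session_id` is assumed from the pydantic field name.

```python
import json
import math
from pathlib import Path

import httpx  # any HTTP client works; routes and payload shapes follow the code above

BASE_URL = "https://adaptive.example.com/api"  # hypothetical deployment URL
CHUNK_SIZE = 5 * 1024 * 1024


def chunked_upload(file_path: str) -> str:
    """Return an upload session id to pass as DatasetCreateFromMultipartUpload.uploadSessionId."""
    total_parts = math.ceil(Path(file_path).stat().st_size / CHUNK_SIZE)
    with httpx.Client(base_url=BASE_URL) as http:
        init = http.post("/upload/init", json={
            "content_type": "application/jsonl",
            "metadata": None,
            "total_parts_count": total_parts,
        })
        init.raise_for_status()
        session_id = init.json()["session_id"]  # assumed response key
        try:
            with open(file_path, "rb") as f:
                for part_number in range(1, total_parts + 1):
                    part = http.post(
                        "/upload/part",
                        params={"session_id": session_id, "part_number": part_number},
                        content=f.read(CHUNK_SIZE),
                        headers={"Content-Type": "application/octet-stream"},
                    )
                    part.raise_for_status()
        except Exception:
            # Best-effort abort, mirroring the SDK's cleanup path.
            http.request(
                "DELETE",
                "/upload/abort",
                content=json.dumps({"session_id": session_id}),
                headers={"Content-Type": "application/json"},
            )
            raise
    return session_id
```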
@@ -227,15 +201,11 @@ class Datasets(SyncAPIResource, UseCaseResource): # type: ignore[misc]
  Args:
  key: Dataset key.
  """
- return self._gql_client.describe_dataset(
- key, self.use_case_key(use_case)
- ).dataset
+ return self._gql_client.describe_dataset(key, self.use_case_key(use_case)).dataset
 
  def delete(self, key: str, use_case: str | None = None) -> bool:
  """Delete dataset."""
- return self._gql_client.delete_dataset(
- id_or_key=key, use_case=self.use_case_key(use_case)
- ).delete_dataset
+ return self._gql_client.delete_dataset(id_or_key=key, use_case=self.use_case_key(use_case)).delete_dataset
 
 
  class AsyncDatasets(AsyncAPIResource, UseCaseResource): # type: ignore[misc]
@@ -249,7 +219,7 @@ class AsyncDatasets(AsyncAPIResource, UseCaseResource): # type: ignore[misc]
  dataset_key: str,
  name: str | None = None,
  use_case: str | None = None,
- ) -> LoadDatasetCreateDataset:
+ ) -> DatasetData:
  """
  Upload a dataset from a file. File must be jsonl, where each line should match structure in example below.
 
@@ -277,12 +247,8 @@ class AsyncDatasets(AsyncAPIResource, UseCaseResource): # type: ignore[misc]
  )
  filename = Path(file_path).stem
  with open(file_path, "rb") as f:
- file_upload = Upload(
- filename=filename, content=f, content_type="application/jsonl"
- )
- upload_result = await self._gql_client.load_dataset(
- input=input, file=file_upload
- )
+ file_upload = Upload(filename=filename, content=f, content_type="application/jsonl")
+ upload_result = await self._gql_client.load_dataset(input=input, file=file_upload)
  return upload_result.create_dataset
 
  async def _chunked_upload(
@@ -291,34 +257,28 @@ class AsyncDatasets(AsyncAPIResource, UseCaseResource): # type: ignore[misc]
  dataset_key: str,
  name: str | None = None,
  use_case: str | None = None,
- ) -> LoadDatasetCreateDataset:
+ ) -> DatasetData:
  """Upload large files using chunked upload via REST API."""
  file_size = os.path.getsize(file_path)
  total_parts, chunk_size = _calculate_upload_parts(file_size)
 
- # Step 1: Initialize chunked upload session
  init_request = rest_types.InitChunkedUploadRequest(
  content_type="application/jsonl",
  metadata=None,
  total_parts_count=total_parts,
  )
- response = await self._rest_client.post(
- "/upload/init", json=init_request.model_dump()
- )
+ response = await self._rest_client.post(INIT_CHUNKED_UPLOAD_ROUTE, json=init_request.model_dump())
  rest_error_handler(response)
- init_response = rest_types.InitChunkedUploadResponse.model_validate(
- response.json()
- )
+ init_response = rest_types.InitChunkedUploadResponse.model_validate(response.json())
  session_id = init_response.session_id
 
  try:
- # Step 2: Upload each part
  with open(file_path, "rb") as f:
  for part_number in range(1, total_parts + 1):
  chunk_data = f.read(chunk_size)
 
  response = await self._rest_client.post(
- "/upload/part",
+ UPLOAD_PART_ROUTE,
  params={"session_id": session_id, "part_number": part_number},
  content=chunk_data,
  headers={"Content-Type": "application/octet-stream"},
@@ -331,33 +291,37 @@ class AsyncDatasets(AsyncAPIResource, UseCaseResource): # type: ignore[misc]
  key=dataset_key,
  uploadSessionId=session_id,
  )
+ create_dataset_result = (
+ await self._gql_client.create_dataset_from_multipart_upload(input=input)
+ ).create_dataset_from_multipart_upload
+
+ upload_done = False
+ while not upload_done:
+ check_progress_result = (
+ await self._gql_client.dataset_upload_processing_status(
+ input=DatasetUploadProcessingStatusInput(
+ useCase=self.use_case_key(use_case), datasetId=create_dataset_result.dataset_id
+ )
+ )
+ ).dataset_upload_processing_status
+ if check_progress_result.status == SessionStatus.DONE:
+ upload_done = True
+ elif check_progress_result.status == SessionStatus.ERROR:
+ raise Exception(f"Upload failed: {check_progress_result.error}")
+ else:
+ time.sleep(2)
 
- mutation_field = Mutation.create_dataset_from_multipart_upload(input=input)
- mutation_field.fields(
- DatasetUploadProcessingStatusFields.dataset_id,
- DatasetUploadProcessingStatusFields.total_parts,
- DatasetUploadProcessingStatusFields.processed_parts,
- DatasetUploadProcessingStatusFields.progress,
- DatasetUploadProcessingStatusFields.error,
- DatasetUploadProcessingStatusFields.status,
- )
- result = await self._gql_client.mutation(
- mutation_field, operation_name="CreateDatasetFromMultipartUpload"
- )
- return LoadDatasetCreateDataset.model_validate(
- result["createDatasetFromMultipartUpload"]
- )
+ dataset_data = await self.get(create_dataset_result.dataset_id, use_case=self.use_case_key(use_case))
+ assert dataset_data is not None
 
- except Exception as e:
- # Abort the upload session on error
+ return dataset_data
+
+ except Exception:
  try:
- abort_request = rest_types.AbortChunkedUploadRequest(
- session_id=session_id
- )
- # delete with body works in runtime but not in type checking
- await self._rest_client.delete( # type: ignore[call-arg]
- "/upload/abort",
- content=json.dumps(abort_request.model_dump()),
+ abort_request = rest_types.AbortChunkedUploadRequest(session_id=session_id)
+ _ = await self._rest_client.delete(
+ ABORT_CHUNKED_UPLOAD_ROUTE,
+ content=json.dumps(abort_request.model_dump()), # type: ignore[call-arg]
  headers={"Content-Type": "application/json"},
  )
  except Exception:
@@ -381,15 +345,11 @@ class AsyncDatasets(AsyncAPIResource, UseCaseResource): # type: ignore[misc]
  Args:
  key: Dataset key.
  """
- result = await self._gql_client.describe_dataset(
- key, self.use_case_key(use_case)
- )
+ result = await self._gql_client.describe_dataset(key, self.use_case_key(use_case))
  return result.dataset
 
  async def delete(self, key: str, use_case: str | None = None) -> bool:
  """Delete dataset."""
  return (
- await self._gql_client.delete_dataset(
- id_or_key=key, use_case=self.use_case_key(use_case)
- )
+ await self._gql_client.delete_dataset(id_or_key=key, use_case=self.use_case_key(use_case))
  ).delete_dataset
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: adaptive-sdk
- Version: 0.1.8
+ Version: 0.1.9
  Summary: Python SDK for Adaptive Engine
  Author-email: Vincent Debergue <vincent@adaptive-ml.com>, Joao Moura <joao@adaptive-ml.com>, Yacine Bouraoui <yacine@adaptive-ml.com>
  Requires-Python: >=3.10
@@ -13,22 +13,23 @@ adaptive_sdk/external/reward_client.py,sha256=TSVdi437McUkMModDQV6ZswduaDmmG1Bvk
  adaptive_sdk/external/reward_server.py,sha256=yT8vqAEIoaq8nFZYaku5IoK0_7fX9uX_dfF1CxcDKnA,7488
  adaptive_sdk/external/reward_types.py,sha256=aZmTolT0OjgObo-64zJkarUKOY4RdxHnsQt8AKAaq1w,1710
  adaptive_sdk/external/websocket_utils.py,sha256=QN-K1IRbqe1LdQTz6vXhVgn-R2SjOB80NjGD2w_LaAo,1284
- adaptive_sdk/graphql_client/__init__.py,sha256=mlVK5fr6Sazx2vMUsX73SU1cd2-kAFgg1fKCiliYirk,20995
+ adaptive_sdk/graphql_client/__init__.py,sha256=TjBs6TpZW1A4RHbwi_PPRGTweEKlaT9RntGYAsycZbE,21479
  adaptive_sdk/graphql_client/add_external_model.py,sha256=9VpQHlJMm5t_Ja_SX2MkYcSG1IQpem6mrdXMfNTa1oo,352
  adaptive_sdk/graphql_client/add_hf_model.py,sha256=aC2IcYftepe28Hi01Kg0w3A7bjKrStWasx6aXiqghJU,312
  adaptive_sdk/graphql_client/add_model.py,sha256=Uyhpxkziz1Pv2S7Q0wOaKGK4krjmEf2PeLK0yEs89Hs,461
  adaptive_sdk/graphql_client/add_remote_env.py,sha256=X94F94tnMzuX9TC_Ieowngw35gJxnKL2YST2sqEJ7CY,328
  adaptive_sdk/graphql_client/async_base_client_open_telemetry.py,sha256=XSRJGEcyfWnFjrDz4Un6xfQWOxr7jWto1vYjbYTQeo0,19761
- adaptive_sdk/graphql_client/async_client.py,sha256=LvX0-wOVVh7lhyQUzbY9jbeEuyjir8RNharyLXpXmi4,115656
+ adaptive_sdk/graphql_client/async_client.py,sha256=nS2veg60CXt-H41Go5OBlU2_ORbr-IFq17abJJmTJTA,117553
  adaptive_sdk/graphql_client/attach_model_to_use_case.py,sha256=WyERM4wxKrUS1u9VZ3FUasZu5AVSbRswzy9jmxssTFM,361
  adaptive_sdk/graphql_client/base_client_open_telemetry.py,sha256=IV96gRr5FuH-dWMU5PBvQhTwEsV7udsXU-Dyh-Mx-4k,9398
  adaptive_sdk/graphql_client/base_model.py,sha256=2xxKA4sIdlGPIezw06XP9bseSxBURU7nivgt_gL44iA,621
  adaptive_sdk/graphql_client/base_operation.py,sha256=aooq1M4r79cvMoganZ2RvQ3-v0io22fGLOVfl3UBqPQ,4635
  adaptive_sdk/graphql_client/cancel_ab_campaign.py,sha256=JAEpmedz0jOu90U3KR0PYCMAhf2_E6h6WOT30HSE91k,176
  adaptive_sdk/graphql_client/cancel_job.py,sha256=3ZLoqrULi1mp5A5i4rD-gXliKhD8038IPfvCNBg0dPs,291
- adaptive_sdk/graphql_client/client.py,sha256=P9BgiDKnfU5GOTAQ0MoZpnqLWPzxWnvfvlIzA8OKVV4,114747
+ adaptive_sdk/graphql_client/client.py,sha256=QrXhdCjekvHaENH6Vrc9KQ7xItVywC4Z8XodFNnwgCk,116620
  adaptive_sdk/graphql_client/create_ab_campaign.py,sha256=___2iFSGnWKkjXz_MfxKUxi1EoQcSBv9AX8S7YoUeVw,374
  adaptive_sdk/graphql_client/create_custom_recipe.py,sha256=eIVLDHbdFG2qWjoZBLC3Xs40Fjy6L-u6wrABV5ibUxo,382
+ adaptive_sdk/graphql_client/create_dataset_from_multipart_upload.py,sha256=eoqOfEviCFww5ElTknknV0qOpFTJQmQ1BeGLsRQ5iHc,730
  adaptive_sdk/graphql_client/create_grader.py,sha256=H6xdf7Zn7JkOehINWMkULH3ObASrtvPLKaX-BjW-3zw,321
  adaptive_sdk/graphql_client/create_job.py,sha256=nXthwdViHMxsTOFoHbY1Uy2m_7kxE1vp_c8XhNyLaRA,291
  adaptive_sdk/graphql_client/create_judge.py,sha256=ZusgRcEdABVH9PNS4f2MYzEK4J7ocz9PWJgQhz9FvHo,311
@@ -40,8 +41,9 @@ adaptive_sdk/graphql_client/create_use_case.py,sha256=sekD76jWCo3zNCfMsBGhVYfNSI
  adaptive_sdk/graphql_client/create_user.py,sha256=gurD0kZgncXt1HBr7Oo5AkK5ubqFKpJvaR1rn506gHo,301
  adaptive_sdk/graphql_client/custom_fields.py,sha256=T5QG7zjX6URKy1lLkmbfjCNCkTU3pbqHn_3MzuYE3AA,98874
  adaptive_sdk/graphql_client/custom_mutations.py,sha256=meo96-odIhk_NoFL_uDj78xguLOD-_DIYW2bj6ilvII,25341
- adaptive_sdk/graphql_client/custom_queries.py,sha256=QsFX6ovy1W9nUuCD0-wo9GhiJACl9yQGCifHnKIs6Bo,17359
+ adaptive_sdk/graphql_client/custom_queries.py,sha256=PffzVJE0UO9OE-LulWFRmv7iuT7gmO7KtA3Lwtd_fUY,17755
  adaptive_sdk/graphql_client/custom_typing_fields.py,sha256=aS7Oj2z-oLcIqMZrVNNM9wVxXQK6rYSTF1GaXAnFVr4,20608
+ adaptive_sdk/graphql_client/dataset_upload_processing_status.py,sha256=Xwj9bxmRf0RVxMT5kf30yX0vQaCYEuTI5-alCiqedqI,705
  adaptive_sdk/graphql_client/delete_custom_recipe.py,sha256=tsWGqherJyVe5OBFG8QCG5GUdlX8s-nd8MSiMAXfSqk,183
  adaptive_sdk/graphql_client/delete_dataset.py,sha256=k5enUd6zO89RmA349eVzYajtZigWjOdDdPTcUzTFahI,167
  adaptive_sdk/graphql_client/delete_grader.py,sha256=U9r26BtvOaThzyf0VGouvkuEaJ1wJGPGjbHluSDWBsc,350
@@ -57,7 +59,7 @@ adaptive_sdk/graphql_client/describe_metric_admin.py,sha256=_SKKwnFhZnbOTT97elEr
  adaptive_sdk/graphql_client/describe_model.py,sha256=UnsOnAyBjNsnkJaS4q5uwkSSvInHwRqUj3XqAoO0yO4,434
  adaptive_sdk/graphql_client/describe_model_admin.py,sha256=XUt_CBSMw1HRleUEWZn2snYt2BNSux_siqrVlwtqH-w,484
  adaptive_sdk/graphql_client/describe_use_case.py,sha256=WW0QkTmdfggN8YBUNGi8ShrP_fr2jXPR6Fer6jlQxu0,353
- adaptive_sdk/graphql_client/enums.py,sha256=NykoTVLryXuy6pbHDRLfAN3m7EUi8B2Evbgp41Em7sQ,5168
+ adaptive_sdk/graphql_client/enums.py,sha256=_tBbVdGt-Bj5Whp9Tr6c-zpE_ZRdUn2BGh9AQtTiAHE,5192
  adaptive_sdk/graphql_client/exceptions.py,sha256=NiC6v-5S7aRDlvQTcHH3K5KvxWvk-c-PkIQQHkipTB8,2268
  adaptive_sdk/graphql_client/fragments.py,sha256=_F3IgC3lJN4erM5Ory1bnvnLbzjZ9Z1Xfv4jdw8dJEo,22046
  adaptive_sdk/graphql_client/get_custom_recipe.py,sha256=7qxBZGQTqpc69k-NwzgFctaHWaRz0tHl7YlVSsEad6U,383
@@ -108,7 +110,7 @@ adaptive_sdk/resources/abtests.py,sha256=9PCPjxuWwY9ec88ewnq54gkoELq5U5iaBmHhzLC
  adaptive_sdk/resources/base_resource.py,sha256=D9adWSFxiDB7chVstDuBu1jcuXkE71UQJexnWENpC4A,1497
  adaptive_sdk/resources/chat.py,sha256=k-jh_IZfKwqoWNN2P5BhAM3P_f5HLybVZp35pUadwso,11954
  adaptive_sdk/resources/compute_pools.py,sha256=4eHP8FMkZOsGPjZ-qBvda2PunA6GMyvvJTHnhAEGTo4,938
- adaptive_sdk/resources/datasets.py,sha256=TjYkU4eKY-Si5ffQhMkX3sIEj5Ybz1U3ksOJs4Fa6BA,14622
+ adaptive_sdk/resources/datasets.py,sha256=sgGP2BwhaezaGei8xXoH0aKHyZFc64ZvIllxFUKNTd8,13648
  adaptive_sdk/resources/embeddings.py,sha256=-ov_EChHU6PJJOJRtDlCo4sYyr9hwyvRjnBhub8QNFg,3922
  adaptive_sdk/resources/feedback.py,sha256=lujqwFIhxi6iovL8JWL05Kr-gkzR4QEwUXZbTx33raA,14116
  adaptive_sdk/resources/graders.py,sha256=ekQQ5fqmLZpZHeLr6iUm6m45wDevoDJdj3mG-axR-m8,29014
@@ -124,6 +126,6 @@ adaptive_sdk/resources/users.py,sha256=SoGWwdDCdhK4KjYOcAws-ZWlW7Edii7D3Vxfdu-NZ
  adaptive_sdk/rest/__init__.py,sha256=P9uhkOoc9cgUkJ5MBoV5soPgQWSkvPrTwHzPGX7i5tY,610
  adaptive_sdk/rest/base_model.py,sha256=P06TNhnqXa6JEje_B_94vAa5zqPYIVxMZAp6aZ4d80U,516
  adaptive_sdk/rest/rest_types.py,sha256=aSN7901_1goByBEl7Ka6onU2ia-_RhlmtH9suFkWkic,8859
- adaptive_sdk-0.1.8.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
- adaptive_sdk-0.1.8.dist-info/METADATA,sha256=stv3nHM_GV-XMi8mtWvzxBw9Of8_fMLFU71wmBoTB-E,1446
- adaptive_sdk-0.1.8.dist-info/RECORD,,
+ adaptive_sdk-0.1.9.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+ adaptive_sdk-0.1.9.dist-info/METADATA,sha256=HUuoKVBWa8CR3moV2aHj0xWkyefEEyMy5EEyHpzD81c,1446
+ adaptive_sdk-0.1.9.dist-info/RECORD,,