eval-studio-client 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl
This diff shows the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in the public registry.
- eval_studio_client/api/__init__.py +0 -43
- eval_studio_client/api/api/__init__.py +0 -5
- eval_studio_client/api/api/perturbator_service_api.py +1 -268
- eval_studio_client/api/api/test_service_api.py +0 -568
- eval_studio_client/api/docs/PerturbationServiceCreatePerturbationRequest.md +0 -1
- eval_studio_client/api/docs/PerturbatorServiceApi.md +3 -33
- eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +1 -2
- eval_studio_client/api/docs/RequiredTheTestCaseToUpdate.md +0 -1
- eval_studio_client/api/docs/TestServiceApi.md +0 -140
- eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +0 -1
- eval_studio_client/api/docs/TestServicePerturbTestRequest.md +0 -1
- eval_studio_client/api/docs/V1CreateEvaluationRequest.md +0 -1
- eval_studio_client/api/docs/V1ImportEvaluationRequest.md +0 -1
- eval_studio_client/api/docs/V1TestCase.md +0 -1
- eval_studio_client/api/models/__init__.py +0 -38
- eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +2 -8
- eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +3 -5
- eval_studio_client/api/models/required_the_test_case_to_update.py +2 -6
- eval_studio_client/api/models/test_service_generate_test_cases_request.py +2 -4
- eval_studio_client/api/models/test_service_perturb_test_request.py +2 -4
- eval_studio_client/api/models/v1_create_evaluation_request.py +2 -7
- eval_studio_client/api/models/v1_import_evaluation_request.py +2 -7
- eval_studio_client/api/models/v1_test_case.py +2 -6
- eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +2 -20
- eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +1 -4
- eval_studio_client/api/test/test_required_the_test_case_to_update.py +1 -4
- eval_studio_client/api/test/test_test_service_api.py +0 -12
- eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +1 -4
- eval_studio_client/api/test/test_test_service_perturb_test_request.py +1 -4
- eval_studio_client/api/test/test_v1_batch_delete_test_cases_response.py +1 -4
- eval_studio_client/api/test/test_v1_create_evaluation_request.py +2 -20
- eval_studio_client/api/test/test_v1_create_test_case_response.py +1 -4
- eval_studio_client/api/test/test_v1_delete_test_case_response.py +1 -4
- eval_studio_client/api/test/test_v1_evaluation_test.py +1 -4
- eval_studio_client/api/test/test_v1_find_all_test_cases_by_id_response.py +1 -4
- eval_studio_client/api/test/test_v1_get_test_case_response.py +1 -4
- eval_studio_client/api/test/test_v1_import_evaluation_request.py +1 -16
- eval_studio_client/api/test/test_v1_list_test_cases_response.py +1 -4
- eval_studio_client/api/test/test_v1_test_case.py +1 -4
- eval_studio_client/api/test/test_v1_update_test_case_response.py +1 -4
- eval_studio_client/client.py +11 -9
- eval_studio_client/dashboards.py +0 -29
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +32 -1903
- eval_studio_client/leaderboards.py +0 -123
- eval_studio_client/models.py +42 -3
- eval_studio_client/test_labs.py +21 -49
- eval_studio_client/tests.py +1 -188
- {eval_studio_client-1.0.1.dist-info → eval_studio_client-1.0.3.dist-info}/METADATA +3 -2
- {eval_studio_client-1.0.1.dist-info → eval_studio_client-1.0.3.dist-info}/RECORD +50 -179
- {eval_studio_client-1.0.1.dist-info → eval_studio_client-1.0.3.dist-info}/WHEEL +1 -1
- eval_studio_client/api/api/human_calibration_service_api.py +0 -304
- eval_studio_client/api/api/prompt_library_service_api.py +0 -669
- eval_studio_client/api/api/workflow_edge_service_api.py +0 -296
- eval_studio_client/api/api/workflow_node_service_api.py +0 -1634
- eval_studio_client/api/api/workflow_service_api.py +0 -1609
- eval_studio_client/api/docs/HumanCalibrationServiceApi.md +0 -77
- eval_studio_client/api/docs/PromptLibraryServiceApi.md +0 -155
- eval_studio_client/api/docs/ProtobufNullValue.md +0 -12
- eval_studio_client/api/docs/RequiredTheUpdatedWorkflow.md +0 -44
- eval_studio_client/api/docs/RequiredTheUpdatedWorkflowNode.md +0 -44
- eval_studio_client/api/docs/TestServiceImportTestCasesFromLibraryRequest.md +0 -32
- eval_studio_client/api/docs/TestServiceListTestCaseLibraryItemsRequest.md +0 -35
- eval_studio_client/api/docs/V1BatchDeleteWorkflowsRequest.md +0 -29
- eval_studio_client/api/docs/V1BatchDeleteWorkflowsResponse.md +0 -29
- eval_studio_client/api/docs/V1BatchGetWorkflowEdgesResponse.md +0 -29
- eval_studio_client/api/docs/V1BatchGetWorkflowNodesResponse.md +0 -29
- eval_studio_client/api/docs/V1CreateWorkflowResponse.md +0 -29
- eval_studio_client/api/docs/V1DeleteWorkflowNodeResponse.md +0 -29
- eval_studio_client/api/docs/V1DeleteWorkflowResponse.md +0 -29
- eval_studio_client/api/docs/V1EstimateThresholdRequest.md +0 -33
- eval_studio_client/api/docs/V1GetWorkflowNodePrerequisitesResponse.md +0 -30
- eval_studio_client/api/docs/V1GetWorkflowNodeResponse.md +0 -29
- eval_studio_client/api/docs/V1GetWorkflowResponse.md +0 -29
- eval_studio_client/api/docs/V1ImportTestCasesFromLibraryResponse.md +0 -29
- eval_studio_client/api/docs/V1ImportTestCasesRequest.md +0 -33
- eval_studio_client/api/docs/V1LabeledTestCase.md +0 -31
- eval_studio_client/api/docs/V1ListPromptLibraryItemsResponse.md +0 -29
- eval_studio_client/api/docs/V1ListTestCaseLibraryItemsResponse.md +0 -29
- eval_studio_client/api/docs/V1ListWorkflowsResponse.md +0 -29
- eval_studio_client/api/docs/V1ProcessWorkflowNodeResponse.md +0 -29
- eval_studio_client/api/docs/V1PromptLibraryItem.md +0 -42
- eval_studio_client/api/docs/V1TestSuiteEvaluates.md +0 -11
- eval_studio_client/api/docs/V1UpdateWorkflowNodeResponse.md +0 -29
- eval_studio_client/api/docs/V1UpdateWorkflowResponse.md +0 -29
- eval_studio_client/api/docs/V1Workflow.md +0 -46
- eval_studio_client/api/docs/V1WorkflowEdge.md +0 -40
- eval_studio_client/api/docs/V1WorkflowEdgeType.md +0 -12
- eval_studio_client/api/docs/V1WorkflowNode.md +0 -46
- eval_studio_client/api/docs/V1WorkflowNodeArtifact.md +0 -40
- eval_studio_client/api/docs/V1WorkflowNodeArtifacts.md +0 -29
- eval_studio_client/api/docs/V1WorkflowNodeAttributes.md +0 -30
- eval_studio_client/api/docs/V1WorkflowNodeStatus.md +0 -12
- eval_studio_client/api/docs/V1WorkflowNodeType.md +0 -12
- eval_studio_client/api/docs/V1WorkflowNodeView.md +0 -12
- eval_studio_client/api/docs/V1WorkflowType.md +0 -12
- eval_studio_client/api/docs/WorkflowEdgeServiceApi.md +0 -76
- eval_studio_client/api/docs/WorkflowNodeServiceApi.md +0 -423
- eval_studio_client/api/docs/WorkflowServiceApi.md +0 -417
- eval_studio_client/api/models/protobuf_null_value.py +0 -36
- eval_studio_client/api/models/required_the_updated_workflow.py +0 -152
- eval_studio_client/api/models/required_the_updated_workflow_node.py +0 -152
- eval_studio_client/api/models/test_service_import_test_cases_from_library_request.py +0 -93
- eval_studio_client/api/models/test_service_list_test_case_library_items_request.py +0 -99
- eval_studio_client/api/models/v1_batch_delete_workflows_request.py +0 -87
- eval_studio_client/api/models/v1_batch_delete_workflows_response.py +0 -95
- eval_studio_client/api/models/v1_batch_get_workflow_edges_response.py +0 -95
- eval_studio_client/api/models/v1_batch_get_workflow_nodes_response.py +0 -95
- eval_studio_client/api/models/v1_create_workflow_response.py +0 -91
- eval_studio_client/api/models/v1_delete_workflow_node_response.py +0 -91
- eval_studio_client/api/models/v1_delete_workflow_response.py +0 -91
- eval_studio_client/api/models/v1_estimate_threshold_request.py +0 -103
- eval_studio_client/api/models/v1_get_workflow_node_prerequisites_response.py +0 -89
- eval_studio_client/api/models/v1_get_workflow_node_response.py +0 -91
- eval_studio_client/api/models/v1_get_workflow_response.py +0 -91
- eval_studio_client/api/models/v1_import_test_cases_from_library_response.py +0 -91
- eval_studio_client/api/models/v1_import_test_cases_request.py +0 -95
- eval_studio_client/api/models/v1_labeled_test_case.py +0 -91
- eval_studio_client/api/models/v1_list_prompt_library_items_response.py +0 -95
- eval_studio_client/api/models/v1_list_test_case_library_items_response.py +0 -95
- eval_studio_client/api/models/v1_list_workflows_response.py +0 -95
- eval_studio_client/api/models/v1_process_workflow_node_response.py +0 -91
- eval_studio_client/api/models/v1_prompt_library_item.py +0 -129
- eval_studio_client/api/models/v1_test_suite_evaluates.py +0 -39
- eval_studio_client/api/models/v1_update_workflow_node_response.py +0 -91
- eval_studio_client/api/models/v1_update_workflow_response.py +0 -91
- eval_studio_client/api/models/v1_workflow.py +0 -156
- eval_studio_client/api/models/v1_workflow_edge.py +0 -123
- eval_studio_client/api/models/v1_workflow_edge_type.py +0 -37
- eval_studio_client/api/models/v1_workflow_node.py +0 -156
- eval_studio_client/api/models/v1_workflow_node_artifact.py +0 -122
- eval_studio_client/api/models/v1_workflow_node_artifacts.py +0 -97
- eval_studio_client/api/models/v1_workflow_node_attributes.py +0 -87
- eval_studio_client/api/models/v1_workflow_node_status.py +0 -40
- eval_studio_client/api/models/v1_workflow_node_type.py +0 -41
- eval_studio_client/api/models/v1_workflow_node_view.py +0 -38
- eval_studio_client/api/models/v1_workflow_type.py +0 -37
- eval_studio_client/api/test/test_human_calibration_service_api.py +0 -38
- eval_studio_client/api/test/test_prompt_library_service_api.py +0 -43
- eval_studio_client/api/test/test_protobuf_null_value.py +0 -33
- eval_studio_client/api/test/test_required_the_updated_workflow.py +0 -88
- eval_studio_client/api/test/test_required_the_updated_workflow_node.py +0 -80
- eval_studio_client/api/test/test_test_service_import_test_cases_from_library_request.py +0 -56
- eval_studio_client/api/test/test_test_service_list_test_case_library_items_request.py +0 -63
- eval_studio_client/api/test/test_v1_batch_delete_workflows_request.py +0 -53
- eval_studio_client/api/test/test_v1_batch_delete_workflows_response.py +0 -92
- eval_studio_client/api/test/test_v1_batch_get_workflow_edges_response.py +0 -64
- eval_studio_client/api/test/test_v1_batch_get_workflow_nodes_response.py +0 -84
- eval_studio_client/api/test/test_v1_create_workflow_response.py +0 -90
- eval_studio_client/api/test/test_v1_delete_workflow_node_response.py +0 -82
- eval_studio_client/api/test/test_v1_delete_workflow_response.py +0 -90
- eval_studio_client/api/test/test_v1_estimate_threshold_request.py +0 -60
- eval_studio_client/api/test/test_v1_get_workflow_node_prerequisites_response.py +0 -56
- eval_studio_client/api/test/test_v1_get_workflow_node_response.py +0 -82
- eval_studio_client/api/test/test_v1_get_workflow_response.py +0 -90
- eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +0 -71
- eval_studio_client/api/test/test_v1_import_test_cases_request.py +0 -57
- eval_studio_client/api/test/test_v1_labeled_test_case.py +0 -53
- eval_studio_client/api/test/test_v1_list_prompt_library_items_response.py +0 -71
- eval_studio_client/api/test/test_v1_list_test_case_library_items_response.py +0 -71
- eval_studio_client/api/test/test_v1_list_workflows_response.py +0 -92
- eval_studio_client/api/test/test_v1_process_workflow_node_response.py +0 -71
- eval_studio_client/api/test/test_v1_prompt_library_item.py +0 -68
- eval_studio_client/api/test/test_v1_test_suite_evaluates.py +0 -33
- eval_studio_client/api/test/test_v1_update_workflow_node_response.py +0 -82
- eval_studio_client/api/test/test_v1_update_workflow_response.py +0 -90
- eval_studio_client/api/test/test_v1_workflow.py +0 -89
- eval_studio_client/api/test/test_v1_workflow_edge.py +0 -61
- eval_studio_client/api/test/test_v1_workflow_edge_type.py +0 -33
- eval_studio_client/api/test/test_v1_workflow_node.py +0 -81
- eval_studio_client/api/test/test_v1_workflow_node_artifact.py +0 -61
- eval_studio_client/api/test/test_v1_workflow_node_artifacts.py +0 -64
- eval_studio_client/api/test/test_v1_workflow_node_attributes.py +0 -51
- eval_studio_client/api/test/test_v1_workflow_node_status.py +0 -33
- eval_studio_client/api/test/test_v1_workflow_node_type.py +0 -33
- eval_studio_client/api/test/test_v1_workflow_node_view.py +0 -33
- eval_studio_client/api/test/test_v1_workflow_type.py +0 -33
- eval_studio_client/api/test/test_workflow_edge_service_api.py +0 -38
- eval_studio_client/api/test/test_workflow_node_service_api.py +0 -73
- eval_studio_client/api/test/test_workflow_service_api.py +0 -73
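
Taken together, 1.0.3 is mostly subtractive: the workflow, prompt-library, human-calibration, and test-case-library services disappear, and the `topics`, `test_case_names`, `perturbed_by`, and `default_h2ogpte_model` fields are dropped from the remaining request and test-case models. Before walking through the hunks below, here is a hedged sketch (assuming the client wheel is installed; module paths are taken from the file list above) of how to check which side of the change an installed copy is on:

```python
import importlib.util

# A sample of the modules deleted in 1.0.3 (paths from the file list above).
REMOVED_IN_1_0_3 = [
    "eval_studio_client.api.models.v1_workflow",
    "eval_studio_client.api.models.v1_prompt_library_item",
    "eval_studio_client.api.api.workflow_service_api",
    "eval_studio_client.api.api.human_calibration_service_api",
]

for module in REMOVED_IN_1_0_3:
    found = importlib.util.find_spec(module) is not None
    print(f"{module}: {'present -> 1.0.1' if found else 'missing -> 1.0.3'}")
```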

eval_studio_client/api/docs/RequiredTheTestCaseToUpdate.md:
@@ -16,7 +16,6 @@ Name | Type | Description | Notes
 **answer** | **str** | Expected answer text. Model output. | [optional]
 **constraints** | **List[str]** | Constraints on the model output. | [optional]
 **condition** | **str** | Optional. Test case output condition, in a form of AIP-160 compliant filter expression. | [optional]
-**perturbed_by** | **List[str]** | Output only. The list of perturbators applied to this test case. | [optional] [readonly]
 
 ## Example
 

eval_studio_client/api/docs/TestServiceApi.md:
@@ -11,9 +11,7 @@ Method | HTTP request | Description
 [**test_service_delete_test**](TestServiceApi.md#test_service_delete_test) | **DELETE** /v1/{name_6} |
 [**test_service_generate_test_cases**](TestServiceApi.md#test_service_generate_test_cases) | **POST** /v1/{name}:generateTestCases |
 [**test_service_get_test**](TestServiceApi.md#test_service_get_test) | **GET** /v1/{name_9} |
-[**test_service_import_test_cases_from_library**](TestServiceApi.md#test_service_import_test_cases_from_library) | **POST** /v1/{name}:importTestCasesFromLibrary |
 [**test_service_list_most_recent_tests**](TestServiceApi.md#test_service_list_most_recent_tests) | **GET** /v1/tests:mostRecent |
-[**test_service_list_test_case_library_items**](TestServiceApi.md#test_service_list_test_case_library_items) | **POST** /v1/{name}:listTestCaseLibraryItems |
 [**test_service_list_tests**](TestServiceApi.md#test_service_list_tests) | **GET** /v1/tests |
 [**test_service_perturb_test**](TestServiceApi.md#test_service_perturb_test) | **PATCH** /v1/{test.name} |

@@ -489,75 +487,6 @@ No authorization required
 
 [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
 
-# **test_service_import_test_cases_from_library**
-> V1ImportTestCasesFromLibraryResponse test_service_import_test_cases_from_library(name, body)
-
-
-
-### Example
-
-
-```python
-import eval_studio_client.api
-from eval_studio_client.api.models.test_service_import_test_cases_from_library_request import TestServiceImportTestCasesFromLibraryRequest
-from eval_studio_client.api.models.v1_import_test_cases_from_library_response import V1ImportTestCasesFromLibraryResponse
-from eval_studio_client.api.rest import ApiException
-from pprint import pprint
-
-# Defining the host is optional and defaults to http://localhost
-# See configuration.py for a list of all supported configuration parameters.
-configuration = eval_studio_client.api.Configuration(
-    host = "http://localhost"
-)
-
-
-# Enter a context with an instance of the API client
-with eval_studio_client.api.ApiClient(configuration) as api_client:
-    # Create an instance of the API class
-    api_instance = eval_studio_client.api.TestServiceApi(api_client)
-    name = 'name_example' # str | Required. The Test for which to get TestCases.
-    body = eval_studio_client.api.TestServiceImportTestCasesFromLibraryRequest() # TestServiceImportTestCasesFromLibraryRequest |
-
-    try:
-        api_response = api_instance.test_service_import_test_cases_from_library(name, body)
-        print("The response of TestServiceApi->test_service_import_test_cases_from_library:\n")
-        pprint(api_response)
-    except Exception as e:
-        print("Exception when calling TestServiceApi->test_service_import_test_cases_from_library: %s\n" % e)
-```
-
-
-
-### Parameters
-
-
-Name | Type | Description | Notes
-------------- | ------------- | ------------- | -------------
- **name** | **str**| Required. The Test for which to get TestCases. |
- **body** | [**TestServiceImportTestCasesFromLibraryRequest**](TestServiceImportTestCasesFromLibraryRequest.md)|  |
-
-### Return type
-
-[**V1ImportTestCasesFromLibraryResponse**](V1ImportTestCasesFromLibraryResponse.md)
-
-### Authorization
-
-No authorization required
-
-### HTTP request headers
-
- - **Content-Type**: application/json
- - **Accept**: application/json
-
-### HTTP response details
-
-| Status code | Description | Response headers |
-|-------------|-------------|------------------|
-**200** | A successful response. | - |
-**0** | An unexpected error response. | - |
-
-[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
-
 # **test_service_list_most_recent_tests**
 > V1ListMostRecentTestsResponse test_service_list_most_recent_tests(limit=limit)
 
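
The `test_service_import_test_cases_from_library` endpoint removed above is deleted rather than deprecated, so calling it on 1.0.3 fails with `AttributeError`. A minimal version-tolerant call site might look like this (a sketch; the host value and the fallback branches are illustrative assumptions, not part of the package):

```python
import eval_studio_client.api

configuration = eval_studio_client.api.Configuration(host="http://localhost")

with eval_studio_client.api.ApiClient(configuration) as api_client:
    api_instance = eval_studio_client.api.TestServiceApi(api_client)
    if hasattr(api_instance, "test_service_import_test_cases_from_library"):
        # 1.0.1: the test-case library import endpoint still exists.
        ...
    else:
        # 1.0.3: the endpoint is gone; create test cases directly instead.
        ...
```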

eval_studio_client/api/docs/TestServiceApi.md (continued):
@@ -624,75 +553,6 @@ No authorization required
 
 [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
 
-# **test_service_list_test_case_library_items**
-> V1ListTestCaseLibraryItemsResponse test_service_list_test_case_library_items(name, body)
-
-
-
-### Example
-
-
-```python
-import eval_studio_client.api
-from eval_studio_client.api.models.test_service_list_test_case_library_items_request import TestServiceListTestCaseLibraryItemsRequest
-from eval_studio_client.api.models.v1_list_test_case_library_items_response import V1ListTestCaseLibraryItemsResponse
-from eval_studio_client.api.rest import ApiException
-from pprint import pprint
-
-# Defining the host is optional and defaults to http://localhost
-# See configuration.py for a list of all supported configuration parameters.
-configuration = eval_studio_client.api.Configuration(
-    host = "http://localhost"
-)
-
-
-# Enter a context with an instance of the API client
-with eval_studio_client.api.ApiClient(configuration) as api_client:
-    # Create an instance of the API class
-    api_instance = eval_studio_client.api.TestServiceApi(api_client)
-    name = 'name_example' # str | Required. The Test for which to list the items.
-    body = eval_studio_client.api.TestServiceListTestCaseLibraryItemsRequest() # TestServiceListTestCaseLibraryItemsRequest |
-
-    try:
-        api_response = api_instance.test_service_list_test_case_library_items(name, body)
-        print("The response of TestServiceApi->test_service_list_test_case_library_items:\n")
-        pprint(api_response)
-    except Exception as e:
-        print("Exception when calling TestServiceApi->test_service_list_test_case_library_items: %s\n" % e)
-```
-
-
-
-### Parameters
-
-
-Name | Type | Description | Notes
-------------- | ------------- | ------------- | -------------
- **name** | **str**| Required. The Test for which to list the items. |
- **body** | [**TestServiceListTestCaseLibraryItemsRequest**](TestServiceListTestCaseLibraryItemsRequest.md)|  |
-
-### Return type
-
-[**V1ListTestCaseLibraryItemsResponse**](V1ListTestCaseLibraryItemsResponse.md)
-
-### Authorization
-
-No authorization required
-
-### HTTP request headers
-
- - **Content-Type**: application/json
- - **Accept**: application/json
-
-### HTTP response details
-
-| Status code | Description | Response headers |
-|-------------|-------------|------------------|
-**200** | A successful response. | - |
-**0** | An unexpected error response. | - |
-
-[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
-
 # **test_service_list_tests**
 > V1ListTestsResponse test_service_list_tests(order_by=order_by)
 

eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md:
@@ -10,7 +10,6 @@ Name | Type | Description | Notes
 **base_llm_model** | **str** | Optional. The base LLM model to use for generating the prompts. Selected automatically if not specified. | [optional]
 **generators** | [**List[V1TestCasesGenerator]**](V1TestCasesGenerator.md) | Optional. Generators to use for generation. If not specified, all generators are selected. | [optional]
 **h2ogpte_collection_id** | **str** | Optional. The ID of the h2oGPTe collection to use. If empty, new temporary collection will be created. | [optional]
-**topics** | **List[str]** | Optional. Optional. Topics to generate questions for. If not specified, use document summarization as topic generation. | [optional]
 
 ## Example
 

eval_studio_client/api/docs/TestServicePerturbTestRequest.md:
@@ -8,7 +8,6 @@ Name | Type | Description | Notes
 **perturbator_configurations** | [**List[V1PerturbatorConfiguration]**](V1PerturbatorConfiguration.md) | Required. PerturbatorConfigurations to apply to the Test. | [optional]
 **new_test_display_name** | **str** | Required. Name of the newly created test. | [optional]
 **new_test_description** | **str** | Optional. Description of the newly created Test. | [optional]
-**test_case_names** | **List[str]** | Optional. Perturbation apply only to selected testCases. | [optional]
 
 ## Example
 

eval_studio_client/api/docs/V1CreateEvaluationRequest.md:
@@ -14,7 +14,6 @@ Name | Type | Description | Notes
 **evaluators_parameters** | **Dict[str, str]** | Optional. Additional evaluators configuration, for all the evaluators used in the evaluation. Key is the evaluator identifier, and the value is a JSON string containing the configuration dictionary. | [optional]
 **model_parameters** | **str** | Optional. Parameters overrides in JSON format. | [optional]
 **h2ogpte_collection** | **str** | The existing collection name in H2OGPTe. | [optional]
-**default_h2ogpte_model** | [**V1Model**](V1Model.md) | | [optional]
 
 ## Example
 

eval_studio_client/api/docs/V1ImportEvaluationRequest.md:
@@ -10,7 +10,6 @@ Name | Type | Description | Notes
 **test_lab** | **str** | Required. The JSON representation of the pre-built test-lab. | [optional]
 **operation** | **str** | Required. Resource name of the long-running operation. | [optional]
 **evaluators_parameters** | **Dict[str, str]** | Optional. Additional evaluators configuration, for all the evaluators used in the evaluation. Key is the evaluator identifier, and the value is a JSON string containing the configuration dictionary. | [optional]
-**default_h2ogpte_model** | [**V1Model**](V1Model.md) | | [optional]
 
 ## Example
 

eval_studio_client/api/docs/V1TestCase.md:
@@ -17,7 +17,6 @@ Name | Type | Description | Notes
 **answer** | **str** | Expected answer text. Model output. | [optional]
 **constraints** | **List[str]** | Constraints on the model output. | [optional]
 **condition** | **str** | Optional. Test case output condition, in a form of AIP-160 compliant filter expression. | [optional]
-**perturbed_by** | **List[str]** | Output only. The list of perturbators applied to this test case. | [optional] [readonly]
 
 ## Example
 

eval_studio_client/api/models/__init__.py:
@@ -17,7 +17,6 @@
 from eval_studio_client.api.models.perturbation_service_create_perturbation_request import PerturbationServiceCreatePerturbationRequest
 from eval_studio_client.api.models.prompt_generation_service_auto_generate_prompts_request import PromptGenerationServiceAutoGeneratePromptsRequest
 from eval_studio_client.api.models.protobuf_any import ProtobufAny
-from eval_studio_client.api.models.protobuf_null_value import ProtobufNullValue
 from eval_studio_client.api.models.required_the_dashboard_to_update import RequiredTheDashboardToUpdate
 from eval_studio_client.api.models.required_the_document_to_update import RequiredTheDocumentToUpdate
 from eval_studio_client.api.models.required_the_leaderboard_to_update import RequiredTheLeaderboardToUpdate
@@ -26,13 +25,9 @@ from eval_studio_client.api.models.required_the_operation_to_finalize import Req
 from eval_studio_client.api.models.required_the_operation_to_update import RequiredTheOperationToUpdate
 from eval_studio_client.api.models.required_the_test_case_to_update import RequiredTheTestCaseToUpdate
 from eval_studio_client.api.models.required_the_test_to_update import RequiredTheTestToUpdate
-from eval_studio_client.api.models.required_the_updated_workflow import RequiredTheUpdatedWorkflow
-from eval_studio_client.api.models.required_the_updated_workflow_node import RequiredTheUpdatedWorkflowNode
 from eval_studio_client.api.models.rpc_status import RpcStatus
 from eval_studio_client.api.models.test_case_service_batch_delete_test_cases_request import TestCaseServiceBatchDeleteTestCasesRequest
 from eval_studio_client.api.models.test_service_generate_test_cases_request import TestServiceGenerateTestCasesRequest
-from eval_studio_client.api.models.test_service_import_test_cases_from_library_request import TestServiceImportTestCasesFromLibraryRequest
-from eval_studio_client.api.models.test_service_list_test_case_library_items_request import TestServiceListTestCaseLibraryItemsRequest
 from eval_studio_client.api.models.test_service_perturb_test_request import TestServicePerturbTestRequest
 from eval_studio_client.api.models.v1_batch_create_leaderboards_request import V1BatchCreateLeaderboardsRequest
 from eval_studio_client.api.models.v1_batch_create_leaderboards_response import V1BatchCreateLeaderboardsResponse
@@ -49,16 +44,12 @@ from eval_studio_client.api.models.v1_batch_delete_models_response import V1Batc
 from eval_studio_client.api.models.v1_batch_delete_test_cases_response import V1BatchDeleteTestCasesResponse
 from eval_studio_client.api.models.v1_batch_delete_tests_request import V1BatchDeleteTestsRequest
 from eval_studio_client.api.models.v1_batch_delete_tests_response import V1BatchDeleteTestsResponse
-from eval_studio_client.api.models.v1_batch_delete_workflows_request import V1BatchDeleteWorkflowsRequest
-from eval_studio_client.api.models.v1_batch_delete_workflows_response import V1BatchDeleteWorkflowsResponse
 from eval_studio_client.api.models.v1_batch_get_dashboards_response import V1BatchGetDashboardsResponse
 from eval_studio_client.api.models.v1_batch_get_documents_response import V1BatchGetDocumentsResponse
 from eval_studio_client.api.models.v1_batch_get_leaderboards_response import V1BatchGetLeaderboardsResponse
 from eval_studio_client.api.models.v1_batch_get_models_response import V1BatchGetModelsResponse
 from eval_studio_client.api.models.v1_batch_get_operations_response import V1BatchGetOperationsResponse
 from eval_studio_client.api.models.v1_batch_get_tests_response import V1BatchGetTestsResponse
-from eval_studio_client.api.models.v1_batch_get_workflow_edges_response import V1BatchGetWorkflowEdgesResponse
-from eval_studio_client.api.models.v1_batch_get_workflow_nodes_response import V1BatchGetWorkflowNodesResponse
 from eval_studio_client.api.models.v1_batch_import_leaderboard_request import V1BatchImportLeaderboardRequest
 from eval_studio_client.api.models.v1_batch_import_leaderboard_response import V1BatchImportLeaderboardResponse
 from eval_studio_client.api.models.v1_batch_import_tests_request import V1BatchImportTestsRequest
@@ -77,7 +68,6 @@ from eval_studio_client.api.models.v1_create_perturbation_response import V1Crea
 from eval_studio_client.api.models.v1_create_test_case_response import V1CreateTestCaseResponse
 from eval_studio_client.api.models.v1_create_test_lab_response import V1CreateTestLabResponse
 from eval_studio_client.api.models.v1_create_test_response import V1CreateTestResponse
-from eval_studio_client.api.models.v1_create_workflow_response import V1CreateWorkflowResponse
 from eval_studio_client.api.models.v1_dashboard import V1Dashboard
 from eval_studio_client.api.models.v1_dashboard_status import V1DashboardStatus
 from eval_studio_client.api.models.v1_delete_dashboard_response import V1DeleteDashboardResponse
@@ -87,10 +77,7 @@ from eval_studio_client.api.models.v1_delete_leaderboard_response import V1Delet
 from eval_studio_client.api.models.v1_delete_model_response import V1DeleteModelResponse
 from eval_studio_client.api.models.v1_delete_test_case_response import V1DeleteTestCaseResponse
 from eval_studio_client.api.models.v1_delete_test_response import V1DeleteTestResponse
-from eval_studio_client.api.models.v1_delete_workflow_node_response import V1DeleteWorkflowNodeResponse
-from eval_studio_client.api.models.v1_delete_workflow_response import V1DeleteWorkflowResponse
 from eval_studio_client.api.models.v1_document import V1Document
-from eval_studio_client.api.models.v1_estimate_threshold_request import V1EstimateThresholdRequest
 from eval_studio_client.api.models.v1_evaluation_test import V1EvaluationTest
 from eval_studio_client.api.models.v1_evaluator import V1Evaluator
 from eval_studio_client.api.models.v1_evaluator_param_type import V1EvaluatorParamType
@@ -112,17 +99,11 @@ from eval_studio_client.api.models.v1_get_perturbator_response import V1GetPertu
 from eval_studio_client.api.models.v1_get_test_case_response import V1GetTestCaseResponse
 from eval_studio_client.api.models.v1_get_test_class_response import V1GetTestClassResponse
 from eval_studio_client.api.models.v1_get_test_response import V1GetTestResponse
-from eval_studio_client.api.models.v1_get_workflow_node_prerequisites_response import V1GetWorkflowNodePrerequisitesResponse
-from eval_studio_client.api.models.v1_get_workflow_node_response import V1GetWorkflowNodeResponse
-from eval_studio_client.api.models.v1_get_workflow_response import V1GetWorkflowResponse
 from eval_studio_client.api.models.v1_import_evaluation_request import V1ImportEvaluationRequest
 from eval_studio_client.api.models.v1_import_leaderboard_request import V1ImportLeaderboardRequest
 from eval_studio_client.api.models.v1_import_leaderboard_response import V1ImportLeaderboardResponse
-from eval_studio_client.api.models.v1_import_test_cases_from_library_response import V1ImportTestCasesFromLibraryResponse
-from eval_studio_client.api.models.v1_import_test_cases_request import V1ImportTestCasesRequest
 from eval_studio_client.api.models.v1_info import V1Info
 from eval_studio_client.api.models.v1_insight import V1Insight
-from eval_studio_client.api.models.v1_labeled_test_case import V1LabeledTestCase
 from eval_studio_client.api.models.v1_leaderboard import V1Leaderboard
 from eval_studio_client.api.models.v1_leaderboard_status import V1LeaderboardStatus
 from eval_studio_client.api.models.v1_leaderboard_type import V1LeaderboardType
@@ -141,13 +122,10 @@ from eval_studio_client.api.models.v1_list_most_recent_models_response import V1
 from eval_studio_client.api.models.v1_list_most_recent_tests_response import V1ListMostRecentTestsResponse
 from eval_studio_client.api.models.v1_list_operations_response import V1ListOperationsResponse
 from eval_studio_client.api.models.v1_list_perturbators_response import V1ListPerturbatorsResponse
-from eval_studio_client.api.models.v1_list_prompt_library_items_response import V1ListPromptLibraryItemsResponse
 from eval_studio_client.api.models.v1_list_rag_collections_response import V1ListRAGCollectionsResponse
-from eval_studio_client.api.models.v1_list_test_case_library_items_response import V1ListTestCaseLibraryItemsResponse
 from eval_studio_client.api.models.v1_list_test_cases_response import V1ListTestCasesResponse
 from eval_studio_client.api.models.v1_list_test_classes_response import V1ListTestClassesResponse
 from eval_studio_client.api.models.v1_list_tests_response import V1ListTestsResponse
-from eval_studio_client.api.models.v1_list_workflows_response import V1ListWorkflowsResponse
 from eval_studio_client.api.models.v1_model import V1Model
 from eval_studio_client.api.models.v1_model_type import V1ModelType
 from eval_studio_client.api.models.v1_operation import V1Operation
@@ -157,8 +135,6 @@ from eval_studio_client.api.models.v1_perturbator import V1Perturbator
 from eval_studio_client.api.models.v1_perturbator_configuration import V1PerturbatorConfiguration
 from eval_studio_client.api.models.v1_perturbator_intensity import V1PerturbatorIntensity
 from eval_studio_client.api.models.v1_problem_and_action import V1ProblemAndAction
-from eval_studio_client.api.models.v1_process_workflow_node_response import V1ProcessWorkflowNodeResponse
-from eval_studio_client.api.models.v1_prompt_library_item import V1PromptLibraryItem
 from eval_studio_client.api.models.v1_test import V1Test
 from eval_studio_client.api.models.v1_test_case import V1TestCase
 from eval_studio_client.api.models.v1_test_case_relationship import V1TestCaseRelationship
@@ -166,7 +142,6 @@ from eval_studio_client.api.models.v1_test_cases_generator import V1TestCasesGen
 from eval_studio_client.api.models.v1_test_class import V1TestClass
 from eval_studio_client.api.models.v1_test_class_type import V1TestClassType
 from eval_studio_client.api.models.v1_test_lab import V1TestLab
-from eval_studio_client.api.models.v1_test_suite_evaluates import V1TestSuiteEvaluates
 from eval_studio_client.api.models.v1_update_dashboard_response import V1UpdateDashboardResponse
 from eval_studio_client.api.models.v1_update_document_response import V1UpdateDocumentResponse
 from eval_studio_client.api.models.v1_update_leaderboard_response import V1UpdateLeaderboardResponse
@@ -174,17 +149,4 @@ from eval_studio_client.api.models.v1_update_model_response import V1UpdateModel
 from eval_studio_client.api.models.v1_update_operation_response import V1UpdateOperationResponse
 from eval_studio_client.api.models.v1_update_test_case_response import V1UpdateTestCaseResponse
 from eval_studio_client.api.models.v1_update_test_response import V1UpdateTestResponse
-from eval_studio_client.api.models.v1_update_workflow_node_response import V1UpdateWorkflowNodeResponse
-from eval_studio_client.api.models.v1_update_workflow_response import V1UpdateWorkflowResponse
 from eval_studio_client.api.models.v1_who_am_i_response import V1WhoAmIResponse
-from eval_studio_client.api.models.v1_workflow import V1Workflow
-from eval_studio_client.api.models.v1_workflow_edge import V1WorkflowEdge
-from eval_studio_client.api.models.v1_workflow_edge_type import V1WorkflowEdgeType
-from eval_studio_client.api.models.v1_workflow_node import V1WorkflowNode
-from eval_studio_client.api.models.v1_workflow_node_artifact import V1WorkflowNodeArtifact
-from eval_studio_client.api.models.v1_workflow_node_artifacts import V1WorkflowNodeArtifacts
-from eval_studio_client.api.models.v1_workflow_node_attributes import V1WorkflowNodeAttributes
-from eval_studio_client.api.models.v1_workflow_node_status import V1WorkflowNodeStatus
-from eval_studio_client.api.models.v1_workflow_node_type import V1WorkflowNodeType
-from eval_studio_client.api.models.v1_workflow_node_view import V1WorkflowNodeView
-from eval_studio_client.api.models.v1_workflow_type import V1WorkflowType
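
Downstream code that re-imported these names from `eval_studio_client.api.models` now fails at import time. A common pattern for supporting both releases is an import-time feature check; a minimal sketch (the flag name `HAS_WORKFLOW_MODELS` is an assumption, not part of the package):

```python
# Hedged sketch: feature-detect the workflow models that 1.0.3 stops exporting.
try:
    from eval_studio_client.api.models import V1Workflow  # present only in 1.0.1
    HAS_WORKFLOW_MODELS = True
except ImportError:
    HAS_WORKFLOW_MODELS = False

if HAS_WORKFLOW_MODELS:
    print("1.0.1-era client: workflow models available")
else:
    print("1.0.3 client: workflow models removed")
```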

eval_studio_client/api/models/perturbation_service_create_perturbation_request.py:
@@ -19,7 +19,6 @@ import json
 
 from pydantic import BaseModel, ConfigDict, Field
 from typing import Any, ClassVar, Dict, List, Optional
-from eval_studio_client.api.models.v1_model import V1Model
 from eval_studio_client.api.models.v1_perturbator_configuration import V1PerturbatorConfiguration
 from eval_studio_client.api.models.v1_test_case import V1TestCase
 from eval_studio_client.api.models.v1_test_case_relationship import V1TestCaseRelationship

@@ -33,8 +33,7 @@ class PerturbationServiceCreatePerturbationRequest(BaseModel):
     perturbator_configurations: Optional[List[V1PerturbatorConfiguration]] = Field(default=None, description="Required. PerturbatorConfiguration to apply to the parent Test.", alias="perturbatorConfigurations")
     test_cases: Optional[List[V1TestCase]] = Field(default=None, description="Required. List of test cases to perturbate. These are the test cases from the parent test. TODO: breaks https://google.aip.dev/144", alias="testCases")
     test_case_relationships: Optional[List[V1TestCaseRelationship]] = Field(default=None, description="Optional. List of relationships between test cases.", alias="testCaseRelationships")
-    default_h2ogpte_model: Optional[V1Model] = Field(default=None, alias="defaultH2ogpteModel")
-    __properties: ClassVar[List[str]] = ["perturbatorConfigurations", "testCases", "testCaseRelationships", "defaultH2ogpteModel"]
+    __properties: ClassVar[List[str]] = ["perturbatorConfigurations", "testCases", "testCaseRelationships"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -96,9 +94,6 @@ class PerturbationServiceCreatePerturbationRequest(BaseModel):
                 if _item:
                     _items.append(_item.to_dict())
             _dict['testCaseRelationships'] = _items
-        # override the default output from pydantic by calling `to_dict()` of default_h2ogpte_model
-        if self.default_h2ogpte_model:
-            _dict['defaultH2ogpteModel'] = self.default_h2ogpte_model.to_dict()
         return _dict
 
     @classmethod
@@ -113,8 +108,7 @@ class PerturbationServiceCreatePerturbationRequest(BaseModel):
         _obj = cls.model_validate({
             "perturbatorConfigurations": [V1PerturbatorConfiguration.from_dict(_item) for _item in obj["perturbatorConfigurations"]] if obj.get("perturbatorConfigurations") is not None else None,
             "testCases": [V1TestCase.from_dict(_item) for _item in obj["testCases"]] if obj.get("testCases") is not None else None,
-            "testCaseRelationships": [V1TestCaseRelationship.from_dict(_item) for _item in obj["testCaseRelationships"]] if obj.get("testCaseRelationships") is not None else None,
-            "defaultH2ogpteModel": V1Model.from_dict(obj["defaultH2ogpteModel"]) if obj.get("defaultH2ogpteModel") is not None else None
+            "testCaseRelationships": [V1TestCaseRelationship.from_dict(_item) for _item in obj["testCaseRelationships"]] if obj.get("testCaseRelationships") is not None else None
         })
         return _obj
 
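
The practical effect of dropping `defaultH2ogpteModel` from `__properties`, `to_dict()`, and `from_dict()` is that the key silently disappears on round-trips. A minimal sketch against 1.0.3 (the model name in the payload is made up):

```python
from eval_studio_client.api.models.perturbation_service_create_perturbation_request import (
    PerturbationServiceCreatePerturbationRequest,
)

# from_dict() only copies keys listed in __properties, so the stale key is ignored.
request = PerturbationServiceCreatePerturbationRequest.from_dict({
    "perturbatorConfigurations": [],
    "defaultH2ogpteModel": {"name": "models/example"},  # unknown to 1.0.3
})
assert "defaultH2ogpteModel" not in request.to_dict()
```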

eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py:
@@ -33,10 +33,9 @@ class PromptGenerationServiceAutoGeneratePromptsRequest(BaseModel):
     count: Optional[StrictInt] = Field(default=None, description="Required. The number of TestCases to generate.")
     base_llm_model: Optional[StrictStr] = Field(default=None, description="Required. Base LLM model to use for generating the prompts.", alias="baseLlmModel")
     document_urls: Optional[List[StrictStr]] = Field(default=None, description="Optional. The list of document URLs. The document URL might be a managed document URL or a public URL.", alias="documentUrls")
-    generators: Optional[List[V1TestCasesGenerator]] = Field(default=None, description="Optional.
+    generators: Optional[List[V1TestCasesGenerator]] = Field(default=None, description="Optional. Topics to generate TestCases for. If not specified, all topics are selected.")
     h2ogpte_collection_id: Optional[StrictStr] = Field(default=None, description="Optional. The ID of the h2oGPTe collection to use. If empty, new temporary collection will be created.", alias="h2ogpteCollectionId")
-    topics: Optional[List[StrictStr]] = Field(default=None, description="Optional. Optional. Topics to generate questions for. If not specified, use document summarization as topic generation.")
-    __properties: ClassVar[List[str]] = ["operation", "model", "count", "baseLlmModel", "documentUrls", "generators", "h2ogpteCollectionId", "topics"]
+    __properties: ClassVar[List[str]] = ["operation", "model", "count", "baseLlmModel", "documentUrls", "generators", "h2ogpteCollectionId"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -98,8 +97,7 @@ class PromptGenerationServiceAutoGeneratePromptsRequest(BaseModel):
             "baseLlmModel": obj.get("baseLlmModel"),
             "documentUrls": obj.get("documentUrls"),
             "generators": obj.get("generators"),
-            "h2ogpteCollectionId": obj.get("h2ogpteCollectionId"),
-            "topics": obj.get("topics")
+            "h2ogpteCollectionId": obj.get("h2ogpteCollectionId")
         })
         return _obj
 

eval_studio_client/api/models/required_the_test_case_to_update.py:
@@ -38,8 +38,7 @@ class RequiredTheTestCaseToUpdate(BaseModel):
     answer: Optional[StrictStr] = Field(default=None, description="Expected answer text. Model output.")
     constraints: Optional[List[StrictStr]] = Field(default=None, description="Constraints on the model output.")
     condition: Optional[StrictStr] = Field(default=None, description="Optional. Test case output condition, in a form of AIP-160 compliant filter expression.")
-    perturbed_by: Optional[List[StrictStr]] = Field(default=None, description="Output only. The list of perturbators applied to this test case.", alias="perturbedBy")
-    __properties: ClassVar[List[str]] = ["createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "parent", "prompt", "answer", "constraints", "condition", "perturbedBy"]
+    __properties: ClassVar[List[str]] = ["createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "parent", "prompt", "answer", "constraints", "condition"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -77,7 +76,6 @@ class RequiredTheTestCaseToUpdate(BaseModel):
         * OpenAPI `readOnly` fields are excluded.
         * OpenAPI `readOnly` fields are excluded.
         * OpenAPI `readOnly` fields are excluded.
-        * OpenAPI `readOnly` fields are excluded.
         """
         excluded_fields: Set[str] = set([
             "create_time",
@@ -86,7 +84,6 @@ class RequiredTheTestCaseToUpdate(BaseModel):
             "updater",
             "delete_time",
             "deleter",
-            "perturbed_by",
         ])
 
         _dict = self.model_dump(
@@ -116,8 +113,7 @@ class RequiredTheTestCaseToUpdate(BaseModel):
             "prompt": obj.get("prompt"),
             "answer": obj.get("answer"),
             "constraints": obj.get("constraints"),
-            "condition": obj.get("condition"),
-            "perturbedBy": obj.get("perturbedBy")
+            "condition": obj.get("condition")
         })
         return _obj
 
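
Because `perturbed_by` was marked output-only, its removal mostly matters when a payload produced by a 1.0.1 server is parsed by a 1.0.3 client; as above, `from_dict()` only copies keys listed in `__properties`, so the stale key is dropped. A small sketch (prompt and answer values are invented):

```python
from eval_studio_client.api.models.required_the_test_case_to_update import (
    RequiredTheTestCaseToUpdate,
)

update = RequiredTheTestCaseToUpdate.from_dict({
    "prompt": "What is MAPE?",
    "answer": "Mean absolute percentage error.",
    "perturbedBy": ["typos"],  # 1.0.1-era key; 1.0.3 drops it on parse
})
assert "perturbedBy" not in update.to_dict()
```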

eval_studio_client/api/models/test_service_generate_test_cases_request.py:
@@ -32,8 +32,7 @@ class TestServiceGenerateTestCasesRequest(BaseModel):
     base_llm_model: Optional[StrictStr] = Field(default=None, description="Optional. The base LLM model to use for generating the prompts. Selected automatically if not specified.", alias="baseLlmModel")
     generators: Optional[List[V1TestCasesGenerator]] = Field(default=None, description="Optional. Generators to use for generation. If not specified, all generators are selected.")
     h2ogpte_collection_id: Optional[StrictStr] = Field(default=None, description="Optional. The ID of the h2oGPTe collection to use. If empty, new temporary collection will be created.", alias="h2ogpteCollectionId")
-    topics: Optional[List[StrictStr]] = Field(default=None, description="Optional. Optional. Topics to generate questions for. If not specified, use document summarization as topic generation.")
-    __properties: ClassVar[List[str]] = ["count", "model", "baseLlmModel", "generators", "h2ogpteCollectionId", "topics"]
+    __properties: ClassVar[List[str]] = ["count", "model", "baseLlmModel", "generators", "h2ogpteCollectionId"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -90,8 +89,7 @@ class TestServiceGenerateTestCasesRequest(BaseModel):
             "model": obj.get("model"),
             "baseLlmModel": obj.get("baseLlmModel"),
             "generators": obj.get("generators"),
-            "h2ogpteCollectionId": obj.get("h2ogpteCollectionId"),
-            "topics": obj.get("topics")
+            "h2ogpteCollectionId": obj.get("h2ogpteCollectionId")
         })
         return _obj
 
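
On the request side, callers that passed `topics` to test-case generation have no direct replacement; per the removed field description, topic selection falls back to document summarization. A sketch of a 1.0.3-shaped request (count and model name are placeholders):

```python
from eval_studio_client.api.models.test_service_generate_test_cases_request import (
    TestServiceGenerateTestCasesRequest,
)

request = TestServiceGenerateTestCasesRequest(
    count=10,
    base_llm_model="base-llm-placeholder",
    h2ogpte_collection_id="",  # empty -> a temporary collection is created
)
# There is no 'topics' field on this model any more.
print(request.to_json())
```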

eval_studio_client/api/models/test_service_perturb_test_request.py:
@@ -30,8 +30,7 @@ class TestServicePerturbTestRequest(BaseModel):
     perturbator_configurations: Optional[List[V1PerturbatorConfiguration]] = Field(default=None, description="Required. PerturbatorConfigurations to apply to the Test.", alias="perturbatorConfigurations")
     new_test_display_name: Optional[StrictStr] = Field(default=None, description="Required. Name of the newly created test.", alias="newTestDisplayName")
     new_test_description: Optional[StrictStr] = Field(default=None, description="Optional. Description of the newly created Test.", alias="newTestDescription")
-    test_case_names: Optional[List[StrictStr]] = Field(default=None, description="Optional. Perturbation apply only to selected testCases.", alias="testCaseNames")
-    __properties: ClassVar[List[str]] = ["perturbatorConfigurations", "newTestDisplayName", "newTestDescription", "testCaseNames"]
+    __properties: ClassVar[List[str]] = ["perturbatorConfigurations", "newTestDisplayName", "newTestDescription"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -93,8 +92,7 @@ class TestServicePerturbTestRequest(BaseModel):
         _obj = cls.model_validate({
             "perturbatorConfigurations": [V1PerturbatorConfiguration.from_dict(_item) for _item in obj["perturbatorConfigurations"]] if obj.get("perturbatorConfigurations") is not None else None,
             "newTestDisplayName": obj.get("newTestDisplayName"),
-            "newTestDescription": obj.get("newTestDescription"),
-            "testCaseNames": obj.get("testCaseNames")
+            "newTestDescription": obj.get("newTestDescription")
         })
         return _obj
 
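
With `test_case_names` gone, a perturbation request on 1.0.3 always applies the configured perturbators to the whole Test rather than a selected subset. Sketch (display name, description, and the empty configuration list are placeholders):

```python
from eval_studio_client.api.models.test_service_perturb_test_request import (
    TestServicePerturbTestRequest,
)

request = TestServicePerturbTestRequest(
    perturbator_configurations=[],  # fill with V1PerturbatorConfiguration values
    new_test_display_name="my test (perturbed)",
    new_test_description="Copy of my test with perturbators applied.",
)
print(request.to_json())
```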

eval_studio_client/api/models/v1_create_evaluation_request.py:
@@ -37,8 +37,7 @@ class V1CreateEvaluationRequest(BaseModel):
     evaluators_parameters: Optional[Dict[str, StrictStr]] = Field(default=None, description="Optional. Additional evaluators configuration, for all the evaluators used in the evaluation. Key is the evaluator identifier, and the value is a JSON string containing the configuration dictionary.", alias="evaluatorsParameters")
     model_parameters: Optional[StrictStr] = Field(default=None, description="Optional. Parameters overrides in JSON format.", alias="modelParameters")
     h2ogpte_collection: Optional[StrictStr] = Field(default=None, description="The existing collection name in H2OGPTe.", alias="h2ogpteCollection")
-    default_h2ogpte_model: Optional[V1Model] = Field(default=None, alias="defaultH2ogpteModel")
-    __properties: ClassVar[List[str]] = ["evaluatorIdentifiers", "model", "evaluationTests", "operation", "llmModels", "useCache", "evaluatorsParameters", "modelParameters", "h2ogpteCollection", "defaultH2ogpteModel"]
+    __properties: ClassVar[List[str]] = ["evaluatorIdentifiers", "model", "evaluationTests", "operation", "llmModels", "useCache", "evaluatorsParameters", "modelParameters", "h2ogpteCollection"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -89,9 +88,6 @@ class V1CreateEvaluationRequest(BaseModel):
                 if _item:
                     _items.append(_item.to_dict())
             _dict['evaluationTests'] = _items
-        # override the default output from pydantic by calling `to_dict()` of default_h2ogpte_model
-        if self.default_h2ogpte_model:
-            _dict['defaultH2ogpteModel'] = self.default_h2ogpte_model.to_dict()
         return _dict
 
     @classmethod
@@ -112,8 +108,7 @@ class V1CreateEvaluationRequest(BaseModel):
             "useCache": obj.get("useCache"),
             "evaluatorsParameters": obj.get("evaluatorsParameters"),
             "modelParameters": obj.get("modelParameters"),
-            "h2ogpteCollection": obj.get("h2ogpteCollection"),
-            "defaultH2ogpteModel": V1Model.from_dict(obj["defaultH2ogpteModel"]) if obj.get("defaultH2ogpteModel") is not None else None
+            "h2ogpteCollection": obj.get("h2ogpteCollection")
        })
         return _obj
 
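
The same pattern once more: a 1.0.3 client can neither set nor serialize `defaultH2ogpteModel` on an evaluation request. Sketch (identifier and collection name are invented):

```python
from eval_studio_client.api.models.v1_create_evaluation_request import (
    V1CreateEvaluationRequest,
)

request = V1CreateEvaluationRequest(
    evaluator_identifiers=["example-evaluator"],
    h2ogpte_collection="example-collection",
)
assert "defaultH2ogpteModel" not in request.to_dict()
```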

eval_studio_client/api/models/v1_import_evaluation_request.py:
@@ -32,8 +32,7 @@ class V1ImportEvaluationRequest(BaseModel):
     test_lab: Optional[StrictStr] = Field(default=None, description="Required. The JSON representation of the pre-built test-lab.", alias="testLab")
     operation: Optional[StrictStr] = Field(default=None, description="Required. Resource name of the long-running operation.")
     evaluators_parameters: Optional[Dict[str, StrictStr]] = Field(default=None, description="Optional. Additional evaluators configuration, for all the evaluators used in the evaluation. Key is the evaluator identifier, and the value is a JSON string containing the configuration dictionary.", alias="evaluatorsParameters")
-    default_h2ogpte_model: Optional[V1Model] = Field(default=None, alias="defaultH2ogpteModel")
-    __properties: ClassVar[List[str]] = ["evaluatorIdentifiers", "model", "testLab", "operation", "evaluatorsParameters", "defaultH2ogpteModel"]
+    __properties: ClassVar[List[str]] = ["evaluatorIdentifiers", "model", "testLab", "operation", "evaluatorsParameters"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -77,9 +76,6 @@ class V1ImportEvaluationRequest(BaseModel):
         # override the default output from pydantic by calling `to_dict()` of model
         if self.model:
             _dict['model'] = self.model.to_dict()
-        # override the default output from pydantic by calling `to_dict()` of default_h2ogpte_model
-        if self.default_h2ogpte_model:
-            _dict['defaultH2ogpteModel'] = self.default_h2ogpte_model.to_dict()
         return _dict
 
     @classmethod
@@ -96,8 +92,7 @@ class V1ImportEvaluationRequest(BaseModel):
             "model": V1Model.from_dict(obj["model"]) if obj.get("model") is not None else None,
             "testLab": obj.get("testLab"),
             "operation": obj.get("operation"),
-            "evaluatorsParameters": obj.get("evaluatorsParameters"),
-            "defaultH2ogpteModel": V1Model.from_dict(obj["defaultH2ogpteModel"]) if obj.get("defaultH2ogpteModel") is not None else None
+            "evaluatorsParameters": obj.get("evaluatorsParameters")
         })
         return _obj
 

eval_studio_client/api/models/v1_test_case.py:
@@ -39,8 +39,7 @@ class V1TestCase(BaseModel):
     answer: Optional[StrictStr] = Field(default=None, description="Expected answer text. Model output.")
     constraints: Optional[List[StrictStr]] = Field(default=None, description="Constraints on the model output.")
     condition: Optional[StrictStr] = Field(default=None, description="Optional. Test case output condition, in a form of AIP-160 compliant filter expression.")
-    perturbed_by: Optional[List[StrictStr]] = Field(default=None, description="Output only. The list of perturbators applied to this test case.", alias="perturbedBy")
-    __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "parent", "prompt", "answer", "constraints", "condition", "perturbedBy"]
+    __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "parent", "prompt", "answer", "constraints", "condition"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -79,7 +78,6 @@ class V1TestCase(BaseModel):
         * OpenAPI `readOnly` fields are excluded.
         * OpenAPI `readOnly` fields are excluded.
         * OpenAPI `readOnly` fields are excluded.
-        * OpenAPI `readOnly` fields are excluded.
         """
         excluded_fields: Set[str] = set([
             "name",
@@ -89,7 +87,6 @@ class V1TestCase(BaseModel):
             "updater",
             "delete_time",
             "deleter",
-            "perturbed_by",
         ])
 
         _dict = self.model_dump(
@@ -120,8 +117,7 @@ class V1TestCase(BaseModel):
             "prompt": obj.get("prompt"),
             "answer": obj.get("answer"),
             "constraints": obj.get("constraints"),
-            "condition": obj.get("condition"),
-            "perturbedBy": obj.get("perturbedBy")
+            "condition": obj.get("condition")
         })
         return _obj
 

eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py:
@@ -56,32 +56,14 @@ class TestPerturbationServiceCreatePerturbationRequest(unittest.TestCase):
                     constraints = [
                         ''
                         ],
-                    condition = '',
-                    perturbed_by = [
-                        ''
-                        ], )
+                    condition = '', )
                 ],
                 test_case_relationships = [
                     eval_studio_client.api.models.v1_test_case_relationship.v1TestCaseRelationship(
                         source = '',
                         target = '',
                         type = '', )
-                ],
-                default_h2ogpte_model = eval_studio_client.api.models.v1_model.v1Model(
-                    name = '',
-                    create_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
-                    creator = '',
-                    update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
-                    updater = '',
-                    delete_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
-                    deleter = '',
-                    display_name = '',
-                    description = '',
-                    url = '',
-                    api_key = '',
-                    type = 'MODEL_TYPE_UNSPECIFIED',
-                    parameters = '',
-                    demo = True, )
+                ]
             )
         else:
             return PerturbationServiceCreatePerturbationRequest(

eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py:
@@ -59,10 +59,7 @@ class TestPromptGenerationServiceAutoGeneratePromptsRequest(unittest.TestCase):
                 generators = [
                     'TEST_CASES_GENERATOR_UNSPECIFIED'
                     ],
-                h2ogpte_collection_id = '',
-                topics = [
-                    ''
-                    ]
+                h2ogpte_collection_id = ''
             )
         else:
             return PromptGenerationServiceAutoGeneratePromptsRequest(