@maxim_mazurok/gapi.client.language-v2 0.0.20240217 → 0.0.20240220
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.d.ts +1046 -11
- package/package.json +1 -1
package/index.d.ts
CHANGED
|
@@ -9,7 +9,7 @@
|
|
|
9
9
|
// This file was generated by https://github.com/Maxim-Mazurok/google-api-typings-generator. Please do not edit it manually.
|
|
10
10
|
// In case of any problems please post issue to https://github.com/Maxim-Mazurok/google-api-typings-generator
|
|
11
11
|
// Generated from: https://language.googleapis.com/$discovery/rest?version=v2
|
|
12
|
-
// Revision:
|
|
12
|
+
// Revision: 20240220
|
|
13
13
|
|
|
14
14
|
/// <reference types="gapi.client" />
|
|
15
15
|
|
|
@@ -60,7 +60,17 @@ declare namespace gapi.client {
|
|
|
60
60
|
/** The encoding type used by the API to calculate offsets. */
|
|
61
61
|
encodingType?: string;
|
|
62
62
|
/** Required. The enabled features. */
|
|
63
|
-
features?:
|
|
63
|
+
features?: AnnotateTextRequestFeatures;
|
|
64
|
+
}
|
|
65
|
+
interface AnnotateTextRequestFeatures {
|
|
66
|
+
/** Optional. Classify the full document into categories. */
|
|
67
|
+
classifyText?: boolean;
|
|
68
|
+
/** Optional. Extract document-level sentiment. */
|
|
69
|
+
extractDocumentSentiment?: boolean;
|
|
70
|
+
/** Optional. Extract entities. */
|
|
71
|
+
extractEntities?: boolean;
|
|
72
|
+
/** Optional. Moderate the document for harmful and sensitive categories. */
|
|
73
|
+
moderateText?: boolean;
|
|
64
74
|
}
|
|
65
75
|
interface AnnotateTextResponse {
|
|
66
76
|
/** Categories identified in the input document. */
|
|
@@ -96,6 +106,34 @@ declare namespace gapi.client {
|
|
|
96
106
|
/** Whether the language is officially supported. The API may still return a response when the language is not supported, but it is on a best effort basis. */
|
|
97
107
|
languageSupported?: boolean;
|
|
98
108
|
}
|
|
109
|
+
interface Color {
|
|
110
|
+
/** The fraction of this color that should be applied to the pixel. That is, the final pixel color is defined by the equation: `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` This means that a value of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to a completely transparent color. This uses a wrapper message rather than a simple float scalar so that it is possible to distinguish between a default value and the value being unset. If omitted, this color object is rendered as a solid color (as if the alpha value had been explicitly given a value of 1.0). */
|
|
111
|
+
alpha?: number;
|
|
112
|
+
/** The amount of blue in the color as a value in the interval [0, 1]. */
|
|
113
|
+
blue?: number;
|
|
114
|
+
/** The amount of green in the color as a value in the interval [0, 1]. */
|
|
115
|
+
green?: number;
|
|
116
|
+
/** The amount of red in the color as a value in the interval [0, 1]. */
|
|
117
|
+
red?: number;
|
|
118
|
+
}
|
|
119
|
+
interface CpuMetric {
|
|
120
|
+
/** Required. Number of CPU cores. */
|
|
121
|
+
coreNumber?: string;
|
|
122
|
+
/** Required. Total seconds of core usage, e.g. 4. */
|
|
123
|
+
coreSec?: string;
|
|
124
|
+
/** Required. Type of cpu, e.g. N2. */
|
|
125
|
+
cpuType?: string;
|
|
126
|
+
/** Required. Machine spec, e.g. N1_STANDARD_4. */
|
|
127
|
+
machineSpec?: string;
|
|
128
|
+
/** Billing tracking labels. They do not contain any user data but only the labels set by Vertex Core Infra itself. Tracking labels' keys are defined with special format: goog-[\p{Ll}\p{N}]+ E.g. "key": "goog-k8s-cluster-name","value": "us-east1-b4rk" */
|
|
129
|
+
trackingLabels?: {[P in string]: string};
|
|
130
|
+
}
|
|
131
|
+
interface DiskMetric {
|
|
132
|
+
/** Required. Type of Disk, e.g. REGIONAL_SSD. */
|
|
133
|
+
diskType?: string;
|
|
134
|
+
/** Required. Seconds of physical disk usage, e.g. 3600. */
|
|
135
|
+
gibSec?: string;
|
|
136
|
+
}
|
|
99
137
|
interface Document {
|
|
100
138
|
/** The content of the input in string format. Cloud audit logging exempt since it is based on user data. */
|
|
101
139
|
content?: string;
|
|
@@ -128,15 +166,27 @@ declare namespace gapi.client {
|
|
|
128
166
|
/** The type of the entity mention. */
|
|
129
167
|
type?: string;
|
|
130
168
|
}
|
|
131
|
-
interface
|
|
132
|
-
/**
|
|
133
|
-
|
|
134
|
-
/**
|
|
135
|
-
|
|
136
|
-
/**
|
|
137
|
-
|
|
138
|
-
/**
|
|
139
|
-
|
|
169
|
+
interface GpuMetric {
|
|
170
|
+
/** Required. Seconds of GPU usage, e.g. 3600. */
|
|
171
|
+
gpuSec?: string;
|
|
172
|
+
/** Required. Type of GPU, e.g. NVIDIA_TESLA_V100. */
|
|
173
|
+
gpuType?: string;
|
|
174
|
+
/** Required. Machine spec, e.g. N1_STANDARD_4. */
|
|
175
|
+
machineSpec?: string;
|
|
176
|
+
/** Billing tracking labels. They do not contain any user data but only the labels set by Vertex Core Infra itself. Tracking labels' keys are defined with special format: goog-[\p{Ll}\p{N}]+ E.g. "key": "goog-k8s-cluster-name","value": "us-east1-b4rk" */
|
|
177
|
+
trackingLabels?: {[P in string]: string};
|
|
178
|
+
}
|
|
179
|
+
interface InfraUsage {
|
|
180
|
+
/** Aggregated core metrics since requested start_time. */
|
|
181
|
+
cpuMetrics?: CpuMetric[];
|
|
182
|
+
/** Aggregated persistent disk metrics since requested start_time. */
|
|
183
|
+
diskMetrics?: DiskMetric[];
|
|
184
|
+
/** Aggregated gpu metrics since requested start_time. */
|
|
185
|
+
gpuMetrics?: GpuMetric[];
|
|
186
|
+
/** Aggregated ram metrics since requested start_time. */
|
|
187
|
+
ramMetrics?: RamMetric[];
|
|
188
|
+
/** Aggregated tpu metrics since requested start_time. */
|
|
189
|
+
tpuMetrics?: TpuMetric[];
|
|
140
190
|
}
|
|
141
191
|
interface ModerateTextRequest {
|
|
142
192
|
/** Required. Input document. */
|
|
@@ -150,6 +200,18 @@ declare namespace gapi.client {
|
|
|
150
200
|
/** Harmful and sensitive categories representing the input document. */
|
|
151
201
|
moderationCategories?: ClassificationCategory[];
|
|
152
202
|
}
|
|
203
|
+
interface RamMetric {
|
|
204
|
+
/** Required. VM memory in Gigabyte second, e.g. 3600. Using int64 type to match billing metrics definition. */
|
|
205
|
+
gibSec?: string;
|
|
206
|
+
/** Required. Machine spec, e.g. N1_STANDARD_4. */
|
|
207
|
+
machineSpec?: string;
|
|
208
|
+
/** Required. VM memory in gb. */
|
|
209
|
+
memories?: number;
|
|
210
|
+
/** Required. Type of ram. */
|
|
211
|
+
ramType?: string;
|
|
212
|
+
/** Billing tracking labels. They do not contain any user data but only the labels set by Vertex Core Infra itself. Tracking labels' keys are defined with special format: goog-[\p{Ll}\p{N}]+ E.g. "key": "goog-k8s-cluster-name","value": "us-east1-b4rk" */
|
|
213
|
+
trackingLabels?: {[P in string]: string};
|
|
214
|
+
}
|
|
153
215
|
interface Sentence {
|
|
154
216
|
/** For calls to AnalyzeSentiment or if AnnotateTextRequest.Features.extract_document_sentiment is set to true, this field will contain the sentiment for the sentence. */
|
|
155
217
|
sentiment?: Sentiment;
|
|
@@ -176,6 +238,979 @@ declare namespace gapi.client {
|
|
|
176
238
|
/** The content of the text span, which is a substring of the document. */
|
|
177
239
|
content?: string;
|
|
178
240
|
}
|
|
241
|
+
interface TpuMetric {
|
|
242
|
+
/** Required. Seconds of TPU usage, e.g. 3600. */
|
|
243
|
+
tpuSec?: string;
|
|
244
|
+
/** Required. Type of TPU, e.g. TPU_V2, TPU_V3_POD. */
|
|
245
|
+
tpuType?: string;
|
|
246
|
+
}
|
|
247
|
+
interface XPSArrayStats {
|
|
248
|
+
commonStats?: XPSCommonStats;
|
|
249
|
+
/** Stats of all the values of all arrays, as if they were a single long series of data. The type depends on the element type of the array. */
|
|
250
|
+
memberStats?: XPSDataStats;
|
|
251
|
+
}
|
|
252
|
+
interface XPSBatchPredictResponse {
|
|
253
|
+
/** Examples for batch prediction result. Under full API implementation, results are stored in shared RecordIO of AnnotatedExample protobufs, the annotations field of which is populated by XPS backend. */
|
|
254
|
+
exampleSet?: XPSExampleSet;
|
|
255
|
+
}
|
|
256
|
+
interface XPSBoundingBoxMetricsEntry {
|
|
257
|
+
/** Metrics for each label-match confidence_threshold from 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. */
|
|
258
|
+
confidenceMetricsEntries?: XPSBoundingBoxMetricsEntryConfidenceMetricsEntry[];
|
|
259
|
+
/** The intersection-over-union threshold value used to compute this metrics entry. */
|
|
260
|
+
iouThreshold?: number;
|
|
261
|
+
/** The mean average precision. */
|
|
262
|
+
meanAveragePrecision?: number;
|
|
263
|
+
}
|
|
264
|
+
interface XPSBoundingBoxMetricsEntryConfidenceMetricsEntry {
|
|
265
|
+
/** The confidence threshold value used to compute the metrics. */
|
|
266
|
+
confidenceThreshold?: number;
|
|
267
|
+
/** The harmonic mean of recall and precision. */
|
|
268
|
+
f1Score?: number;
|
|
269
|
+
/** Precision for the given confidence threshold. */
|
|
270
|
+
precision?: number;
|
|
271
|
+
/** Recall for the given confidence threshold. */
|
|
272
|
+
recall?: number;
|
|
273
|
+
}
|
|
274
|
+
interface XPSCategoryStats {
|
|
275
|
+
commonStats?: XPSCommonStats;
|
|
276
|
+
/** The statistics of the top 20 CATEGORY values, ordered by CategoryStats.SingleCategoryStats.count. */
|
|
277
|
+
topCategoryStats?: XPSCategoryStatsSingleCategoryStats[];
|
|
278
|
+
}
|
|
279
|
+
interface XPSCategoryStatsSingleCategoryStats {
|
|
280
|
+
/** The number of occurrences of this value in the series. */
|
|
281
|
+
count?: string;
|
|
282
|
+
/** The CATEGORY value. */
|
|
283
|
+
value?: string;
|
|
284
|
+
}
|
|
285
|
+
interface XPSClassificationEvaluationMetrics {
|
|
286
|
+
/** The Area under precision recall curve metric. */
|
|
287
|
+
auPrc?: number;
|
|
288
|
+
/** The Area Under Receiver Operating Characteristic curve metric. Micro-averaged for the overall evaluation. */
|
|
289
|
+
auRoc?: number;
|
|
290
|
+
/** The Area under precision recall curve metric based on priors. */
|
|
291
|
+
baseAuPrc?: number;
|
|
292
|
+
/** Metrics that have confidence thresholds. Precision-recall curve can be derived from it. */
|
|
293
|
+
confidenceMetricsEntries?: XPSConfidenceMetricsEntry[];
|
|
294
|
+
/** Confusion matrix of the evaluation. Only set for MULTICLASS classification problems where number of annotation specs is no more than 10. Only set for model level evaluation, not for evaluation per label. */
|
|
295
|
+
confusionMatrix?: XPSConfusionMatrix;
|
|
296
|
+
/** The number of examples used for model evaluation. */
|
|
297
|
+
evaluatedExamplesCount?: number;
|
|
298
|
+
/** The Log Loss metric. */
|
|
299
|
+
logLoss?: number;
|
|
300
|
+
}
|
|
301
|
+
interface XPSColorMap {
|
|
302
|
+
/** Should be used during training. */
|
|
303
|
+
annotationSpecIdToken?: string;
|
|
304
|
+
/** This type is deprecated in favor of the IntColor below. This is because google.type.Color represent color has a float which semantically does not reflect discrete classes/categories concept. Moreover, to handle it well we need to have some tolerance when converting to a discretized color. As such, the recommendation is to have API surface still use google.type.Color while internally IntColor is used. */
|
|
305
|
+
color?: Color;
|
|
306
|
+
/** Should be used during preprocessing. */
|
|
307
|
+
displayName?: string;
|
|
308
|
+
intColor?: XPSColorMapIntColor;
|
|
309
|
+
}
|
|
310
|
+
interface XPSColorMapIntColor {
|
|
311
|
+
/** The value should be in range of [0, 255]. */
|
|
312
|
+
blue?: number;
|
|
313
|
+
/** The value should be in range of [0, 255]. */
|
|
314
|
+
green?: number;
|
|
315
|
+
/** The value should be in range of [0, 255]. */
|
|
316
|
+
red?: number;
|
|
317
|
+
}
|
|
318
|
+
interface XPSColumnSpec {
|
|
319
|
+
/** The unique id of the column. When Preprocess, the Tables BE will popuate the order id of the column, which reflects the order of the column inside the table, i.e. 0 means the first column in the table, N-1 means the last column. AutoML BE will persist this order id in Spanner and set the order id here when calling RefreshTablesStats and Train. Note: it's different than the column_spec_id that is generated in AutoML BE. */
|
|
320
|
+
columnId?: number;
|
|
321
|
+
/** The data stats of the column. It's outputed in RefreshTablesStats and a required input for Train. */
|
|
322
|
+
dataStats?: XPSDataStats;
|
|
323
|
+
/** The data type of the column. It's outputed in Preprocess rpc and a required input for RefreshTablesStats and Train. */
|
|
324
|
+
dataType?: XPSDataType;
|
|
325
|
+
/** The display name of the column. It's outputed in Preprocess and a required input for RefreshTablesStats and Train. */
|
|
326
|
+
displayName?: string;
|
|
327
|
+
forecastingMetadata?: XPSColumnSpecForecastingMetadata;
|
|
328
|
+
/** It's outputed in RefreshTablesStats, and a required input in Train. */
|
|
329
|
+
topCorrelatedColumns?: XPSColumnSpecCorrelatedColumn[];
|
|
330
|
+
}
|
|
331
|
+
interface XPSColumnSpecCorrelatedColumn {
|
|
332
|
+
columnId?: number;
|
|
333
|
+
correlationStats?: XPSCorrelationStats;
|
|
334
|
+
}
|
|
335
|
+
interface XPSColumnSpecForecastingMetadata {
|
|
336
|
+
/** The type of the column for FORECASTING model training purposes. */
|
|
337
|
+
columnType?: string;
|
|
338
|
+
}
|
|
339
|
+
interface XPSCommonStats {
|
|
340
|
+
distinctValueCount?: string;
|
|
341
|
+
nullValueCount?: string;
|
|
342
|
+
validValueCount?: string;
|
|
343
|
+
}
|
|
344
|
+
interface XPSConfidenceMetricsEntry {
|
|
345
|
+
/** Metrics are computed with an assumption that the model never return predictions with score lower than this value. */
|
|
346
|
+
confidenceThreshold?: number;
|
|
347
|
+
/** The harmonic mean of recall and precision. */
|
|
348
|
+
f1Score?: number;
|
|
349
|
+
/** The harmonic mean of recall_at1 and precision_at1. */
|
|
350
|
+
f1ScoreAt1?: number;
|
|
351
|
+
/** The number of ground truth labels that are not matched by a model created label. */
|
|
352
|
+
falseNegativeCount?: string;
|
|
353
|
+
/** The number of model created labels that do not match a ground truth label. */
|
|
354
|
+
falsePositiveCount?: string;
|
|
355
|
+
/** False Positive Rate for the given confidence threshold. */
|
|
356
|
+
falsePositiveRate?: number;
|
|
357
|
+
/** The False Positive Rate when only considering the label that has the highest prediction score and not below the confidence threshold for each example. */
|
|
358
|
+
falsePositiveRateAt1?: number;
|
|
359
|
+
/** Metrics are computed with an assumption that the model always returns at most this many predictions (ordered by their score, descendingly), but they all still need to meet the confidence_threshold. */
|
|
360
|
+
positionThreshold?: number;
|
|
361
|
+
/** Precision for the given confidence threshold. */
|
|
362
|
+
precision?: number;
|
|
363
|
+
/** The precision when only considering the label that has the highest prediction score and not below the confidence threshold for each example. */
|
|
364
|
+
precisionAt1?: number;
|
|
365
|
+
/** Recall (true positive rate) for the given confidence threshold. */
|
|
366
|
+
recall?: number;
|
|
367
|
+
/** The recall (true positive rate) when only considering the label that has the highest prediction score and not below the confidence threshold for each example. */
|
|
368
|
+
recallAt1?: number;
|
|
369
|
+
/** The number of labels that were not created by the model, but if they would, they would not match a ground truth label. */
|
|
370
|
+
trueNegativeCount?: string;
|
|
371
|
+
/** The number of model created labels that match a ground truth label. */
|
|
372
|
+
truePositiveCount?: string;
|
|
373
|
+
}
|
|
374
|
+
interface XPSConfusionMatrix {
|
|
375
|
+
/** For the following three repeated fields, only one is intended to be set. annotation_spec_id_token is preferable to be set. ID tokens of the annotation specs used in the confusion matrix. */
|
|
376
|
+
annotationSpecIdToken?: string[];
|
|
377
|
+
/** Category (mainly for segmentation). Set only for image segmentation models. Note: uCAIP Image Segmentation should use annotation_spec_id_token. */
|
|
378
|
+
category?: number[];
|
|
379
|
+
/** Rows in the confusion matrix. The number of rows is equal to the size of `annotation_spec_id_token`. `row[i].value[j]` is the number of examples that have ground truth of the `annotation_spec_id_token[i]` and are predicted as `annotation_spec_id_token[j]` by the model being evaluated. */
|
|
380
|
+
row?: XPSConfusionMatrixRow[];
|
|
381
|
+
/** Sentiment labels used in the confusion matrix. Set only for text sentiment models. For AutoML Text Revamp, use `annotation_spec_id_token` instead and leave this field empty. */
|
|
382
|
+
sentimentLabel?: number[];
|
|
383
|
+
}
|
|
384
|
+
interface XPSConfusionMatrixRow {
|
|
385
|
+
/** Same as above except intended to represent other counts (for e.g. for segmentation this is pixel count). NOTE(params): Only example_count or count is set (oneoff does not support repeated fields unless they are embedded inside another message). */
|
|
386
|
+
count?: string[];
|
|
387
|
+
/** Value of the specific cell in the confusion matrix. The number of values each row has (i.e. the length of the row) is equal to the length of the annotation_spec_id_token field. */
|
|
388
|
+
exampleCount?: number[];
|
|
389
|
+
}
|
|
390
|
+
interface XPSCoreMlFormat {}
|
|
391
|
+
interface XPSCorrelationStats {
|
|
392
|
+
/** The correlation value using the Cramer's V measure. */
|
|
393
|
+
cramersV?: number;
|
|
394
|
+
}
|
|
395
|
+
interface XPSDataErrors {
|
|
396
|
+
/** Number of records having errors associated with the enum. */
|
|
397
|
+
count?: number;
|
|
398
|
+
/** Type of the error. */
|
|
399
|
+
errorType?: string;
|
|
400
|
+
}
|
|
401
|
+
interface XPSDataStats {
|
|
402
|
+
/** The statistics for ARRAY DataType. */
|
|
403
|
+
arrayStats?: XPSArrayStats;
|
|
404
|
+
/** The statistics for CATEGORY DataType. */
|
|
405
|
+
categoryStats?: XPSCategoryStats;
|
|
406
|
+
/** The number of distinct values. */
|
|
407
|
+
distinctValueCount?: string;
|
|
408
|
+
/** The statistics for FLOAT64 DataType. */
|
|
409
|
+
float64Stats?: XPSFloat64Stats;
|
|
410
|
+
/** The number of values that are null. */
|
|
411
|
+
nullValueCount?: string;
|
|
412
|
+
/** The statistics for STRING DataType. */
|
|
413
|
+
stringStats?: XPSStringStats;
|
|
414
|
+
/** The statistics for STRUCT DataType. */
|
|
415
|
+
structStats?: XPSStructStats;
|
|
416
|
+
/** The statistics for TIMESTAMP DataType. */
|
|
417
|
+
timestampStats?: XPSTimestampStats;
|
|
418
|
+
/** The number of values that are valid. */
|
|
419
|
+
validValueCount?: string;
|
|
420
|
+
}
|
|
421
|
+
interface XPSDataType {
|
|
422
|
+
/** The highly compatible data types to this data type. */
|
|
423
|
+
compatibleDataTypes?: XPSDataType[];
|
|
424
|
+
/** If type_code == ARRAY, then `list_element_type` is the type of the elements. */
|
|
425
|
+
listElementType?: XPSDataType;
|
|
426
|
+
/** If true, this DataType can also be `null`. */
|
|
427
|
+
nullable?: boolean;
|
|
428
|
+
/** If type_code == STRUCT, then `struct_type` provides type information for the struct's fields. */
|
|
429
|
+
structType?: XPSStructType;
|
|
430
|
+
/** If type_code == TIMESTAMP then `time_format` provides the format in which that time field is expressed. The time_format must be written in `strftime` syntax. If time_format is not set, then the default format as described on the field is used. */
|
|
431
|
+
timeFormat?: string;
|
|
432
|
+
/** Required. The TypeCode for this type. */
|
|
433
|
+
typeCode?: string;
|
|
434
|
+
}
|
|
435
|
+
interface XPSDockerFormat {
|
|
436
|
+
/** Optional. Additional cpu information describing the requirements for the to be exported model files. */
|
|
437
|
+
cpuArchitecture?: string;
|
|
438
|
+
/** Optional. Additional gpu information describing the requirements for the to be exported model files. */
|
|
439
|
+
gpuArchitecture?: string;
|
|
440
|
+
}
|
|
441
|
+
interface XPSEdgeTpuTfLiteFormat {}
|
|
442
|
+
interface XPSEvaluationMetrics {
|
|
443
|
+
/** The annotation_spec for which this evaluation metrics instance had been created. Empty iff this is an overall model evaluation (like Tables evaluation metrics), i.e. aggregated across all labels. The value comes from the input annotations in AnnotatedExample. For MVP product or for text sentiment models where annotation_spec_id_token is not available, set label instead. */
|
|
444
|
+
annotationSpecIdToken?: string;
|
|
445
|
+
/** The integer category label for which this evaluation metric instance had been created. Valid categories are 0 or higher. Overall model evaluation should set this to negative values (rather than implicit zero). Only used for Image Segmentation (prefer to set annotation_spec_id_token instead). Note: uCAIP Image Segmentation should use annotation_spec_id_token. */
|
|
446
|
+
category?: number;
|
|
447
|
+
/** The number of examples used to create this evaluation metrics instance. */
|
|
448
|
+
evaluatedExampleCount?: number;
|
|
449
|
+
imageClassificationEvalMetrics?: XPSClassificationEvaluationMetrics;
|
|
450
|
+
imageObjectDetectionEvalMetrics?: XPSImageObjectDetectionEvaluationMetrics;
|
|
451
|
+
imageSegmentationEvalMetrics?: XPSImageSegmentationEvaluationMetrics;
|
|
452
|
+
/** The label for which this evaluation metrics instance had been created. Empty iff this is an overall model evaluation (like Tables evaluation metrics), i.e. aggregated across all labels. The label maps to AnnotationSpec.display_name in Public API protos. Only used by MVP implementation and text sentiment FULL implementation. */
|
|
453
|
+
label?: string;
|
|
454
|
+
regressionEvalMetrics?: XPSRegressionEvaluationMetrics;
|
|
455
|
+
tablesClassificationEvalMetrics?: XPSClassificationEvaluationMetrics;
|
|
456
|
+
tablesEvalMetrics?: XPSTablesEvaluationMetrics;
|
|
457
|
+
textClassificationEvalMetrics?: XPSClassificationEvaluationMetrics;
|
|
458
|
+
textExtractionEvalMetrics?: XPSTextExtractionEvaluationMetrics;
|
|
459
|
+
textSentimentEvalMetrics?: XPSTextSentimentEvaluationMetrics;
|
|
460
|
+
translationEvalMetrics?: XPSTranslationEvaluationMetrics;
|
|
461
|
+
videoActionRecognitionEvalMetrics?: XPSVideoActionRecognitionEvaluationMetrics;
|
|
462
|
+
videoClassificationEvalMetrics?: XPSClassificationEvaluationMetrics;
|
|
463
|
+
videoObjectTrackingEvalMetrics?: XPSVideoObjectTrackingEvaluationMetrics;
|
|
464
|
+
}
|
|
465
|
+
interface XPSEvaluationMetricsSet {
|
|
466
|
+
/** Inline EvaluationMetrics - should be relatively small. For passing large quantities of exhaustive metrics, use file_spec. */
|
|
467
|
+
evaluationMetrics?: XPSEvaluationMetrics[];
|
|
468
|
+
/** File spec containing evaluation metrics of a model, must point to RecordIO file(s) of intelligence.cloud.automl.xps.EvaluationMetrics messages. */
|
|
469
|
+
fileSpec?: XPSFileSpec;
|
|
470
|
+
/** Number of the evaluation metrics (usually one per label plus overall). */
|
|
471
|
+
numEvaluationMetrics?: string;
|
|
472
|
+
}
|
|
473
|
+
interface XPSExampleSet {
|
|
474
|
+
/** File spec of the examples or input sources. */
|
|
475
|
+
fileSpec?: XPSFileSpec;
|
|
476
|
+
/** Fingerprint of the example set. */
|
|
477
|
+
fingerprint?: string;
|
|
478
|
+
/** Number of examples. */
|
|
479
|
+
numExamples?: string;
|
|
480
|
+
/** Number of input sources. */
|
|
481
|
+
numInputSources?: string;
|
|
482
|
+
}
|
|
483
|
+
interface XPSExportModelOutputConfig {
|
|
484
|
+
coreMlFormat?: any;
|
|
485
|
+
dockerFormat?: XPSDockerFormat;
|
|
486
|
+
edgeTpuTfLiteFormat?: any;
|
|
487
|
+
/** For any model and format: If true, will additionally export FirebaseExportedModelInfo in a firebase.txt file. */
|
|
488
|
+
exportFirebaseAuxiliaryInfo?: boolean;
|
|
489
|
+
/** The Google Contained Registry (GCR) path the exported files to be pushed to. This location is set if the exported format is DOCKDER. */
|
|
490
|
+
outputGcrUri?: string;
|
|
491
|
+
/** The Google Cloud Storage (GCS) directory where XPS will output the exported models and related files. Format: gs://bucket/directory */
|
|
492
|
+
outputGcsUri?: string;
|
|
493
|
+
tfJsFormat?: any;
|
|
494
|
+
tfLiteFormat?: any;
|
|
495
|
+
tfSavedModelFormat?: any;
|
|
496
|
+
}
|
|
497
|
+
interface XPSFileSpec {
|
|
498
|
+
/** Deprecated. Use file_spec. */
|
|
499
|
+
directoryPath?: string;
|
|
500
|
+
fileFormat?: string;
|
|
501
|
+
/** Single file path, or file pattern of format "/path/to/file@shard_count". E.g. /cns/cell-d/somewhere/file@2 is expanded to two files: /cns/cell-d/somewhere/file-00000-of-00002 and /cns/cell-d/somewhere/file-00001-of-00002. */
|
|
502
|
+
fileSpec?: string;
|
|
503
|
+
/** Deprecated. Use file_spec. */
|
|
504
|
+
singleFilePath?: string;
|
|
505
|
+
}
|
|
506
|
+
interface XPSFloat64Stats {
|
|
507
|
+
commonStats?: XPSCommonStats;
|
|
508
|
+
/** Histogram buckets of the data series. Sorted by the min value of the bucket, ascendingly, and the number of the buckets is dynamically generated. The buckets are non-overlapping and completely cover whole FLOAT64 range with min of first bucket being `"-Infinity"`, and max of the last one being `"Infinity"`. */
|
|
509
|
+
histogramBuckets?: XPSFloat64StatsHistogramBucket[];
|
|
510
|
+
/** The mean of the series. */
|
|
511
|
+
mean?: number;
|
|
512
|
+
/** Ordered from 0 to k k-quantile values of the data series of n values. The value at index i is, approximately, the i*n/k-th smallest value in the series; for i = 0 and i = k these are, respectively, the min and max values. */
|
|
513
|
+
quantiles?: number[];
|
|
514
|
+
/** The standard deviation of the series. */
|
|
515
|
+
standardDeviation?: number;
|
|
516
|
+
}
|
|
517
|
+
interface XPSFloat64StatsHistogramBucket {
|
|
518
|
+
/** The number of data values that are in the bucket, i.e. are between min and max values. */
|
|
519
|
+
count?: string;
|
|
520
|
+
/** The maximum value of the bucket, exclusive unless max = `"Infinity"`, in which case it's inclusive. */
|
|
521
|
+
max?: number;
|
|
522
|
+
/** The minimum value of the bucket, inclusive. */
|
|
523
|
+
min?: number;
|
|
524
|
+
}
|
|
525
|
+
interface XPSImageClassificationTrainResponse {
|
|
526
|
+
/** Total number of classes. */
|
|
527
|
+
classCount?: string;
|
|
528
|
+
/** Information of downloadable models that are pre-generated as part of training flow and will be persisted in AutoMl backend. Populated for AutoMl requests. */
|
|
529
|
+
exportModelSpec?: XPSImageExportModelSpec;
|
|
530
|
+
/** ## The fields below are only populated under uCAIP request scope. */
|
|
531
|
+
modelArtifactSpec?: XPSImageModelArtifactSpec;
|
|
532
|
+
modelServingSpec?: XPSImageModelServingSpec;
|
|
533
|
+
/** Stop reason for training job, e.g. 'TRAIN_BUDGET_REACHED', 'MODEL_CONVERGED', 'MODEL_EARLY_STOPPED'. */
|
|
534
|
+
stopReason?: string;
|
|
535
|
+
/** The actual cost to create this model. - For edge type model, the cost is expressed in node hour. - For cloud type model,the cost is expressed in compute hour. - Populated for models created before GA. To be deprecated after GA. */
|
|
536
|
+
trainCostInNodeTime?: string;
|
|
537
|
+
/** The actual training cost, expressed in node seconds. Populated for models trained in node time. */
|
|
538
|
+
trainCostNodeSeconds?: string;
|
|
539
|
+
}
|
|
540
|
+
interface XPSImageExportModelSpec {
|
|
541
|
+
/** Contains the model format and internal location of the model files to be exported/downloaded. Use the GCS bucket name which is provided via TrainRequest.gcs_bucket_name to store the model files. */
|
|
542
|
+
exportModelOutputConfig?: XPSExportModelOutputConfig[];
|
|
543
|
+
}
|
|
544
|
+
interface XPSImageModelArtifactSpec {
|
|
545
|
+
/** The Tensorflow checkpoint files. e.g. Used for resumable training. */
|
|
546
|
+
checkpointArtifact?: XPSModelArtifactItem;
|
|
547
|
+
/** The model binary files in different formats for model export. */
|
|
548
|
+
exportArtifact?: XPSModelArtifactItem[];
|
|
549
|
+
/** GCS uri of decoded labels file for model export 'dict.txt'. */
|
|
550
|
+
labelGcsUri?: string;
|
|
551
|
+
/** The default model binary file used for serving (e.g. online predict, batch predict) via public Cloud AI Platform API. */
|
|
552
|
+
servingArtifact?: XPSModelArtifactItem;
|
|
553
|
+
/** GCS uri prefix of Tensorflow JavaScript binary files 'groupX-shardXofX.bin' Deprecated. */
|
|
554
|
+
tfJsBinaryGcsPrefix?: string;
|
|
555
|
+
/** GCS uri of Tensorflow Lite metadata 'tflite_metadata.json'. */
|
|
556
|
+
tfLiteMetadataGcsUri?: string;
|
|
557
|
+
}
|
|
558
|
+
interface XPSImageModelServingSpec {
|
|
559
|
+
/** Populate under uCAIP request scope. */
|
|
560
|
+
modelThroughputEstimation?: XPSImageModelServingSpecModelThroughputEstimation[];
|
|
561
|
+
/** An estimated value of how much traffic a node can serve. Populated for AutoMl request only. */
|
|
562
|
+
nodeQps?: number;
|
|
563
|
+
/** ## The fields below are only populated under uCAIP request scope. https://cloud.google.com/ml-engine/docs/runtime-version-list */
|
|
564
|
+
tfRuntimeVersion?: string;
|
|
565
|
+
}
|
|
566
|
+
interface XPSImageModelServingSpecModelThroughputEstimation {
|
|
567
|
+
computeEngineAcceleratorType?: string;
|
|
568
|
+
/** Estimated latency. */
|
|
569
|
+
latencyInMilliseconds?: number;
|
|
570
|
+
/** The approximate qps a deployed node can serve. */
|
|
571
|
+
nodeQps?: number;
|
|
572
|
+
servomaticPartitionType?: string;
|
|
573
|
+
}
|
|
574
|
+
interface XPSImageObjectDetectionEvaluationMetrics {
|
|
575
|
+
/** The single metric for bounding boxes evaluation: the mean_average_precision averaged over all bounding_box_metrics_entries. */
|
|
576
|
+
boundingBoxMeanAveragePrecision?: number;
|
|
577
|
+
/** The bounding boxes match metrics for each Intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. */
|
|
578
|
+
boundingBoxMetricsEntries?: XPSBoundingBoxMetricsEntry[];
|
|
579
|
+
/** The total number of bounding boxes (i.e. summed over all images) the ground truth used to create this evaluation had. */
|
|
580
|
+
evaluatedBoundingBoxCount?: number;
|
|
581
|
+
}
|
|
582
|
+
interface XPSImageObjectDetectionModelSpec {
|
|
583
|
+
/** Total number of classes. */
|
|
584
|
+
classCount?: string;
|
|
585
|
+
exportModelSpec?: XPSImageExportModelSpec;
|
|
586
|
+
/** Max number of bounding box. */
|
|
587
|
+
maxBoundingBoxCount?: string;
|
|
588
|
+
/** ## The fields below are only populated under uCAIP request scope. */
|
|
589
|
+
modelArtifactSpec?: XPSImageModelArtifactSpec;
|
|
590
|
+
modelServingSpec?: XPSImageModelServingSpec;
|
|
591
|
+
/** Stop reason for training job, e.g. 'TRAIN_BUDGET_REACHED', 'MODEL_CONVERGED'. */
|
|
592
|
+
stopReason?: string;
|
|
593
|
+
/** The actual train cost of creating this model, expressed in node seconds, i.e. 3,600 value in this field means 1 node hour. */
|
|
594
|
+
trainCostNodeSeconds?: string;
|
|
595
|
+
}
|
|
596
|
+
interface XPSImageSegmentationEvaluationMetrics {
|
|
597
|
+
/** Metrics that have confidence thresholds. Precision-recall curve can be derived from it. */
|
|
598
|
+
confidenceMetricsEntries?: XPSImageSegmentationEvaluationMetricsConfidenceMetricsEntry[];
|
|
599
|
+
}
|
|
600
|
+
interface XPSImageSegmentationEvaluationMetricsConfidenceMetricsEntry {
|
|
601
|
+
/** The confidence threshold value used to compute the metrics. */
|
|
602
|
+
confidenceThreshold?: number;
|
|
603
|
+
/** Confusion matrix of the per confidence_threshold evaluation. Pixel counts are set here. Only set for model level evaluation, not for evaluation per label. */
|
|
604
|
+
confusionMatrix?: XPSConfusionMatrix;
|
|
605
|
+
/** DSC or the F1 score: The harmonic mean of recall and precision. */
|
|
606
|
+
diceScoreCoefficient?: number;
|
|
607
|
+
/** IOU score. */
|
|
608
|
+
iouScore?: number;
|
|
609
|
+
/** Precision for the given confidence threshold. */
|
|
610
|
+
precision?: number;
|
|
611
|
+
/** Recall for the given confidence threshold. */
|
|
612
|
+
recall?: number;
|
|
613
|
+
}
|
|
614
|
+
interface XPSImageSegmentationTrainResponse {
|
|
615
|
+
/** Color map of the model. */
|
|
616
|
+
colorMaps?: XPSColorMap[];
|
|
617
|
+
/** NOTE: These fields are not used/needed in EAP but will be set later. */
|
|
618
|
+
exportModelSpec?: XPSImageExportModelSpec;
|
|
619
|
+
/** ## The fields below are only populated under uCAIP request scope. Model artifact spec stores and model gcs pathes and related metadata */
|
|
620
|
+
modelArtifactSpec?: XPSImageModelArtifactSpec;
|
|
621
|
+
modelServingSpec?: XPSImageModelServingSpec;
|
|
622
|
+
/** Stop reason for training job, e.g. 'TRAIN_BUDGET_REACHED', 'MODEL_CONVERGED'. */
|
|
623
|
+
stopReason?: string;
|
|
624
|
+
/** The actual train cost of creating this model, expressed in node seconds, i.e. 3,600 value in this field means 1 node hour. */
|
|
625
|
+
trainCostNodeSeconds?: string;
|
|
626
|
+
}
|
|
627
|
+
interface XPSIntegratedGradientsAttribution {
|
|
628
|
+
/** The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is within the desired error range. Valid range of its value is [1, 100], inclusively. */
|
|
629
|
+
stepCount?: number;
|
|
630
|
+
}
|
|
631
|
+
interface XPSMetricEntry {
|
|
632
|
+
/** For billing metrics that are using legacy sku's, set the legacy billing metric id here. This will be sent to Chemist as the "cloudbilling.googleapis.com/argentum_metric_id" label. Otherwise leave empty. */
|
|
633
|
+
argentumMetricId?: string;
|
|
634
|
+
/** A double value. */
|
|
635
|
+
doubleValue?: number;
|
|
636
|
+
/** A signed 64-bit integer value. */
|
|
637
|
+
int64Value?: string;
|
|
638
|
+
/** The metric name defined in the service configuration. */
|
|
639
|
+
metricName?: string;
|
|
640
|
+
/** Billing system labels for this (metric, value) pair. */
|
|
641
|
+
systemLabels?: XPSMetricEntryLabel[];
|
|
642
|
+
}
|
|
643
|
+
interface XPSMetricEntryLabel {
|
|
644
|
+
/** The name of the label. */
|
|
645
|
+
labelName?: string;
|
|
646
|
+
/** The value of the label. */
|
|
647
|
+
labelValue?: string;
|
|
648
|
+
}
|
|
649
|
+
interface XPSModelArtifactItem {
|
|
650
|
+
/** The model artifact format. */
|
|
651
|
+
artifactFormat?: string;
|
|
652
|
+
/** The Google Cloud Storage (GCS) uri that stores the model binary files. */
|
|
653
|
+
gcsUri?: string;
|
|
654
|
+
}
|
|
655
|
+
interface XPSPreprocessResponse {
|
|
656
|
+
/** Preprocessed examples, that are to be imported into AutoML storage. This should point to RecordIO file(s) of PreprocessedExample messages. The PreprocessedExample.mvp_training_data-s returned here are later verbatim passed to Train() call in TrainExample.mvp_training_data. */
|
|
657
|
+
outputExampleSet?: XPSExampleSet;
|
|
658
|
+
speechPreprocessResp?: XPSSpeechPreprocessResponse;
|
|
659
|
+
tablesPreprocessResponse?: XPSTablesPreprocessResponse;
|
|
660
|
+
translationPreprocessResp?: XPSTranslationPreprocessResponse;
|
|
661
|
+
}
|
|
662
|
+
interface XPSRegressionEvaluationMetrics {
|
|
663
|
+
/** Mean Absolute Error (MAE). */
|
|
664
|
+
meanAbsoluteError?: number;
|
|
665
|
+
/** Mean absolute percentage error. Only set if all ground truth values are positive. */
|
|
666
|
+
meanAbsolutePercentageError?: number;
|
|
667
|
+
/** A list of actual versus predicted points for the model being evaluated. */
|
|
668
|
+
regressionMetricsEntries?: XPSRegressionMetricsEntry[];
|
|
669
|
+
/** Root Mean Squared Error (RMSE). */
|
|
670
|
+
rootMeanSquaredError?: number;
|
|
671
|
+
/** Root mean squared log error. */
|
|
672
|
+
rootMeanSquaredLogError?: number;
|
|
673
|
+
/** R squared. */
|
|
674
|
+
rSquared?: number;
|
|
675
|
+
}
|
|
676
|
+
interface XPSRegressionMetricsEntry {
|
|
677
|
+
/** The observed value for a row in the dataset. */
|
|
678
|
+
predictedValue?: number;
|
|
679
|
+
/** The actual target value for a row in the dataset. */
|
|
680
|
+
trueValue?: number;
|
|
681
|
+
}
|
|
682
|
+
interface XPSReportingMetrics {
|
|
683
|
+
/** The effective time training used. If set, this is used for quota management and billing. Deprecated. AutoML BE doesn't use this. Don't set. */
|
|
684
|
+
effectiveTrainingDuration?: string;
|
|
685
|
+
/** One entry per metric name. The values must be aggregated per metric name. */
|
|
686
|
+
metricEntries?: XPSMetricEntry[];
|
|
687
|
+
}
|
|
688
|
+
interface XPSResponseExplanationMetadata {
|
|
689
|
+
/** Metadata of the input. */
|
|
690
|
+
inputs?: {[P in string]: XPSResponseExplanationMetadataInputMetadata};
|
|
691
|
+
/** Metadata of the output. */
|
|
692
|
+
outputs?: {[P in string]: XPSResponseExplanationMetadataOutputMetadata};
|
|
693
|
+
}
|
|
694
|
+
interface XPSResponseExplanationMetadataInputMetadata {
|
|
695
|
+
/** Name of the input tensor for this model. Only needed in train response. */
|
|
696
|
+
inputTensorName?: string;
|
|
697
|
+
/** Modality of the feature. Valid values are: numeric, image. Defaults to numeric. */
|
|
698
|
+
modality?: string;
|
|
699
|
+
/** Visualization configurations for image explanation. */
|
|
700
|
+
visualizationConfig?: XPSVisualization;
|
|
701
|
+
}
|
|
702
|
+
interface XPSResponseExplanationMetadataOutputMetadata {
|
|
703
|
+
/** Name of the output tensor. Only needed in train response. */
|
|
704
|
+
outputTensorName?: string;
|
|
705
|
+
}
|
|
706
|
+
interface XPSResponseExplanationParameters {
|
|
707
|
+
/** An attribution method that computes Aumann-Shapley values taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 */
|
|
708
|
+
integratedGradientsAttribution?: XPSIntegratedGradientsAttribution;
|
|
709
|
+
/** An attribution method that redistributes Integrated Gradients attribution to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 XRAI currently performs better on natural images, like a picture of a house or an animal. If the images are taken in artificial environments, like a lab or manufacturing line, or from diagnostic equipment, like x-rays or quality-control cameras, use Integrated Gradients instead. */
|
|
710
|
+
xraiAttribution?: XPSXraiAttribution;
|
|
711
|
+
}
|
|
712
|
+
interface XPSResponseExplanationSpec {
|
|
713
|
+
/** Explanation type. For AutoML Image Classification models, possible values are: * `image-integrated-gradients` * `image-xrai` */
|
|
714
|
+
explanationType?: string;
|
|
715
|
+
/** Metadata describing the Model's input and output for explanation. */
|
|
716
|
+
metadata?: XPSResponseExplanationMetadata;
|
|
717
|
+
/** Parameters that configure explaining of the Model's predictions. */
|
|
718
|
+
parameters?: XPSResponseExplanationParameters;
|
|
719
|
+
}
|
|
720
|
+
interface XPSRow {
|
|
721
|
+
/** The ids of the columns. Note: The below `values` field must match order of this field, if this field is set. */
|
|
722
|
+
columnIds?: number[];
|
|
723
|
+
/** The values of the row cells, given in the same order as the column_ids. If column_ids is not set, then in the same order as the input_feature_column_ids in TablesModelMetadata. */
|
|
724
|
+
values?: any[];
|
|
725
|
+
}
|
|
726
|
+
interface XPSSpeechEvaluationMetrics {
|
|
727
|
+
/** Evaluation metrics for all submodels contained in this model. */
|
|
728
|
+
subModelEvaluationMetrics?: XPSSpeechEvaluationMetricsSubModelEvaluationMetric[];
|
|
729
|
+
}
|
|
730
|
+
interface XPSSpeechEvaluationMetricsSubModelEvaluationMetric {
|
|
731
|
+
/** Type of the biasing model. */
|
|
732
|
+
biasingModelType?: string;
|
|
733
|
+
/** If true then it means we have an enhanced version of the biasing models. */
|
|
734
|
+
isEnhancedModel?: boolean;
|
|
735
|
+
numDeletions?: number;
|
|
736
|
+
numInsertions?: number;
|
|
737
|
+
numSubstitutions?: number;
|
|
738
|
+
/** Number of utterances used in the wer computation. */
|
|
739
|
+
numUtterances?: number;
|
|
740
|
+
/** Number of words over which the word error rate was computed. */
|
|
741
|
+
numWords?: number;
|
|
742
|
+
/** Below fields are used for debugging purposes */
|
|
743
|
+
sentenceAccuracy?: number;
|
|
744
|
+
/** Word error rate (standard error metric used for speech recognition). */
|
|
745
|
+
wer?: number;
|
|
746
|
+
}
|
|
747
|
+
interface XPSSpeechModelSpec {
|
|
748
|
+
/** Required for speech xps backend. Speech xps has to use dataset_id and model_id as the primary key in db so that speech API can query the db directly. */
|
|
749
|
+
datasetId?: string;
|
|
750
|
+
language?: string;
|
|
751
|
+
/** Model specs for all submodels contained in this model. */
|
|
752
|
+
subModelSpecs?: XPSSpeechModelSpecSubModelSpec[];
|
|
753
|
+
}
|
|
754
|
+
interface XPSSpeechModelSpecSubModelSpec {
|
|
755
|
+
/** Type of the biasing model. */
|
|
756
|
+
biasingModelType?: string;
|
|
757
|
+
/** In S3, Recognition ClientContextId.client_id */
|
|
758
|
+
clientId?: string;
|
|
759
|
+
/** In S3, Recognition ClientContextId.context_id */
|
|
760
|
+
contextId?: string;
|
|
761
|
+
/** If true then it means we have an enhanced version of the biasing models. */
|
|
762
|
+
isEnhancedModel?: boolean;
|
|
763
|
+
}
|
|
764
|
+
interface XPSSpeechPreprocessResponse {
|
|
765
|
+
/** Location od shards of sstables (test data) of DataUtterance protos. */
|
|
766
|
+
cnsTestDataPath?: string;
|
|
767
|
+
/** Location of shards of sstables (training data) of DataUtterance protos. */
|
|
768
|
+
cnsTrainDataPath?: string;
|
|
769
|
+
/** The metrics for prebuilt speech models. They are included here because there is no prebuilt speech models stored in the AutoML. */
|
|
770
|
+
prebuiltModelEvaluationMetrics?: XPSSpeechEvaluationMetrics;
|
|
771
|
+
/** Stats associated with the data. */
|
|
772
|
+
speechPreprocessStats?: XPSSpeechPreprocessStats;
|
|
773
|
+
}
|
|
774
|
+
interface XPSSpeechPreprocessStats {
|
|
775
|
+
/** Different types of data errors and the counts associated with them. */
|
|
776
|
+
dataErrors?: XPSDataErrors[];
|
|
777
|
+
/** The number of rows marked HUMAN_LABELLED */
|
|
778
|
+
numHumanLabeledExamples?: number;
|
|
779
|
+
/** The number of samples found in the previously recorded logs data. */
|
|
780
|
+
numLogsExamples?: number;
|
|
781
|
+
/** The number of rows marked as MACHINE_TRANSCRIBED */
|
|
782
|
+
numMachineTranscribedExamples?: number;
|
|
783
|
+
/** The number of examples labelled as TEST by Speech xps server. */
|
|
784
|
+
testExamplesCount?: number;
|
|
785
|
+
/** The number of sentences in the test data set. */
|
|
786
|
+
testSentencesCount?: number;
|
|
787
|
+
/** The number of words in the test data set. */
|
|
788
|
+
testWordsCount?: number;
|
|
789
|
+
/** The number of examples labeled as TRAIN by Speech xps server. */
|
|
790
|
+
trainExamplesCount?: number;
|
|
791
|
+
/** The number of sentences in the training data set. */
|
|
792
|
+
trainSentencesCount?: number;
|
|
793
|
+
/** The number of words in the training data set. */
|
|
794
|
+
trainWordsCount?: number;
|
|
795
|
+
}
|
|
796
|
+
interface XPSStringStats {
|
|
797
|
+
commonStats?: XPSCommonStats;
|
|
798
|
+
/** The statistics of the top 20 unigrams, ordered by StringStats.UnigramStats.count. */
|
|
799
|
+
topUnigramStats?: XPSStringStatsUnigramStats[];
|
|
800
|
+
}
|
|
801
|
+
interface XPSStringStatsUnigramStats {
|
|
802
|
+
/** The number of occurrences of this unigram in the series. */
|
|
803
|
+
count?: string;
|
|
804
|
+
/** The unigram. */
|
|
805
|
+
value?: string;
|
|
806
|
+
}
|
|
807
|
+
interface XPSStructStats {
|
|
808
|
+
commonStats?: XPSCommonStats;
|
|
809
|
+
/** Map from a field name of the struct to data stats aggregated over series of all data in that field across all the structs. */
|
|
810
|
+
fieldStats?: {[P in string]: XPSDataStats};
|
|
811
|
+
}
|
|
812
|
+
interface XPSStructType {
|
|
813
|
+
/** Unordered map of struct field names to their data types. */
|
|
814
|
+
fields?: {[P in string]: XPSDataType};
|
|
815
|
+
}
|
|
816
|
+
interface XPSTablesClassificationMetrics {
|
|
817
|
+
/** Metrics building a curve. */
|
|
818
|
+
curveMetrics?: XPSTablesClassificationMetricsCurveMetrics[];
|
|
819
|
+
}
|
|
820
|
+
interface XPSTablesClassificationMetricsCurveMetrics {
|
|
821
|
+
/** The area under the precision-recall curve. */
|
|
822
|
+
aucPr?: number;
|
|
823
|
+
/** The area under receiver operating characteristic curve. */
|
|
824
|
+
aucRoc?: number;
|
|
825
|
+
/** Metrics that have confidence thresholds. Precision-recall curve and ROC curve can be derived from them. */
|
|
826
|
+
confidenceMetricsEntries?: XPSTablesConfidenceMetricsEntry[];
|
|
827
|
+
/** The Log loss metric. */
|
|
828
|
+
logLoss?: number;
|
|
829
|
+
/** The position threshold value used to compute the metrics. */
|
|
830
|
+
positionThreshold?: number;
|
|
831
|
+
/** The CATEGORY row value (for ARRAY unnested) the curve metrics are for. */
|
|
832
|
+
value?: string;
|
|
833
|
+
}
|
|
834
|
+
interface XPSTablesConfidenceMetricsEntry {
|
|
835
|
+
/** The confidence threshold value used to compute the metrics. */
|
|
836
|
+
confidenceThreshold?: number;
|
|
837
|
+
/** The harmonic mean of recall and precision. (2 * precision * recall) / (precision + recall) */
|
|
838
|
+
f1Score?: number;
|
|
839
|
+
/** False negative count. */
|
|
840
|
+
falseNegativeCount?: string;
|
|
841
|
+
/** False positive count. */
|
|
842
|
+
falsePositiveCount?: string;
|
|
843
|
+
/** FPR = #false positives / (#false positives + #true negatives) */
|
|
844
|
+
falsePositiveRate?: number;
|
|
845
|
+
/** Precision = #true positives / (#true positives + #false positives). */
|
|
846
|
+
precision?: number;
|
|
847
|
+
/** Recall = #true positives / (#true positives + #false negatives). */
|
|
848
|
+
recall?: number;
|
|
849
|
+
/** True negative count. */
|
|
850
|
+
trueNegativeCount?: string;
|
|
851
|
+
/** True positive count. */
|
|
852
|
+
truePositiveCount?: string;
|
|
853
|
+
/** TPR = #true positives / (#true positives + #false negatvies) */
|
|
854
|
+
truePositiveRate?: number;
|
|
855
|
+
}
|
|
856
|
+
interface XPSTablesDatasetMetadata {
|
|
857
|
+
/** Id the column to split the table. */
|
|
858
|
+
mlUseColumnId?: number;
|
|
859
|
+
/** Primary table. */
|
|
860
|
+
primaryTableSpec?: XPSTableSpec;
|
|
861
|
+
/** (the column id : its CorrelationStats with target column). */
|
|
862
|
+
targetColumnCorrelations?: {[P in string]: XPSCorrelationStats};
|
|
863
|
+
/** Id of the primary table column that should be used as the training label. */
|
|
864
|
+
targetColumnId?: number;
|
|
865
|
+
/** Id of the primary table column that should be used as the weight column. */
|
|
866
|
+
weightColumnId?: number;
|
|
867
|
+
}
|
|
868
|
+
interface XPSTablesEvaluationMetrics {
|
|
869
|
+
/** Classification metrics. */
|
|
870
|
+
classificationMetrics?: XPSTablesClassificationMetrics;
|
|
871
|
+
/** Regression metrics. */
|
|
872
|
+
regressionMetrics?: XPSTablesRegressionMetrics;
|
|
873
|
+
}
|
|
874
|
+
interface XPSTablesModelColumnInfo {
|
|
875
|
+
/** The ID of the column. */
|
|
876
|
+
columnId?: number;
|
|
877
|
+
/** When given as part of a Model: Measurement of how much model predictions correctness on the TEST data depend on values in this column. A value between 0 and 1, higher means higher influence. These values are normalized - for all input feature columns of a given model they add to 1. When given back by Predict or Batch Predict: Measurement of how impactful for the prediction returned for the given row the value in this column was. Specifically, the feature importance specifies the marginal contribution that the feature made to the prediction score compared to the baseline score. These values are computed using the Sampled Shapley method. */
|
|
878
|
+
featureImportance?: number;
|
|
879
|
+
}
|
|
880
|
+
interface XPSTablesModelStructure {
|
|
881
|
+
/** A list of models. */
|
|
882
|
+
modelParameters?: XPSTablesModelStructureModelParameters[];
|
|
883
|
+
}
|
|
884
|
+
interface XPSTablesModelStructureModelParameters {
|
|
885
|
+
hyperparameters?: XPSTablesModelStructureModelParametersParameter[];
|
|
886
|
+
}
|
|
887
|
+
interface XPSTablesModelStructureModelParametersParameter {
|
|
888
|
+
/** Float type parameter value. */
|
|
889
|
+
floatValue?: number;
|
|
890
|
+
/** Integer type parameter value. */
|
|
891
|
+
intValue?: string;
|
|
892
|
+
/** Parameter name. */
|
|
893
|
+
name?: string;
|
|
894
|
+
/** String type parameter value. */
|
|
895
|
+
stringValue?: string;
|
|
896
|
+
}
|
|
897
|
+
interface XPSTableSpec {
|
|
898
|
+
/** Mapping from column id to column spec. */
|
|
899
|
+
columnSpecs?: {[P in string]: XPSColumnSpec};
|
|
900
|
+
/** The total size of imported data of the table. */
|
|
901
|
+
importedDataSizeInBytes?: string;
|
|
902
|
+
/** The number of rows in the table. */
|
|
903
|
+
rowCount?: string;
|
|
904
|
+
/** The id of the time column. */
|
|
905
|
+
timeColumnId?: number;
|
|
906
|
+
/** The number of valid rows. */
|
|
907
|
+
validRowCount?: string;
|
|
908
|
+
}
|
|
909
|
+
interface XPSTablesPreprocessResponse {
|
|
910
|
+
/** The table/column id, column_name and the DataTypes of the columns will be populated. */
|
|
911
|
+
tablesDatasetMetadata?: XPSTablesDatasetMetadata;
|
|
912
|
+
}
|
|
913
|
+
interface XPSTablesRegressionMetrics {
|
|
914
|
+
/** Mean absolute error. */
|
|
915
|
+
meanAbsoluteError?: number;
|
|
916
|
+
/** Mean absolute percentage error, only set if all of the target column's values are positive. */
|
|
917
|
+
meanAbsolutePercentageError?: number;
|
|
918
|
+
/** A list of actual versus predicted points for the model being evaluated. */
|
|
919
|
+
regressionMetricsEntries?: XPSRegressionMetricsEntry[];
|
|
920
|
+
/** Root mean squared error. */
|
|
921
|
+
rootMeanSquaredError?: number;
|
|
922
|
+
/** Root mean squared log error. */
|
|
923
|
+
rootMeanSquaredLogError?: number;
|
|
924
|
+
/** R squared. */
|
|
925
|
+
rSquared?: number;
|
|
926
|
+
}
|
|
927
|
+
interface XPSTablesTrainingOperationMetadata {
|
|
928
|
+
/** Current stage of creating model. */
|
|
929
|
+
createModelStage?: string;
|
|
930
|
+
/** The optimization objective for model. */
|
|
931
|
+
optimizationObjective?: string;
|
|
932
|
+
/** This field is for training. When the operation is terminated successfully, AutoML Backend post this field to operation metadata in spanner. If the metadata has no trials returned, the training operation is supposed to be a failure. */
|
|
933
|
+
topTrials?: XPSTuningTrial[];
|
|
934
|
+
/** Creating model budget. */
|
|
935
|
+
trainBudgetMilliNodeHours?: string;
|
|
936
|
+
/** This field records the training objective value with respect to time, giving insight into how the model architecture search is performing as training time elapses. */
|
|
937
|
+
trainingObjectivePoints?: XPSTrainingObjectivePoint[];
|
|
938
|
+
/** Timestamp when training process starts. */
|
|
939
|
+
trainingStartTime?: string;
|
|
940
|
+
}
|
|
941
|
+
interface XPSTablesTrainResponse {
|
|
942
|
+
modelStructure?: XPSTablesModelStructure;
|
|
943
|
+
/** Sample rows from the dataset this model was trained. */
|
|
944
|
+
predictionSampleRows?: XPSRow[];
|
|
945
|
+
/** Output only. Auxiliary information for each of the input_feature_column_specs, with respect to this particular model. */
|
|
946
|
+
tablesModelColumnInfo?: XPSTablesModelColumnInfo[];
|
|
947
|
+
/** The actual training cost of the model, expressed in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed the train budget. */
|
|
948
|
+
trainCostMilliNodeHours?: string;
|
|
949
|
+
}
|
|
950
|
+
interface XPSTextComponentModel {
|
|
951
|
+
/** The Cloud Storage resource path to hold batch prediction model. */
|
|
952
|
+
batchPredictionModelGcsUri?: string;
|
|
953
|
+
/** The Cloud Storage resource path to hold online prediction model. */
|
|
954
|
+
onlinePredictionModelGcsUri?: string;
|
|
955
|
+
/** The partition where the model is deployed. Populated by uCAIP BE as part of online PredictRequest. */
|
|
956
|
+
partition?: string;
|
|
957
|
+
/** The default model binary file used for serving (e.g. online predict, batch predict) via public Cloud Ai Platform API. */
|
|
958
|
+
servingArtifact?: XPSModelArtifactItem;
|
|
959
|
+
/** The name of servo model. Populated by uCAIP BE as part of online PredictRequest. */
|
|
960
|
+
servoModelName?: string;
|
|
961
|
+
/** The name of the trained NL submodel. */
|
|
962
|
+
submodelName?: string;
|
|
963
|
+
/** The type of trained NL submodel */
|
|
964
|
+
submodelType?: string;
|
|
965
|
+
/** ## The fields below are only populated under uCAIP request scope. https://cloud.google.com/ml-engine/docs/runtime-version-list */
|
|
966
|
+
tfRuntimeVersion?: string;
|
|
967
|
+
/** The servomatic model version number. Populated by uCAIP BE as part of online PredictRequest. */
|
|
968
|
+
versionNumber?: string;
|
|
969
|
+
}
|
|
970
|
+
interface XPSTextExtractionEvaluationMetrics {
|
|
971
|
+
/** Values are at the highest F1 score on the precision-recall curve. Only confidence_threshold, recall, precision, and f1_score will be set. */
|
|
972
|
+
bestF1ConfidenceMetrics?: XPSConfidenceMetricsEntry;
|
|
973
|
+
/** If the enclosing EvaluationMetrics.label is empty, confidence_metrics_entries is an evaluation of the entire model across all labels. If the enclosing EvaluationMetrics.label is set, confidence_metrics_entries applies to that label. */
|
|
974
|
+
confidenceMetricsEntries?: XPSConfidenceMetricsEntry[];
|
|
975
|
+
/** Confusion matrix of the model, at the default confidence threshold (0.0). Only set for whole-model evaluation, not for evaluation per label. */
|
|
976
|
+
confusionMatrix?: XPSConfusionMatrix;
|
|
977
|
+
/** Only recall, precision, and f1_score will be set. */
|
|
978
|
+
perLabelConfidenceMetrics?: {[P in string]: XPSConfidenceMetricsEntry};
|
|
979
|
+
}
|
|
980
|
+
interface XPSTextSentimentEvaluationMetrics {
|
|
981
|
+
/** Output only. Confusion matrix of the evaluation. Only set for the overall model evaluation, not for evaluation of a single annotation spec. */
|
|
982
|
+
confusionMatrix?: XPSConfusionMatrix;
|
|
983
|
+
/** Output only. The harmonic mean of recall and precision. */
|
|
984
|
+
f1Score?: number;
|
|
985
|
+
/** Output only. Linear weighted kappa. Only set for the overall model evaluation, not for evaluation of a single annotation spec. */
|
|
986
|
+
linearKappa?: number;
|
|
987
|
+
/** Output only. Mean absolute error. Only set for the overall model evaluation, not for evaluation of a single annotation spec. */
|
|
988
|
+
meanAbsoluteError?: number;
|
|
989
|
+
/** Output only. Mean squared error. Only set for the overall model evaluation, not for evaluation of a single annotation spec. */
|
|
990
|
+
meanSquaredError?: number;
|
|
991
|
+
/** Output only. Precision. */
|
|
992
|
+
precision?: number;
|
|
993
|
+
/** Output only. Quadratic weighted kappa. Only set for the overall model evaluation, not for evaluation of a single annotation spec. */
|
|
994
|
+
quadraticKappa?: number;
|
|
995
|
+
/** Output only. Recall. */
|
|
996
|
+
recall?: number;
|
|
997
|
+
}
|
|
998
|
+
interface XPSTextToSpeechTrainResponse {}
|
|
999
|
+
interface XPSTextTrainResponse {
|
|
1000
|
+
/** Component submodels. */
|
|
1001
|
+
componentModel?: XPSTextComponentModel[];
|
|
1002
|
+
}
|
|
1003
|
+
interface XPSTfJsFormat {}
|
|
1004
|
+
interface XPSTfLiteFormat {}
|
|
1005
|
+
interface XPSTfSavedModelFormat {}
|
|
1006
|
+
interface XPSTimestampStats {
|
|
1007
|
+
commonStats?: XPSCommonStats;
|
|
1008
|
+
/** The string key is the pre-defined granularity. Currently supported: hour_of_day, day_of_week, month_of_year. Granularities finer that the granularity of timestamp data are not populated (e.g. if timestamps are at day granularity, then hour_of_day is not populated). */
|
|
1009
|
+
granularStats?: {[P in string]: XPSTimestampStatsGranularStats};
|
|
1010
|
+
medianTimestampNanos?: string;
|
|
1011
|
+
}
|
|
1012
|
+
interface XPSTimestampStatsGranularStats {
|
|
1013
|
+
/** A map from granularity key to example count for that key. E.g. for hour_of_day `13` means 1pm, or for month_of_year `5` means May). */
|
|
1014
|
+
buckets?: {[P in string]: string};
|
|
1015
|
+
}
|
|
1016
|
+
+  interface XPSTrackMetricsEntry {
+    /** Output only. Metrics for each label-match confidence_threshold from 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve is derived from them. */
+    confidenceMetricsEntries?: XPSTrackMetricsEntryConfidenceMetricsEntry[];
+    /** Output only. The intersection-over-union threshold value between bounding boxes across frames used to compute this metric entry. */
+    iouThreshold?: number;
+    /** Output only. The mean bounding box iou over all confidence thresholds. */
+    meanBoundingBoxIou?: number;
+    /** Output only. The mean mismatch rate over all confidence thresholds. */
+    meanMismatchRate?: number;
+    /** Output only. The mean average precision over all confidence thresholds. */
+    meanTrackingAveragePrecision?: number;
+  }
+  interface XPSTrackMetricsEntryConfidenceMetricsEntry {
+    /** Output only. Bounding box intersection-over-union precision. Measures how well the bounding boxes overlap between each other (e.g. complete overlap or just barely above iou_threshold). */
+    boundingBoxIou?: number;
+    /** Output only. The confidence threshold value used to compute the metrics. */
+    confidenceThreshold?: number;
+    /** Output only. Mismatch rate, which measures the tracking consistency, i.e. correctness of instance ID continuity. */
+    mismatchRate?: number;
+    /** Output only. Tracking precision. */
+    trackingPrecision?: number;
+    /** Output only. Tracking recall. */
+    trackingRecall?: number;
+  }
+  interface XPSTrainingObjectivePoint {
+    /** The time at which this point was recorded. */
+    createTime?: string;
+    /** The objective value when this point was recorded. */
+    value?: number;
+  }
+  interface XPSTrainResponse {
+    /** Estimated model size in bytes once deployed. */
+    deployedModelSizeBytes?: string;
+    /** Optional vision model error analysis configuration. The field is set when model error analysis is enabled in the training request. The results of error analysis will be bound together with evaluation results (in the format of AnnotatedExample). */
+    errorAnalysisConfigs?: XPSVisionErrorAnalysisConfig[];
+    /** Examples used to evaluate the model (usually the test set), with the predicted annotations. The file_spec should point to recordio file(s) of AnnotatedExample. For each returned example, the example_id_token and annotations predicted by the model must be set. The example payload can be, and is recommended to be, omitted. */
+    evaluatedExampleSet?: XPSExampleSet;
+    /** The trained model evaluation metrics. This can be optionally returned. */
+    evaluationMetricsSet?: XPSEvaluationMetricsSet;
+    /** VisionExplanationConfig for XAI on the test set. Optional; set when XAI is enabled in the training request. */
+    explanationConfigs?: XPSResponseExplanationSpec[];
+    imageClassificationTrainResp?: XPSImageClassificationTrainResponse;
+    imageObjectDetectionTrainResp?: XPSImageObjectDetectionModelSpec;
+    imageSegmentationTrainResp?: XPSImageSegmentationTrainResponse;
+    /** Token that represents the trained model. This is considered immutable and is persisted in AutoML. xPS can put their own proto in the byte string, to e.g. point to the model checkpoints. The token is passed to other xPS APIs to refer to the model. */
+    modelToken?: string;
+    speechTrainResp?: XPSSpeechModelSpec;
+    tablesTrainResp?: XPSTablesTrainResponse;
+    textToSpeechTrainResp?: any;
+    /** Will only be needed for uCAIP from Beta. */
+    textTrainResp?: XPSTextTrainResponse;
+    translationTrainResp?: XPSTranslationTrainResponse;
+    videoActionRecognitionTrainResp?: XPSVideoActionRecognitionTrainResponse;
+    videoClassificationTrainResp?: XPSVideoClassificationTrainResponse;
+    videoObjectTrackingTrainResp?: XPSVideoObjectTrackingTrainResponse;
+  }
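XPSTrainResponse carries one modality-specific sub-response per training job. A minimal sketch of dispatching on whichever field is populated, assuming a hypothetical helper and only the handful of fields mirrored locally (not part of the generated declarations):

// Illustrative only: mirrors a few of the modality-specific fields of XPSTrainResponse.
interface TrainResponseLike {
  imageClassificationTrainResp?: unknown;
  textTrainResp?: unknown;
  translationTrainResp?: unknown;
  videoObjectTrackingTrainResp?: unknown;
}

// Hypothetical helper: report which modality the response belongs to.
function populatedModality(resp: TrainResponseLike): string {
  if (resp.imageClassificationTrainResp !== undefined) return 'image classification';
  if (resp.textTrainResp !== undefined) return 'text';
  if (resp.translationTrainResp !== undefined) return 'translation';
  if (resp.videoObjectTrackingTrainResp !== undefined) return 'video object tracking';
  return 'unknown';
}

console.log(populatedModality({textTrainResp: {componentModel: []}})); // "text"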
+  interface XPSTranslationEvaluationMetrics {
+    /** BLEU score for base model. */
+    baseBleuScore?: number;
+    /** BLEU score. */
+    bleuScore?: number;
+  }
+  interface XPSTranslationPreprocessResponse {
+    /** Total example count parsed. */
+    parsedExampleCount?: string;
+    /** Total valid example count. */
+    validExampleCount?: string;
+  }
+  interface XPSTranslationTrainResponse {
+    /** Type of the model. */
+    modelType?: string;
+  }
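A minimal sketch of how the two BLEU fields above might be compared; the helper name and the sample numbers are illustrative assumptions:

// Illustrative only: mirrors XPSTranslationEvaluationMetrics' two BLEU fields.
type TranslationMetrics = {baseBleuScore?: number; bleuScore?: number};

// Hypothetical helper: improvement of the trained model over the base model.
function bleuImprovement(m: TranslationMetrics): number | undefined {
  return m.bleuScore !== undefined && m.baseBleuScore !== undefined
    ? m.bleuScore - m.baseBleuScore
    : undefined;
}

console.log(bleuImprovement({baseBleuScore: 32.1, bleuScore: 36.4})); // ≈ 4.3 BLEU points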
+  interface XPSTuningTrial {
+    /** Model parameters for the trial. */
+    modelStructure?: XPSTablesModelStructure;
+    /** The optimization objective evaluation of the eval split data. */
+    trainingObjectivePoint?: XPSTrainingObjectivePoint;
+  }
+  interface XPSVideoActionMetricsEntry {
+    /** Metrics for each label-match confidence_threshold from 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. */
+    confidenceMetricsEntries?: XPSVideoActionMetricsEntryConfidenceMetricsEntry[];
+    /** The mean average precision. */
+    meanAveragePrecision?: number;
+    /** This VideoActionMetricsEntry is calculated based on this prediction window length. If the predicted action's timestamp is inside the time window whose center is the ground truth action's timestamp with this specific length, the prediction result is treated as a true positive. */
+    precisionWindowLength?: string;
+  }
+  interface XPSVideoActionMetricsEntryConfidenceMetricsEntry {
+    /** Output only. The confidence threshold value used to compute the metrics. */
+    confidenceThreshold?: number;
+    /** Output only. The harmonic mean of recall and precision. */
+    f1Score?: number;
+    /** Output only. Precision for the given confidence threshold. */
+    precision?: number;
+    /** Output only. Recall for the given confidence threshold. */
+    recall?: number;
+  }
+  interface XPSVideoActionRecognitionEvaluationMetrics {
+    /** Output only. The number of ground truth actions used to create this evaluation. */
+    evaluatedActionCount?: number;
+    /** Output only. The metric entries for precision window lengths: 1s,2s,3s,4s,5s. */
+    videoActionMetricsEntries?: XPSVideoActionMetricsEntry[];
+  }
+  interface XPSVideoActionRecognitionTrainResponse {
+    /** ## The fields below are only populated under uCAIP request scope. */
+    modelArtifactSpec?: XPSVideoModelArtifactSpec;
+    /** The actual train cost of creating this model, expressed in node seconds, i.e. a value of 3,600 in this field means 1 node hour. */
+    trainCostNodeSeconds?: string;
+  }
+  interface XPSVideoBatchPredictOperationMetadata {
+    /** All the partial batch prediction results that are completed at the moment. Output examples are sorted by completion time. The order will not be changed. Each output example should be the path of a single RecordIO file of AnnotatedExamples. */
+    outputExamples?: string[];
+  }
+  interface XPSVideoClassificationTrainResponse {
+    /** ## The fields below are only populated under uCAIP request scope. */
+    modelArtifactSpec?: XPSVideoModelArtifactSpec;
+    /** The actual train cost of creating this model, expressed in node seconds, i.e. a value of 3,600 in this field means 1 node hour. */
+    trainCostNodeSeconds?: string;
+  }
+  interface XPSVideoExportModelSpec {
+    /** Contains the model format and internal location of the model files to be exported/downloaded. Use the GCS bucket name which is provided via TrainRequest.gcs_bucket_name to store the model files. */
+    exportModelOutputConfig?: XPSExportModelOutputConfig[];
+  }
+  interface XPSVideoModelArtifactSpec {
+    /** The model binary files in different formats for model export. */
+    exportArtifact?: XPSModelArtifactItem[];
+    /** The default model binary file used for serving (e.g. batch predict) via public Cloud AI Platform API. */
+    servingArtifact?: XPSModelArtifactItem;
+  }
+  interface XPSVideoObjectTrackingEvaluationMetrics {
+    /** Output only. The single metric for bounding boxes evaluation: the mean_average_precision averaged over all bounding_box_metrics_entries. */
+    boundingBoxMeanAveragePrecision?: number;
+    /** Output only. The bounding boxes match metrics for each Intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. */
+    boundingBoxMetricsEntries?: XPSBoundingBoxMetricsEntry[];
+    /** The number of bounding boxes used for model evaluation. */
+    evaluatedBoundingboxCount?: number;
+    /** The number of video frames used for model evaluation. */
+    evaluatedFrameCount?: number;
+    /** The number of tracks used for model evaluation. */
+    evaluatedTrackCount?: number;
+    /** Output only. The single metric for tracks accuracy evaluation: the mean_average_precision averaged over all track_metrics_entries. */
+    trackMeanAveragePrecision?: number;
+    /** Output only. The single metric for tracks bounding box iou evaluation: the mean_bounding_box_iou averaged over all track_metrics_entries. */
+    trackMeanBoundingBoxIou?: number;
+    /** Output only. The single metric for tracking consistency evaluation: the mean_mismatch_rate averaged over all track_metrics_entries. */
+    trackMeanMismatchRate?: number;
+    /** Output only. The tracks match metrics for each Intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. */
+    trackMetricsEntries?: XPSTrackMetricsEntry[];
+  }
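Per the comments above, trackMeanAveragePrecision is the meanTrackingAveragePrecision averaged over all trackMetricsEntries. A minimal sketch recomputing that average (the local alias, helper, and sample entries are illustrative assumptions):

// Illustrative only: mirrors the XPSTrackMetricsEntry fields used here.
type TrackEntry = {iouThreshold?: number; meanTrackingAveragePrecision?: number};

// Hypothetical helper: average the per-IoU-threshold tracking AP values.
function averageTrackingAP(entries: TrackEntry[]): number {
  const values = entries
    .map(e => e.meanTrackingAveragePrecision)
    .filter((v): v is number => v !== undefined);
  return values.length === 0 ? 0 : values.reduce((a, b) => a + b, 0) / values.length;
}

console.log(averageTrackingAP([
  {iouThreshold: 0.5, meanTrackingAveragePrecision: 0.72},
  {iouThreshold: 0.75, meanTrackingAveragePrecision: 0.58},
])); // ≈ 0.65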
+  interface XPSVideoObjectTrackingTrainResponse {
+    /** Populated for AutoML request only. */
+    exportModelSpec?: XPSVideoExportModelSpec;
+    /** ## The fields below are only populated under uCAIP request scope. */
+    modelArtifactSpec?: XPSVideoModelArtifactSpec;
+    /** The actual train cost of creating this model, expressed in node seconds, i.e. a value of 3,600 in this field means 1 node hour. */
+    trainCostNodeSeconds?: string;
+  }
+  interface XPSVideoTrainingOperationMetadata {
+    /** This is an estimation of the node hours necessary for training a model, expressed in milli node hours (i.e. a value of 1,000 in this field means 1 node hour). A node hour represents the time a virtual machine spends running your training job. The cost of one node running for one hour is a node hour. */
+    trainCostMilliNodeHour?: string;
+  }
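trainCostMilliNodeHour is a string-encoded count of milli node hours, so 1,000 corresponds to 1 node hour. A minimal conversion sketch (the helper and sample value are illustrative assumptions):

// Hypothetical helper: convert the string-encoded milli node hours to node hours.
function nodeHours(metadata: {trainCostMilliNodeHour?: string}): number {
  return Number(metadata.trainCostMilliNodeHour ?? '0') / 1000;
}

console.log(nodeHours({trainCostMilliNodeHour: '4500'})); // 4.5 node hours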
+  interface XPSVisionErrorAnalysisConfig {
+    /** The number of query examples in error analysis. */
+    exampleCount?: number;
+    /** The query type used in retrieval. The enum values are frozen for the foreseeable future. */
+    queryType?: string;
+  }
+  interface XPSVisionTrainingOperationMetadata {
+    /** Aggregated infra usage within a certain time period, for billing reporting purposes when XAI is enabled in the training request. */
+    explanationUsage?: InfraUsage;
+  }
+  interface XPSVisualization {
+    /** Excludes attributions below the specified percentile from the highlighted areas. Defaults to 62. */
+    clipPercentLowerbound?: number;
+    /** Excludes attributions above the specified percentile from the highlighted areas. Using the clip_percent_upperbound and clip_percent_lowerbound together can be useful for filtering out noise and making it easier to see areas of strong attribution. Defaults to 99.9. */
+    clipPercentUpperbound?: number;
+    /** The color scheme used for the highlighted areas. Defaults to PINK_GREEN for Integrated Gradients attribution, which shows positive attributions in green and negative in pink. Defaults to VIRIDIS for XRAI attribution, which highlights the most influential regions in yellow and the least influential in blue. */
+    colorMap?: string;
+    /** How the original image is displayed in the visualization. Adjusting the overlay can help increase visual clarity if the original image makes it difficult to view the visualization. Defaults to NONE. */
+    overlayType?: string;
+    /** Whether to only highlight pixels with positive contributions, negative or both. Defaults to POSITIVE. */
+    polarity?: string;
+    /** Type of the image visualization. Only applicable to Integrated Gradients attribution. OUTLINES shows regions of attribution, while PIXELS shows per-pixel attribution. Defaults to OUTLINES. */
+    type?: string;
+  }
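A minimal sketch of an XPSVisualization-shaped object spelled out with the defaults described in the comments above; the string literals are assumptions, since the generated declarations type these enum-like fields simply as string:

// Illustrative only: defaults taken from the field comments; values are plain strings.
const visualization = {
  clipPercentLowerbound: 62,
  clipPercentUpperbound: 99.9,
  colorMap: 'PINK_GREEN', // documented default for Integrated Gradients attribution
  overlayType: 'NONE',
  polarity: 'POSITIVE',
  type: 'OUTLINES',       // only applicable to Integrated Gradients attribution
};
console.log(visualization.colorMap); // "PINK_GREEN"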
+  interface XPSXpsOperationMetadata {
+    /** Optional. XPS server can opt to provide example count of the long running operation (e.g. training, data importing, batch prediction). */
+    exampleCount?: string;
+    /** Metrics for the operation. By the time the operation is terminated (whether succeeded or failed) as returned from XPS, AutoML BE assumes the metrics are finalized. AutoML BE transparently posts the metrics to Chemist if it's not empty, regardless of the response content or error type. If user is supposed to be charged in case of cancellation/error, this field should be set. In the case where the type of LRO doesn't require any billing, this field should be left unset. */
+    reportingMetrics?: XPSReportingMetrics;
+    tablesTrainingOperationMetadata?: XPSTablesTrainingOperationMetadata;
+    videoBatchPredictOperationMetadata?: XPSVideoBatchPredictOperationMetadata;
+    videoTrainingOperationMetadata?: XPSVideoTrainingOperationMetadata;
+    visionTrainingOperationMetadata?: XPSVisionTrainingOperationMetadata;
+  }
+  interface XPSXraiAttribution {
+    /** The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively. */
+    stepCount?: number;
+  }
  interface DocumentsResource {
    /** Finds named entities (currently proper names and common nouns) in the text along with entity types, probability, mentions for each entity, and other properties. */
    analyzeEntities(request: {