public static final class ModelMonitoringSchema.Builder extends com.google.protobuf.GeneratedMessageV3.Builder<ModelMonitoringSchema.Builder> implements ModelMonitoringSchemaOrBuilder
The Model Monitoring Schema definition. Protobuf type:
google.cloud.aiplatform.v1beta1.ModelMonitoringSchema

Methods inherited from class com.google.protobuf.GeneratedMessageV3.Builder:
getAllFields, getField, getFieldBuilder, getOneofFieldDescriptor, getParentForChildren, getRepeatedField, getRepeatedFieldBuilder, getRepeatedFieldCount, getUnknownFields, getUnknownFieldSetBuilder, hasField, hasOneof, internalGetMapField, internalGetMapFieldReflection, internalGetMutableMapField, internalGetMutableMapFieldReflection, isClean, markClean, mergeUnknownLengthDelimitedField, mergeUnknownVarintField, newBuilderForField, onBuilt, onChanged, parseUnknownField, setUnknownFieldSetBuilder, setUnknownFieldsProto3

Methods inherited from class com.google.protobuf.AbstractMessage.Builder:
findInitializationErrors, getInitializationErrorString, internalMergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, newUninitializedMessageException, toString

Methods inherited from class com.google.protobuf.AbstractMessageLite.Builder:
addAll, addAll, mergeDelimitedFrom, mergeDelimitedFrom, newUninitializedMessageException

Methods inherited from class java.lang.Object:
equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

public static final com.google.protobuf.Descriptors.Descriptor getDescriptor()
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()
Overrides:
internalGetFieldAccessorTable in class com.google.protobuf.GeneratedMessageV3.Builder<ModelMonitoringSchema.Builder>

public ModelMonitoringSchema.Builder clear()
Specified by:
clear in interface com.google.protobuf.Message.Builder
clear in interface com.google.protobuf.MessageLite.Builder
Overrides:
clear in class com.google.protobuf.GeneratedMessageV3.Builder<ModelMonitoringSchema.Builder>

public com.google.protobuf.Descriptors.Descriptor getDescriptorForType()
Specified by:
getDescriptorForType in interface com.google.protobuf.Message.Builder
getDescriptorForType in interface com.google.protobuf.MessageOrBuilder
Overrides:
getDescriptorForType in class com.google.protobuf.GeneratedMessageV3.Builder<ModelMonitoringSchema.Builder>

public ModelMonitoringSchema getDefaultInstanceForType()
Specified by:
getDefaultInstanceForType in interface com.google.protobuf.MessageLiteOrBuilder
getDefaultInstanceForType in interface com.google.protobuf.MessageOrBuilder

public ModelMonitoringSchema build()
Specified by:
build in interface com.google.protobuf.Message.Builder
build in interface com.google.protobuf.MessageLite.Builder

public ModelMonitoringSchema buildPartial()
Specified by:
buildPartial in interface com.google.protobuf.Message.Builder
buildPartial in interface com.google.protobuf.MessageLite.Builder

public ModelMonitoringSchema.Builder clone()
Specified by:
clone in interface com.google.protobuf.Message.Builder
clone in interface com.google.protobuf.MessageLite.Builder
Overrides:
clone in class com.google.protobuf.GeneratedMessageV3.Builder<ModelMonitoringSchema.Builder>

public ModelMonitoringSchema.Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value)
Specified by:
setField in interface com.google.protobuf.Message.Builder
Overrides:
setField in class com.google.protobuf.GeneratedMessageV3.Builder<ModelMonitoringSchema.Builder>

public ModelMonitoringSchema.Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field)
Specified by:
clearField in interface com.google.protobuf.Message.Builder
Overrides:
clearField in class com.google.protobuf.GeneratedMessageV3.Builder<ModelMonitoringSchema.Builder>

public ModelMonitoringSchema.Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof)
Specified by:
clearOneof in interface com.google.protobuf.Message.Builder
Overrides:
clearOneof in class com.google.protobuf.GeneratedMessageV3.Builder<ModelMonitoringSchema.Builder>

public ModelMonitoringSchema.Builder setRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value)
Specified by:
setRepeatedField in interface com.google.protobuf.Message.Builder
Overrides:
setRepeatedField in class com.google.protobuf.GeneratedMessageV3.Builder<ModelMonitoringSchema.Builder>

public ModelMonitoringSchema.Builder addRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value)
Specified by:
addRepeatedField in interface com.google.protobuf.Message.Builder
Overrides:
addRepeatedField in class com.google.protobuf.GeneratedMessageV3.Builder<ModelMonitoringSchema.Builder>

public ModelMonitoringSchema.Builder mergeFrom(com.google.protobuf.Message other)
Specified by:
mergeFrom in interface com.google.protobuf.Message.Builder
Overrides:
mergeFrom in class com.google.protobuf.AbstractMessage.Builder<ModelMonitoringSchema.Builder>

public ModelMonitoringSchema.Builder mergeFrom(ModelMonitoringSchema other)

public final boolean isInitialized()
Specified by:
isInitialized in interface com.google.protobuf.MessageLiteOrBuilder
Overrides:
isInitialized in class com.google.protobuf.GeneratedMessageV3.Builder<ModelMonitoringSchema.Builder>

public ModelMonitoringSchema.Builder mergeFrom(com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws IOException
Specified by:
mergeFrom in interface com.google.protobuf.Message.Builder
mergeFrom in interface com.google.protobuf.MessageLite.Builder
Overrides:
mergeFrom in class com.google.protobuf.AbstractMessage.Builder<ModelMonitoringSchema.Builder>
Throws:
IOException

public List<ModelMonitoringSchema.FieldSchema> getFeatureFieldsList()
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
Specified by:
getFeatureFieldsList in interface ModelMonitoringSchemaOrBuilder

public int getFeatureFieldsCount()
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
Specified by:
getFeatureFieldsCount in interface ModelMonitoringSchemaOrBuilder

public ModelMonitoringSchema.FieldSchema getFeatureFields(int index)
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
Specified by:
getFeatureFields in interface ModelMonitoringSchemaOrBuilder

public ModelMonitoringSchema.Builder setFeatureFields(int index, ModelMonitoringSchema.FieldSchema value)
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
public ModelMonitoringSchema.Builder setFeatureFields(int index, ModelMonitoringSchema.FieldSchema.Builder builderForValue)
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
public ModelMonitoringSchema.Builder addFeatureFields(ModelMonitoringSchema.FieldSchema value)
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
public ModelMonitoringSchema.Builder addFeatureFields(int index, ModelMonitoringSchema.FieldSchema value)
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
public ModelMonitoringSchema.Builder addFeatureFields(ModelMonitoringSchema.FieldSchema.Builder builderForValue)
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
public ModelMonitoringSchema.Builder addFeatureFields(int index, ModelMonitoringSchema.FieldSchema.Builder builderForValue)
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
public ModelMonitoringSchema.Builder addAllFeatureFields(Iterable<? extends ModelMonitoringSchema.FieldSchema> values)
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
public ModelMonitoringSchema.Builder clearFeatureFields()
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
public ModelMonitoringSchema.Builder removeFeatureFields(int index)
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
public ModelMonitoringSchema.FieldSchema.Builder getFeatureFieldsBuilder(int index)
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
public ModelMonitoringSchema.FieldSchemaOrBuilder getFeatureFieldsOrBuilder(int index)
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
Specified by:
getFeatureFieldsOrBuilder in interface ModelMonitoringSchemaOrBuilder

public List<? extends ModelMonitoringSchema.FieldSchemaOrBuilder> getFeatureFieldsOrBuilderList()
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
Specified by:
getFeatureFieldsOrBuilderList in interface ModelMonitoringSchemaOrBuilder

public ModelMonitoringSchema.FieldSchema.Builder addFeatureFieldsBuilder()
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
public ModelMonitoringSchema.FieldSchema.Builder addFeatureFieldsBuilder(int index)
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
public List<ModelMonitoringSchema.FieldSchema.Builder> getFeatureFieldsBuilderList()
Feature names of the model. Vertex AI will try to match the features from
your dataset as follows:
* For 'csv' files, the header names are required, and we will extract the
corresponding feature values when the header names align with the
feature names.
* For 'jsonl' files, we will extract the corresponding feature values if
the key names match the feature names.
Note: Nested features are not supported, so please ensure your features
are flattened. Ensure the feature values are scalar or an array of
scalars.
* For 'bigquery' dataset, we will extract the corresponding feature values
if the column names match the feature names.
Note: The column type can be a scalar or an array of scalars. STRUCT or
JSON types are not supported. You may use SQL queries to select or
aggregate the relevant features from your original table. However,
ensure that the 'schema' of the query results meets our requirements.
* For the Vertex AI Endpoint Request Response Logging table or Vertex AI
Batch Prediction Job results. If the
[instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type]
is an array, ensure that the sequence in
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]
matches the order of features in the prediction instance. We will match
the feature with the array in the order specified in [feature_fields].
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema feature_fields = 1;
public List<ModelMonitoringSchema.FieldSchema> getPredictionFieldsList()
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
getPredictionFieldsList in interface ModelMonitoringSchemaOrBuilderpublic int getPredictionFieldsCount()
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
getPredictionFieldsCount in interface ModelMonitoringSchemaOrBuilderpublic ModelMonitoringSchema.FieldSchema getPredictionFields(int index)
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
getPredictionFields in interface ModelMonitoringSchemaOrBuilderpublic ModelMonitoringSchema.Builder setPredictionFields(int index, ModelMonitoringSchema.FieldSchema value)
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
public ModelMonitoringSchema.Builder setPredictionFields(int index, ModelMonitoringSchema.FieldSchema.Builder builderForValue)
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
public ModelMonitoringSchema.Builder addPredictionFields(ModelMonitoringSchema.FieldSchema value)
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
public ModelMonitoringSchema.Builder addPredictionFields(int index, ModelMonitoringSchema.FieldSchema value)
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
public ModelMonitoringSchema.Builder addPredictionFields(ModelMonitoringSchema.FieldSchema.Builder builderForValue)
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
public ModelMonitoringSchema.Builder addPredictionFields(int index, ModelMonitoringSchema.FieldSchema.Builder builderForValue)
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
public ModelMonitoringSchema.Builder addAllPredictionFields(Iterable<? extends ModelMonitoringSchema.FieldSchema> values)
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
public ModelMonitoringSchema.Builder clearPredictionFields()
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
public ModelMonitoringSchema.Builder removePredictionFields(int index)
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
public ModelMonitoringSchema.FieldSchema.Builder getPredictionFieldsBuilder(int index)
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
public ModelMonitoringSchema.FieldSchemaOrBuilder getPredictionFieldsOrBuilder(int index)
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
getPredictionFieldsOrBuilder in interface ModelMonitoringSchemaOrBuilderpublic List<? extends ModelMonitoringSchema.FieldSchemaOrBuilder> getPredictionFieldsOrBuilderList()
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
getPredictionFieldsOrBuilderList in interface ModelMonitoringSchemaOrBuilderpublic ModelMonitoringSchema.FieldSchema.Builder addPredictionFieldsBuilder()
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
public ModelMonitoringSchema.FieldSchema.Builder addPredictionFieldsBuilder(int index)
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
public List<ModelMonitoringSchema.FieldSchema.Builder> getPredictionFieldsBuilderList()
Prediction output names of the model. The requirements are the same as the
[feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields].
For AutoML Tables, the prediction output name presented in schema will be:
`predicted_{target_column}`, the `target_column` is the one you specified
when you train the model.
For Prediction output drift analysis:
* AutoML Classification, the distribution of the argmax label will be
analyzed.
* AutoML Regression, the distribution of the value will be analyzed.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema prediction_fields = 2;
public List<ModelMonitoringSchema.FieldSchema> getGroundTruthFieldsList()
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
getGroundTruthFieldsList in interface ModelMonitoringSchemaOrBuilderpublic int getGroundTruthFieldsCount()
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
getGroundTruthFieldsCount in interface ModelMonitoringSchemaOrBuilderpublic ModelMonitoringSchema.FieldSchema getGroundTruthFields(int index)
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
getGroundTruthFields in interface ModelMonitoringSchemaOrBuilderpublic ModelMonitoringSchema.Builder setGroundTruthFields(int index, ModelMonitoringSchema.FieldSchema value)
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
public ModelMonitoringSchema.Builder setGroundTruthFields(int index, ModelMonitoringSchema.FieldSchema.Builder builderForValue)
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
public ModelMonitoringSchema.Builder addGroundTruthFields(ModelMonitoringSchema.FieldSchema value)
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
public ModelMonitoringSchema.Builder addGroundTruthFields(int index, ModelMonitoringSchema.FieldSchema value)
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
public ModelMonitoringSchema.Builder addGroundTruthFields(ModelMonitoringSchema.FieldSchema.Builder builderForValue)
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
public ModelMonitoringSchema.Builder addGroundTruthFields(int index, ModelMonitoringSchema.FieldSchema.Builder builderForValue)
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
public ModelMonitoringSchema.Builder addAllGroundTruthFields(Iterable<? extends ModelMonitoringSchema.FieldSchema> values)
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
public ModelMonitoringSchema.Builder clearGroundTruthFields()
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
public ModelMonitoringSchema.Builder removeGroundTruthFields(int index)
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
public ModelMonitoringSchema.FieldSchema.Builder getGroundTruthFieldsBuilder(int index)
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
public ModelMonitoringSchema.FieldSchemaOrBuilder getGroundTruthFieldsOrBuilder(int index)
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
getGroundTruthFieldsOrBuilder in interface ModelMonitoringSchemaOrBuilderpublic List<? extends ModelMonitoringSchema.FieldSchemaOrBuilder> getGroundTruthFieldsOrBuilderList()
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
getGroundTruthFieldsOrBuilderList in interface ModelMonitoringSchemaOrBuilderpublic ModelMonitoringSchema.FieldSchema.Builder addGroundTruthFieldsBuilder()
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
public ModelMonitoringSchema.FieldSchema.Builder addGroundTruthFieldsBuilder(int index)
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
public List<ModelMonitoringSchema.FieldSchema.Builder> getGroundTruthFieldsBuilderList()
Target/ground truth names of the model.
repeated .google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.FieldSchema ground_truth_fields = 3;
public final ModelMonitoringSchema.Builder setUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields)
setUnknownFields in interface com.google.protobuf.Message.BuildersetUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<ModelMonitoringSchema.Builder>public final ModelMonitoringSchema.Builder mergeUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields)
mergeUnknownFields in interface com.google.protobuf.Message.BuildermergeUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<ModelMonitoringSchema.Builder>Copyright © 2024 Google LLC. All rights reserved.