/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/speech/v2/cloud_speech.proto

package com.google.cloud.speech.v2;

/**
 *
 *
 * <pre>
 * Available recognition features.
 * </pre>
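 *
 * <p>A minimal, hand-written usage sketch (not generated); the field values below are
 * illustrative assumptions, not defaults:
 *
 * <pre>{@code
 * RecognitionFeatures features =
 *     RecognitionFeatures.newBuilder()
 *         .setEnableAutomaticPunctuation(true)
 *         .setEnableWordTimeOffsets(true)
 *         .setMaxAlternatives(1)
 *         .build();
 * }</pre>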
 *
 * Protobuf type {@code google.cloud.speech.v2.RecognitionFeatures}
 */
public final class RecognitionFeatures extends com.google.protobuf.GeneratedMessageV3
    implements
    // @@protoc_insertion_point(message_implements:google.cloud.speech.v2.RecognitionFeatures)
    RecognitionFeaturesOrBuilder {
  private static final long serialVersionUID = 0L;
  // Use RecognitionFeatures.newBuilder() to construct.
  private RecognitionFeatures(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }

  private RecognitionFeatures() {
    multiChannelMode_ = 0;
  }

  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
    return new RecognitionFeatures();
  }

  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
    return this.unknownFields;
  }

  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return com.google.cloud.speech.v2.CloudSpeechProto
        .internal_static_google_cloud_speech_v2_RecognitionFeatures_descriptor;
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.google.cloud.speech.v2.CloudSpeechProto
        .internal_static_google_cloud_speech_v2_RecognitionFeatures_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.speech.v2.RecognitionFeatures.class,
            com.google.cloud.speech.v2.RecognitionFeatures.Builder.class);
  }

  /**
   *
   *
   * <pre>
   * Options for how to recognize multi-channel audio.
   * </pre>
   *
   * Protobuf enum {@code google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode}
   */
  public enum MultiChannelMode implements com.google.protobuf.ProtocolMessageEnum {
    /**
     *
     *
     * <pre>
     * Default value for the multi-channel mode. If the audio contains
     * multiple channels, only the first channel will be transcribed; other
     * channels will be ignored.
     * </pre>
     *
     * <code>MULTI_CHANNEL_MODE_UNSPECIFIED = 0;</code>
     */
    MULTI_CHANNEL_MODE_UNSPECIFIED(0),
    /**
     *
     *
     * <pre>
     * If selected, each channel in the provided audio is transcribed
     * independently. This cannot be selected if the selected
     * [model][google.cloud.speech.v2.Recognizer.model] is `latest_short`.
     * </pre>
     *
     * <code>SEPARATE_RECOGNITION_PER_CHANNEL = 1;</code>
     */
    SEPARATE_RECOGNITION_PER_CHANNEL(1),
    UNRECOGNIZED(-1),
    ;

    /**
     *
     *
     * <pre>
     * Default value for the multi-channel mode. If the audio contains
     * multiple channels, only the first channel will be transcribed; other
     * channels will be ignored.
     * </pre>
     *
     * <code>MULTI_CHANNEL_MODE_UNSPECIFIED = 0;</code>
     */
    public static final int MULTI_CHANNEL_MODE_UNSPECIFIED_VALUE = 0;
    /**
     *
     *
     * <pre>
     * If selected, each channel in the provided audio is transcribed
     * independently. This cannot be selected if the selected
     * [model][google.cloud.speech.v2.Recognizer.model] is `latest_short`.
     * </pre>
     *
     * <code>SEPARATE_RECOGNITION_PER_CHANNEL = 1;</code>
     */
    public static final int SEPARATE_RECOGNITION_PER_CHANNEL_VALUE = 1;

    public final int getNumber() {
      if (this == UNRECOGNIZED) {
        throw new java.lang.IllegalArgumentException(
            "Can't get the number of an unknown enum value.");
      }
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static MultiChannelMode valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static MultiChannelMode forNumber(int value) {
      switch (value) {
        case 0:
          return MULTI_CHANNEL_MODE_UNSPECIFIED;
        case 1:
          return SEPARATE_RECOGNITION_PER_CHANNEL;
        default:
          return null;
      }
    }
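
    // Hand-written illustrative note (not generated): forNumber returns null for wire
    // values that are not defined in the .proto, so callers typically fall back to
    // UNRECOGNIZED themselves (rawWireValue below is a hypothetical int):
    //   MultiChannelMode mode = MultiChannelMode.forNumber(rawWireValue);
    //   if (mode == null) {
    //     mode = MultiChannelMode.UNRECOGNIZED;
    //   }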

    public static com.google.protobuf.Internal.EnumLiteMap<MultiChannelMode> internalGetValueMap() {
      return internalValueMap;
    }

    private static final com.google.protobuf.Internal.EnumLiteMap<MultiChannelMode>
        internalValueMap =
            new com.google.protobuf.Internal.EnumLiteMap<MultiChannelMode>() {
              public MultiChannelMode findValueByNumber(int number) {
                return MultiChannelMode.forNumber(number);
              }
            };

    public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() {
      if (this == UNRECOGNIZED) {
        throw new java.lang.IllegalStateException(
            "Can't get the descriptor of an unrecognized enum value.");
      }
      return getDescriptor().getValues().get(ordinal());
    }

    public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() {
      return getDescriptor();
    }

    public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() {
      return com.google.cloud.speech.v2.RecognitionFeatures.getDescriptor().getEnumTypes().get(0);
    }

    private static final MultiChannelMode[] VALUES = values();

    public static MultiChannelMode valueOf(
        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type.");
      }
      if (desc.getIndex() == -1) {
        return UNRECOGNIZED;
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private MultiChannelMode(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode)
  }

  public static final int PROFANITY_FILTER_FIELD_NUMBER = 1;
  private boolean profanityFilter_ = false;
  /**
   *
   *
   * <pre>
   * If set to `true`, the server will attempt to filter out profanities,
   * replacing all but the initial character in each filtered word with
   * asterisks, for instance, "f***". If set to `false` or omitted, profanities
   * won't be filtered out.
   * </pre>
   *
   * <code>bool profanity_filter = 1;</code>
   *
   * @return The profanityFilter.
   */
  @java.lang.Override
  public boolean getProfanityFilter() {
    return profanityFilter_;
  }

  public static final int ENABLE_WORD_TIME_OFFSETS_FIELD_NUMBER = 2;
  private boolean enableWordTimeOffsets_ = false;
  /**
   *
   *
   * <pre>
   * If `true`, the top result includes a list of words and the start and end
   * time offsets (timestamps) for those words. If `false`, no word-level time
   * offset information is returned. The default is `false`.
   * </pre>
   *
   * <code>bool enable_word_time_offsets = 2;</code>
   *
   * @return The enableWordTimeOffsets.
   */
  @java.lang.Override
  public boolean getEnableWordTimeOffsets() {
    return enableWordTimeOffsets_;
  }

  public static final int ENABLE_WORD_CONFIDENCE_FIELD_NUMBER = 3;
  private boolean enableWordConfidence_ = false;
  /**
   *
   *
   * <pre>
   * If `true`, the top result includes a list of words and the confidence for
   * those words. If `false`, no word-level confidence information is returned.
   * The default is `false`.
   * </pre>
   *
   * <code>bool enable_word_confidence = 3;</code>
   *
   * @return The enableWordConfidence.
   */
  @java.lang.Override
  public boolean getEnableWordConfidence() {
    return enableWordConfidence_;
  }

  public static final int ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER = 4;
  private boolean enableAutomaticPunctuation_ = false;
  /**
   *
   *
   * <pre>
   * If `true`, adds punctuation to recognition result hypotheses. This feature
   * is only available in select languages. The default `false` value does not
   * add punctuation to result hypotheses.
   * </pre>
   *
   * <code>bool enable_automatic_punctuation = 4;</code>
   *
   * @return The enableAutomaticPunctuation.
   */
  @java.lang.Override
  public boolean getEnableAutomaticPunctuation() {
    return enableAutomaticPunctuation_;
  }

  public static final int ENABLE_SPOKEN_PUNCTUATION_FIELD_NUMBER = 14;
  private boolean enableSpokenPunctuation_ = false;
  /**
   *
   *
   * <pre>
   * The spoken punctuation behavior for the call. If `true`, replaces spoken
   * punctuation with the corresponding symbols in the request. For example,
   * "how are you question mark" becomes "how are you?". See
   * https://cloud.google.com/speech-to-text/docs/spoken-punctuation for
   * support. If `false`, spoken punctuation is not replaced.
   * </pre>
   *
   * <code>bool enable_spoken_punctuation = 14;</code>
   *
   * @return The enableSpokenPunctuation.
   */
  @java.lang.Override
  public boolean getEnableSpokenPunctuation() {
    return enableSpokenPunctuation_;
  }

  public static final int ENABLE_SPOKEN_EMOJIS_FIELD_NUMBER = 15;
  private boolean enableSpokenEmojis_ = false;
  /**
   *
   *
   * <pre>
   * The spoken emoji behavior for the call. If `true`, adds spoken emoji
   * formatting for the request. This will replace spoken emojis with the
   * corresponding Unicode symbols in the final transcript. If `false`, spoken
   * emojis are not replaced.
   * </pre>
   *
   * <code>bool enable_spoken_emojis = 15;</code>
   *
   * @return The enableSpokenEmojis.
   */
  @java.lang.Override
  public boolean getEnableSpokenEmojis() {
    return enableSpokenEmojis_;
  }

  public static final int MULTI_CHANNEL_MODE_FIELD_NUMBER = 17;
  private int multiChannelMode_ = 0;
  /**
   *
   *
   * <pre>
   * Mode for recognizing multi-channel audio.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode multi_channel_mode = 17;
   * </code>
   *
   * @return The enum numeric value on the wire for multiChannelMode.
   */
  @java.lang.Override
  public int getMultiChannelModeValue() {
    return multiChannelMode_;
  }
  /**
   *
   *
   * <pre>
   * Mode for recognizing multi-channel audio.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode multi_channel_mode = 17;
   * </code>
   *
   * @return The multiChannelMode.
   */
  @java.lang.Override
  public com.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode getMultiChannelMode() {
    com.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode result =
        com.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode.forNumber(
            multiChannelMode_);
    return result == null
        ? com.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode.UNRECOGNIZED
        : result;
  }

  public static final int DIARIZATION_CONFIG_FIELD_NUMBER = 9;
  private com.google.cloud.speech.v2.SpeakerDiarizationConfig diarizationConfig_;
  /**
   *
   *
   * <pre>
   * Configuration to enable speaker diarization and set additional
   * parameters to make diarization better suited for your application.
   * When this is enabled, we send all the words from the beginning of the
   * audio for the top alternative in every consecutive STREAMING response.
   * This is done in order to improve our speaker tags as our models learn to
   * identify the speakers in the conversation over time.
   * For non-streaming requests, the diarization results will be provided only
   * in the top alternative of the FINAL SpeechRecognitionResult.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
   *
   * @return Whether the diarizationConfig field is set.
   */
  @java.lang.Override
  public boolean hasDiarizationConfig() {
    return diarizationConfig_ != null;
  }
  /**
   *
   *
   * <pre>
   * Configuration to enable speaker diarization and set additional
   * parameters to make diarization better suited for your application.
   * When this is enabled, we send all the words from the beginning of the
   * audio for the top alternative in every consecutive STREAMING response.
   * This is done in order to improve our speaker tags as our models learn to
   * identify the speakers in the conversation over time.
   * For non-streaming requests, the diarization results will be provided only
   * in the top alternative of the FINAL SpeechRecognitionResult.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
   *
   * @return The diarizationConfig.
   */
  @java.lang.Override
  public com.google.cloud.speech.v2.SpeakerDiarizationConfig getDiarizationConfig() {
    return diarizationConfig_ == null
        ? com.google.cloud.speech.v2.SpeakerDiarizationConfig.getDefaultInstance()
        : diarizationConfig_;
  }
  /**
   *
   *
   * <pre>
   * Configuration to enable speaker diarization and set additional
   * parameters to make diarization better suited for your application.
   * When this is enabled, we send all the words from the beginning of the
   * audio for the top alternative in every consecutive STREAMING response.
   * This is done in order to improve our speaker tags as our models learn to
   * identify the speakers in the conversation over time.
   * For non-streaming requests, the diarization results will be provided only
   * in the top alternative of the FINAL SpeechRecognitionResult.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
   */
  @java.lang.Override
  public com.google.cloud.speech.v2.SpeakerDiarizationConfigOrBuilder
      getDiarizationConfigOrBuilder() {
    return diarizationConfig_ == null
        ? com.google.cloud.speech.v2.SpeakerDiarizationConfig.getDefaultInstance()
        : diarizationConfig_;
  }
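
  // Hand-written illustrative sketch (not generated): enabling diarization on this message.
  // The SpeakerDiarizationConfig setters shown are assumptions about that message's builder:
  //   RecognitionFeatures features =
  //       RecognitionFeatures.newBuilder()
  //           .setDiarizationConfig(
  //               SpeakerDiarizationConfig.newBuilder()
  //                   .setMinSpeakerCount(2)
  //                   .setMaxSpeakerCount(2)
  //                   .build())
  //           .build();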

  public static final int MAX_ALTERNATIVES_FIELD_NUMBER = 16;
  private int maxAlternatives_ = 0;
  /**
   *
   *
   * <pre>
   * Maximum number of recognition hypotheses to be returned.
   * The server may return fewer than `max_alternatives`.
   * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
   * one. If omitted, a maximum of one will be returned.
   * </pre>
   *
   * <code>int32 max_alternatives = 16;</code>
   *
   * @return The maxAlternatives.
   */
  @java.lang.Override
  public int getMaxAlternatives() {
    return maxAlternatives_;
  }

  private byte memoizedIsInitialized = -1;

  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
    if (profanityFilter_ != false) {
      output.writeBool(1, profanityFilter_);
    }
    if (enableWordTimeOffsets_ != false) {
      output.writeBool(2, enableWordTimeOffsets_);
    }
    if (enableWordConfidence_ != false) {
      output.writeBool(3, enableWordConfidence_);
    }
    if (enableAutomaticPunctuation_ != false) {
      output.writeBool(4, enableAutomaticPunctuation_);
    }
    if (diarizationConfig_ != null) {
      output.writeMessage(9, getDiarizationConfig());
    }
    if (enableSpokenPunctuation_ != false) {
      output.writeBool(14, enableSpokenPunctuation_);
    }
    if (enableSpokenEmojis_ != false) {
      output.writeBool(15, enableSpokenEmojis_);
    }
    if (maxAlternatives_ != 0) {
      output.writeInt32(16, maxAlternatives_);
    }
    if (multiChannelMode_
        != com.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode
            .MULTI_CHANNEL_MODE_UNSPECIFIED
            .getNumber()) {
      output.writeEnum(17, multiChannelMode_);
    }
    getUnknownFields().writeTo(output);
  }

  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    if (profanityFilter_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, profanityFilter_);
    }
    if (enableWordTimeOffsets_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(2, enableWordTimeOffsets_);
    }
    if (enableWordConfidence_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, enableWordConfidence_);
    }
    if (enableAutomaticPunctuation_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(4, enableAutomaticPunctuation_);
    }
    if (diarizationConfig_ != null) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, getDiarizationConfig());
    }
    if (enableSpokenPunctuation_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(14, enableSpokenPunctuation_);
    }
    if (enableSpokenEmojis_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(15, enableSpokenEmojis_);
    }
    if (maxAlternatives_ != 0) {
      size += com.google.protobuf.CodedOutputStream.computeInt32Size(16, maxAlternatives_);
    }
    if (multiChannelMode_
        != com.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode
            .MULTI_CHANNEL_MODE_UNSPECIFIED
            .getNumber()) {
      size += com.google.protobuf.CodedOutputStream.computeEnumSize(17, multiChannelMode_);
    }
    size += getUnknownFields().getSerializedSize();
    memoizedSize = size;
    return size;
  }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof com.google.cloud.speech.v2.RecognitionFeatures)) {
      return super.equals(obj);
    }
    com.google.cloud.speech.v2.RecognitionFeatures other =
        (com.google.cloud.speech.v2.RecognitionFeatures) obj;

    if (getProfanityFilter() != other.getProfanityFilter()) return false;
    if (getEnableWordTimeOffsets() != other.getEnableWordTimeOffsets()) return false;
    if (getEnableWordConfidence() != other.getEnableWordConfidence()) return false;
    if (getEnableAutomaticPunctuation() != other.getEnableAutomaticPunctuation()) return false;
    if (getEnableSpokenPunctuation() != other.getEnableSpokenPunctuation()) return false;
    if (getEnableSpokenEmojis() != other.getEnableSpokenEmojis()) return false;
    if (multiChannelMode_ != other.multiChannelMode_) return false;
    if (hasDiarizationConfig() != other.hasDiarizationConfig()) return false;
    if (hasDiarizationConfig()) {
      if (!getDiarizationConfig().equals(other.getDiarizationConfig())) return false;
    }
    if (getMaxAlternatives() != other.getMaxAlternatives()) return false;
    if (!getUnknownFields().equals(other.getUnknownFields())) return false;
    return true;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    hash = (37 * hash) + PROFANITY_FILTER_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getProfanityFilter());
    hash = (37 * hash) + ENABLE_WORD_TIME_OFFSETS_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordTimeOffsets());
    hash = (37 * hash) + ENABLE_WORD_CONFIDENCE_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordConfidence());
    hash = (37 * hash) + ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableAutomaticPunctuation());
    hash = (37 * hash) + ENABLE_SPOKEN_PUNCTUATION_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableSpokenPunctuation());
    hash = (37 * hash) + ENABLE_SPOKEN_EMOJIS_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableSpokenEmojis());
    hash = (37 * hash) + MULTI_CHANNEL_MODE_FIELD_NUMBER;
    hash = (53 * hash) + multiChannelMode_;
    if (hasDiarizationConfig()) {
      hash = (37 * hash) + DIARIZATION_CONFIG_FIELD_NUMBER;
      hash = (53 * hash) + getDiarizationConfig().hashCode();
    }
    hash = (37 * hash) + MAX_ALTERNATIVES_FIELD_NUMBER;
    hash = (53 * hash) + getMaxAlternatives();
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static com.google.cloud.speech.v2.RecognitionFeatures parseFrom(java.nio.ByteBuffer data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v2.RecognitionFeatures parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.speech.v2.RecognitionFeatures parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v2.RecognitionFeatures parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.speech.v2.RecognitionFeatures parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v2.RecognitionFeatures parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.speech.v2.RecognitionFeatures parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v2.RecognitionFeatures parseFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.speech.v2.RecognitionFeatures parseDelimitedFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v2.RecognitionFeatures parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.speech.v2.RecognitionFeatures parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v2.RecognitionFeatures parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }
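
  // Hand-written illustrative round trip (not generated): serialize a message and parse it
  // back from the resulting bytes.
  //   byte[] bytes =
  //       RecognitionFeatures.newBuilder().setMaxAlternatives(3).build().toByteArray();
  //   RecognitionFeatures parsed = RecognitionFeatures.parseFrom(bytes);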

  @java.lang.Override
  public Builder newBuilderForType() {
    return newBuilder();
  }

  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }

  public static Builder newBuilder(com.google.cloud.speech.v2.RecognitionFeatures prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }

  @java.lang.Override
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   *
   *
   * <pre>
   * Available recognition features.
   * </pre>
   *
   * Protobuf type {@code google.cloud.speech.v2.RecognitionFeatures}
   */
  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
      implements
      // @@protoc_insertion_point(builder_implements:google.cloud.speech.v2.RecognitionFeatures)
      com.google.cloud.speech.v2.RecognitionFeaturesOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return com.google.cloud.speech.v2.CloudSpeechProto
          .internal_static_google_cloud_speech_v2_RecognitionFeatures_descriptor;
    }

    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.cloud.speech.v2.CloudSpeechProto
          .internal_static_google_cloud_speech_v2_RecognitionFeatures_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.cloud.speech.v2.RecognitionFeatures.class,
              com.google.cloud.speech.v2.RecognitionFeatures.Builder.class);
    }

    // Construct using com.google.cloud.speech.v2.RecognitionFeatures.newBuilder()
    private Builder() {}

    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
    }

    @java.lang.Override
    public Builder clear() {
      super.clear();
      bitField0_ = 0;
      profanityFilter_ = false;
      enableWordTimeOffsets_ = false;
      enableWordConfidence_ = false;
      enableAutomaticPunctuation_ = false;
      enableSpokenPunctuation_ = false;
      enableSpokenEmojis_ = false;
      multiChannelMode_ = 0;
      diarizationConfig_ = null;
      if (diarizationConfigBuilder_ != null) {
        diarizationConfigBuilder_.dispose();
        diarizationConfigBuilder_ = null;
      }
      maxAlternatives_ = 0;
      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.cloud.speech.v2.CloudSpeechProto
          .internal_static_google_cloud_speech_v2_RecognitionFeatures_descriptor;
    }

    @java.lang.Override
    public com.google.cloud.speech.v2.RecognitionFeatures getDefaultInstanceForType() {
      return com.google.cloud.speech.v2.RecognitionFeatures.getDefaultInstance();
    }

    @java.lang.Override
    public com.google.cloud.speech.v2.RecognitionFeatures build() {
      com.google.cloud.speech.v2.RecognitionFeatures result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public com.google.cloud.speech.v2.RecognitionFeatures buildPartial() {
      com.google.cloud.speech.v2.RecognitionFeatures result =
          new com.google.cloud.speech.v2.RecognitionFeatures(this);
      if (bitField0_ != 0) {
        buildPartial0(result);
      }
      onBuilt();
      return result;
    }

    private void buildPartial0(com.google.cloud.speech.v2.RecognitionFeatures result) {
      int from_bitField0_ = bitField0_;
      if (((from_bitField0_ & 0x00000001) != 0)) {
        result.profanityFilter_ = profanityFilter_;
      }
      if (((from_bitField0_ & 0x00000002) != 0)) {
        result.enableWordTimeOffsets_ = enableWordTimeOffsets_;
      }
      if (((from_bitField0_ & 0x00000004) != 0)) {
        result.enableWordConfidence_ = enableWordConfidence_;
      }
      if (((from_bitField0_ & 0x00000008) != 0)) {
        result.enableAutomaticPunctuation_ = enableAutomaticPunctuation_;
      }
      if (((from_bitField0_ & 0x00000010) != 0)) {
        result.enableSpokenPunctuation_ = enableSpokenPunctuation_;
      }
      if (((from_bitField0_ & 0x00000020) != 0)) {
        result.enableSpokenEmojis_ = enableSpokenEmojis_;
      }
      if (((from_bitField0_ & 0x00000040) != 0)) {
        result.multiChannelMode_ = multiChannelMode_;
      }
      if (((from_bitField0_ & 0x00000080) != 0)) {
        result.diarizationConfig_ =
            diarizationConfigBuilder_ == null
                ? diarizationConfig_
                : diarizationConfigBuilder_.build();
      }
      if (((from_bitField0_ & 0x00000100) != 0)) {
        result.maxAlternatives_ = maxAlternatives_;
      }
    }

    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }

    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.setField(field, value);
    }

    @java.lang.Override
    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }

    @java.lang.Override
    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }

    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }

    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }

    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.cloud.speech.v2.RecognitionFeatures) {
        return mergeFrom((com.google.cloud.speech.v2.RecognitionFeatures) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(com.google.cloud.speech.v2.RecognitionFeatures other) {
      if (other == com.google.cloud.speech.v2.RecognitionFeatures.getDefaultInstance()) return this;
      if (other.getProfanityFilter() != false) {
        setProfanityFilter(other.getProfanityFilter());
      }
      if (other.getEnableWordTimeOffsets() != false) {
        setEnableWordTimeOffsets(other.getEnableWordTimeOffsets());
      }
      if (other.getEnableWordConfidence() != false) {
        setEnableWordConfidence(other.getEnableWordConfidence());
      }
      if (other.getEnableAutomaticPunctuation() != false) {
        setEnableAutomaticPunctuation(other.getEnableAutomaticPunctuation());
      }
      if (other.getEnableSpokenPunctuation() != false) {
        setEnableSpokenPunctuation(other.getEnableSpokenPunctuation());
      }
      if (other.getEnableSpokenEmojis() != false) {
        setEnableSpokenEmojis(other.getEnableSpokenEmojis());
      }
      if (other.multiChannelMode_ != 0) {
        setMultiChannelModeValue(other.getMultiChannelModeValue());
      }
      if (other.hasDiarizationConfig()) {
        mergeDiarizationConfig(other.getDiarizationConfig());
      }
      if (other.getMaxAlternatives() != 0) {
        setMaxAlternatives(other.getMaxAlternatives());
      }
      this.mergeUnknownFields(other.getUnknownFields());
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 8:
              {
                profanityFilter_ = input.readBool();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
            case 16:
              {
                enableWordTimeOffsets_ = input.readBool();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
            case 24:
              {
                enableWordConfidence_ = input.readBool();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
            case 32:
              {
                enableAutomaticPunctuation_ = input.readBool();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
            case 74:
              {
                input.readMessage(
                    getDiarizationConfigFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00000080;
                break;
              } // case 74
            case 112:
              {
                enableSpokenPunctuation_ = input.readBool();
                bitField0_ |= 0x00000010;
                break;
              } // case 112
            case 120:
              {
                enableSpokenEmojis_ = input.readBool();
                bitField0_ |= 0x00000020;
                break;
              } // case 120
            case 128:
              {
                maxAlternatives_ = input.readInt32();
                bitField0_ |= 0x00000100;
                break;
              } // case 128
            case 136:
              {
                multiChannelMode_ = input.readEnum();
                bitField0_ |= 0x00000040;
                break;
              } // case 136
            default:
              {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
          } // switch (tag)
        } // while (!done)
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.unwrapIOException();
      } finally {
        onChanged();
      } // finally
      return this;
    }

    private int bitField0_;

    private boolean profanityFilter_;
    /**
     *
     *
     * <pre>
     * If set to `true`, the server will attempt to filter out profanities,
     * replacing all but the initial character in each filtered word with
     * asterisks, for instance, "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * </pre>
     *
     * <code>bool profanity_filter = 1;</code>
     *
     * @return The profanityFilter.
     */
    @java.lang.Override
    public boolean getProfanityFilter() {
      return profanityFilter_;
    }
    /**
     *
     *
     * <pre>
     * If set to `true`, the server will attempt to filter out profanities,
     * replacing all but the initial character in each filtered word with
     * asterisks, for instance, "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * </pre>
     *
     * <code>bool profanity_filter = 1;</code>
     *
     * @param value The profanityFilter to set.
     * @return This builder for chaining.
     */
    public Builder setProfanityFilter(boolean value) {

      profanityFilter_ = value;
      bitField0_ |= 0x00000001;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * If set to `true`, the server will attempt to filter out profanities,
     * replacing all but the initial character in each filtered word with
     * asterisks, for instance, "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * </pre>
     *
     * <code>bool profanity_filter = 1;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearProfanityFilter() {
      bitField0_ = (bitField0_ & ~0x00000001);
      profanityFilter_ = false;
      onChanged();
      return this;
    }

    private boolean enableWordTimeOffsets_;
    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and the start and end
     * time offsets (timestamps) for those words. If `false`, no word-level time
     * offset information is returned. The default is `false`.
     * </pre>
     *
     * <code>bool enable_word_time_offsets = 2;</code>
     *
     * @return The enableWordTimeOffsets.
     */
    @java.lang.Override
    public boolean getEnableWordTimeOffsets() {
      return enableWordTimeOffsets_;
    }
    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and the start and end
     * time offsets (timestamps) for those words. If `false`, no word-level time
     * offset information is returned. The default is `false`.
     * </pre>
     *
     * <code>bool enable_word_time_offsets = 2;</code>
     *
     * @param value The enableWordTimeOffsets to set.
     * @return This builder for chaining.
     */
    public Builder setEnableWordTimeOffsets(boolean value) {

      enableWordTimeOffsets_ = value;
      bitField0_ |= 0x00000002;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and the start and end
     * time offsets (timestamps) for those words. If `false`, no word-level time
     * offset information is returned. The default is `false`.
     * </pre>
     *
     * <code>bool enable_word_time_offsets = 2;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableWordTimeOffsets() {
      bitField0_ = (bitField0_ & ~0x00000002);
      enableWordTimeOffsets_ = false;
      onChanged();
      return this;
    }

    private boolean enableWordConfidence_;
    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and the confidence for
     * those words. If `false`, no word-level confidence information is returned.
     * The default is `false`.
     * </pre>
     *
     * <code>bool enable_word_confidence = 3;</code>
     *
     * @return The enableWordConfidence.
     */
    @java.lang.Override
    public boolean getEnableWordConfidence() {
      return enableWordConfidence_;
    }
    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and the confidence for
     * those words. If `false`, no word-level confidence information is returned.
     * The default is `false`.
     * </pre>
     *
     * <code>bool enable_word_confidence = 3;</code>
     *
     * @param value The enableWordConfidence to set.
     * @return This builder for chaining.
     */
    public Builder setEnableWordConfidence(boolean value) {

      enableWordConfidence_ = value;
      bitField0_ |= 0x00000004;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and the confidence for
     * those words. If `false`, no word-level confidence information is returned.
     * The default is `false`.
     * </pre>
     *
     * <code>bool enable_word_confidence = 3;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableWordConfidence() {
      bitField0_ = (bitField0_ & ~0x00000004);
      enableWordConfidence_ = false;
      onChanged();
      return this;
    }

    private boolean enableAutomaticPunctuation_;
    /**
     *
     *
     * <pre>
     * If `true`, adds punctuation to recognition result hypotheses. This feature
     * is only available in select languages. The default `false` value does not
     * add punctuation to result hypotheses.
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 4;</code>
     *
     * @return The enableAutomaticPunctuation.
     */
    @java.lang.Override
    public boolean getEnableAutomaticPunctuation() {
      return enableAutomaticPunctuation_;
    }
    /**
     *
     *
     * <pre>
     * If `true`, adds punctuation to recognition result hypotheses. This feature
     * is only available in select languages. The default `false` value does not
     * add punctuation to result hypotheses.
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 4;</code>
     *
     * @param value The enableAutomaticPunctuation to set.
     * @return This builder for chaining.
     */
    public Builder setEnableAutomaticPunctuation(boolean value) {

      enableAutomaticPunctuation_ = value;
      bitField0_ |= 0x00000008;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * If `true`, adds punctuation to recognition result hypotheses. This feature
     * is only available in select languages. The default `false` value does not
     * add punctuation to result hypotheses.
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 4;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableAutomaticPunctuation() {
      bitField0_ = (bitField0_ & ~0x00000008);
      enableAutomaticPunctuation_ = false;
      onChanged();
      return this;
    }

    private boolean enableSpokenPunctuation_;
    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call. If `true`, replaces spoken
     * punctuation with the corresponding symbols in the request. For example,
     * "how are you question mark" becomes "how are you?". See
     * https://cloud.google.com/speech-to-text/docs/spoken-punctuation for
     * support. If `false`, spoken punctuation is not replaced.
     * </pre>
     *
     * <code>bool enable_spoken_punctuation = 14;</code>
     *
     * @return The enableSpokenPunctuation.
     */
    @java.lang.Override
    public boolean getEnableSpokenPunctuation() {
      return enableSpokenPunctuation_;
    }
    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call. If `true`, replaces spoken
     * punctuation with the corresponding symbols in the request. For example,
     * "how are you question mark" becomes "how are you?". See
     * https://cloud.google.com/speech-to-text/docs/spoken-punctuation for
     * support. If `false`, spoken punctuation is not replaced.
     * </pre>
     *
     * <code>bool enable_spoken_punctuation = 14;</code>
     *
     * @param value The enableSpokenPunctuation to set.
     * @return This builder for chaining.
     */
    public Builder setEnableSpokenPunctuation(boolean value) {

      enableSpokenPunctuation_ = value;
      bitField0_ |= 0x00000010;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call. If `true`, replaces spoken
     * punctuation with the corresponding symbols in the request. For example,
     * "how are you question mark" becomes "how are you?". See
     * https://cloud.google.com/speech-to-text/docs/spoken-punctuation for
     * support. If `false`, spoken punctuation is not replaced.
     * </pre>
     *
     * <code>bool enable_spoken_punctuation = 14;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableSpokenPunctuation() {
      bitField0_ = (bitField0_ & ~0x00000010);
      enableSpokenPunctuation_ = false;
      onChanged();
      return this;
    }

    private boolean enableSpokenEmojis_;
    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call. If `true`, adds spoken emoji
     * formatting for the request. This will replace spoken emojis with the
     * corresponding Unicode symbols in the final transcript. If `false`, spoken
     * emojis are not replaced.
     * </pre>
     *
     * <code>bool enable_spoken_emojis = 15;</code>
     *
     * @return The enableSpokenEmojis.
     */
    @java.lang.Override
    public boolean getEnableSpokenEmojis() {
      return enableSpokenEmojis_;
    }
    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call. If `true`, adds spoken emoji
     * formatting for the request. This will replace spoken emojis with the
     * corresponding Unicode symbols in the final transcript. If `false`, spoken
     * emojis are not replaced.
     * </pre>
     *
     * <code>bool enable_spoken_emojis = 15;</code>
     *
     * @param value The enableSpokenEmojis to set.
     * @return This builder for chaining.
     */
    public Builder setEnableSpokenEmojis(boolean value) {

      enableSpokenEmojis_ = value;
      bitField0_ |= 0x00000020;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call. If `true`, adds spoken emoji
     * formatting for the request. This will replace spoken emojis with the
     * corresponding Unicode symbols in the final transcript. If `false`, spoken
     * emojis are not replaced.
     * </pre>
     *
     * <code>bool enable_spoken_emojis = 15;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableSpokenEmojis() {
      bitField0_ = (bitField0_ & ~0x00000020);
      enableSpokenEmojis_ = false;
      onChanged();
      return this;
    }

    private int multiChannelMode_ = 0;
    /**
     *
     *
     * <pre>
     * Mode for recognizing multi-channel audio.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode multi_channel_mode = 17;
     * </code>
     *
     * @return The enum numeric value on the wire for multiChannelMode.
     */
    @java.lang.Override
    public int getMultiChannelModeValue() {
      return multiChannelMode_;
    }
    /**
     *
     *
     * <pre>
     * Mode for recognizing multi-channel audio.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode multi_channel_mode = 17;
     * </code>
     *
     * @param value The enum numeric value on the wire for multiChannelMode to set.
     * @return This builder for chaining.
     */
    public Builder setMultiChannelModeValue(int value) {
      multiChannelMode_ = value;
      bitField0_ |= 0x00000040;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Mode for recognizing multi-channel audio.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode multi_channel_mode = 17;
     * </code>
     *
     * @return The multiChannelMode.
     */
    @java.lang.Override
    public com.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode getMultiChannelMode() {
      com.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode result =
          com.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode.forNumber(
              multiChannelMode_);
      return result == null
          ? com.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode.UNRECOGNIZED
          : result;
    }
    /**
     *
     *
     * <pre>
     * Mode for recognizing multi-channel audio.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode multi_channel_mode = 17;
     * </code>
     *
     * @param value The multiChannelMode to set.
     * @return This builder for chaining.
     */
    public Builder setMultiChannelMode(
        com.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode value) {
      if (value == null) {
        throw new NullPointerException();
      }
      bitField0_ |= 0x00000040;
      multiChannelMode_ = value.getNumber();
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Mode for recognizing multi-channel audio.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode multi_channel_mode = 17;
     * </code>
     *
     * @return This builder for chaining.
     */
    public Builder clearMultiChannelMode() {
      bitField0_ = (bitField0_ & ~0x00000040);
      multiChannelMode_ = 0;
      onChanged();
      return this;
    }

    private com.google.cloud.speech.v2.SpeakerDiarizationConfig diarizationConfig_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v2.SpeakerDiarizationConfig,
            com.google.cloud.speech.v2.SpeakerDiarizationConfig.Builder,
            com.google.cloud.speech.v2.SpeakerDiarizationConfigOrBuilder>
        diarizationConfigBuilder_;
    /**
     *
     *
     * <pre>
     * Configuration to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
     *
     * @return Whether the diarizationConfig field is set.
     */
    public boolean hasDiarizationConfig() {
      return ((bitField0_ & 0x00000080) != 0);
    }
    /**
     *
     *
     * <pre>
     * Configuration to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
     *
     * @return The diarizationConfig.
     */
    public com.google.cloud.speech.v2.SpeakerDiarizationConfig getDiarizationConfig() {
      if (diarizationConfigBuilder_ == null) {
        return diarizationConfig_ == null
            ? com.google.cloud.speech.v2.SpeakerDiarizationConfig.getDefaultInstance()
            : diarizationConfig_;
      } else {
        return diarizationConfigBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * Configuration to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
     */
    public Builder setDiarizationConfig(com.google.cloud.speech.v2.SpeakerDiarizationConfig value) {
      if (diarizationConfigBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        diarizationConfig_ = value;
      } else {
        diarizationConfigBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00000080;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Configuration to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
     */
    public Builder setDiarizationConfig(
        com.google.cloud.speech.v2.SpeakerDiarizationConfig.Builder builderForValue) {
      if (diarizationConfigBuilder_ == null) {
        diarizationConfig_ = builderForValue.build();
      } else {
        diarizationConfigBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00000080;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Configuration to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
     */
    public Builder mergeDiarizationConfig(
        com.google.cloud.speech.v2.SpeakerDiarizationConfig value) {
      if (diarizationConfigBuilder_ == null) {
        if (((bitField0_ & 0x00000080) != 0)
            && diarizationConfig_ != null
            && diarizationConfig_
                != com.google.cloud.speech.v2.SpeakerDiarizationConfig.getDefaultInstance()) {
          getDiarizationConfigBuilder().mergeFrom(value);
        } else {
          diarizationConfig_ = value;
        }
      } else {
        diarizationConfigBuilder_.mergeFrom(value);
      }
      bitField0_ |= 0x00000080;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Configuration to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
     */
    public Builder clearDiarizationConfig() {
      bitField0_ = (bitField0_ & ~0x00000080);
      diarizationConfig_ = null;
      if (diarizationConfigBuilder_ != null) {
        diarizationConfigBuilder_.dispose();
        diarizationConfigBuilder_ = null;
      }
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Configuration to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
     */
    public com.google.cloud.speech.v2.SpeakerDiarizationConfig.Builder
        getDiarizationConfigBuilder() {
      bitField0_ |= 0x00000080;
      onChanged();
      return getDiarizationConfigFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * Configuration to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
     */
    public com.google.cloud.speech.v2.SpeakerDiarizationConfigOrBuilder
        getDiarizationConfigOrBuilder() {
      if (diarizationConfigBuilder_ != null) {
        return diarizationConfigBuilder_.getMessageOrBuilder();
      } else {
        return diarizationConfig_ == null
            ? com.google.cloud.speech.v2.SpeakerDiarizationConfig.getDefaultInstance()
            : diarizationConfig_;
      }
    }
    /**
     *
     *
     * <pre>
     * Configuration to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v2.SpeakerDiarizationConfig,
            com.google.cloud.speech.v2.SpeakerDiarizationConfig.Builder,
            com.google.cloud.speech.v2.SpeakerDiarizationConfigOrBuilder>
        getDiarizationConfigFieldBuilder() {
      if (diarizationConfigBuilder_ == null) {
        diarizationConfigBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.speech.v2.SpeakerDiarizationConfig,
                com.google.cloud.speech.v2.SpeakerDiarizationConfig.Builder,
                com.google.cloud.speech.v2.SpeakerDiarizationConfigOrBuilder>(
                getDiarizationConfig(), getParentForChildren(), isClean());
        diarizationConfig_ = null;
      }
      return diarizationConfigBuilder_;
    }
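
    // A hedged usage sketch (not part of the generated API surface): enabling
    // speaker diarization through this builder. The setMinSpeakerCount and
    // setMaxSpeakerCount calls are assumed from the SpeakerDiarizationConfig
    // message; verify against the actual proto before relying on them.
    //
    //   RecognitionFeatures features =
    //       RecognitionFeatures.newBuilder()
    //           .setDiarizationConfig(
    //               com.google.cloud.speech.v2.SpeakerDiarizationConfig.newBuilder()
    //                   .setMinSpeakerCount(2)
    //                   .setMaxSpeakerCount(4))
    //           .build();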

    private int maxAlternatives_;
    /**
     *
     *
     * <pre>
     * Maximum number of recognition hypotheses to be returned.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, a maximum of one will be returned.
     * </pre>
     *
     * <code>int32 max_alternatives = 16;</code>
     *
     * @return The maxAlternatives.
     */
    @java.lang.Override
    public int getMaxAlternatives() {
      return maxAlternatives_;
    }
    /**
     *
     *
     * <pre>
     * Maximum number of recognition hypotheses to be returned.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, a maximum of one will be returned.
     * </pre>
     *
     * <code>int32 max_alternatives = 16;</code>
     *
     * @param value The maxAlternatives to set.
     * @return This builder for chaining.
     */
    public Builder setMaxAlternatives(int value) {
      maxAlternatives_ = value;
      bitField0_ |= 0x00000100;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Maximum number of recognition hypotheses to be returned.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, a maximum of one will be returned.
     * </pre>
     *
     * <code>int32 max_alternatives = 16;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearMaxAlternatives() {
      bitField0_ = (bitField0_ & ~0x00000100);
      maxAlternatives_ = 0;
      onChanged();
      return this;
    }
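
    // A hedged usage sketch: requesting multiple recognition hypotheses. Per the
    // field comment above, the server may still return fewer alternatives than
    // requested; the value 3 below is purely illustrative.
    //
    //   RecognitionFeatures features =
    //       RecognitionFeatures.newBuilder()
    //           .setMaxAlternatives(3)
    //           .build();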

    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.speech.v2.RecognitionFeatures)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.speech.v2.RecognitionFeatures)
  private static final com.google.cloud.speech.v2.RecognitionFeatures DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.speech.v2.RecognitionFeatures();
  }

  public static com.google.cloud.speech.v2.RecognitionFeatures getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<RecognitionFeatures> PARSER =
      new com.google.protobuf.AbstractParser<RecognitionFeatures>() {
        @java.lang.Override
        public RecognitionFeatures parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (com.google.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new com.google.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

  public static com.google.protobuf.Parser<RecognitionFeatures> parser() {
    return PARSER;
  }
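
  // A hedged usage sketch: decoding a serialized RecognitionFeatures message from
  // wire format via the exposed parser. `bytes` is assumed to hold a valid
  // encoding; malformed input surfaces as InvalidProtocolBufferException.
  //
  //   RecognitionFeatures features =
  //       RecognitionFeatures.parser().parseFrom(bytes);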

  @java.lang.Override
  public com.google.protobuf.Parser<RecognitionFeatures> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.cloud.speech.v2.RecognitionFeatures getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
