/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/speech/v1p1beta1/cloud_speech.proto

package com.google.cloud.speech.v1p1beta1;

/**
 *
 *
 * <pre>
 * Provides information to the recognizer that specifies how to process the
 * request.
 * </pre>
 *
 * Protobuf type {@code google.cloud.speech.v1p1beta1.RecognitionConfig}
 */
public final class RecognitionConfig extends com.google.protobuf.GeneratedMessageV3
    implements
    // @@protoc_insertion_point(message_implements:google.cloud.speech.v1p1beta1.RecognitionConfig)
    RecognitionConfigOrBuilder {
  private static final long serialVersionUID = 0L;
  // Use RecognitionConfig.newBuilder() to construct.
  private RecognitionConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }

  private RecognitionConfig() {
    encoding_ = 0;
    languageCode_ = "";
    alternativeLanguageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
    speechContexts_ = java.util.Collections.emptyList();
    model_ = "";
  }

  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
    return new RecognitionConfig();
  }

  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
    return this.unknownFields;
  }

  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return com.google.cloud.speech.v1p1beta1.SpeechProto
        .internal_static_google_cloud_speech_v1p1beta1_RecognitionConfig_descriptor;
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.google.cloud.speech.v1p1beta1.SpeechProto
        .internal_static_google_cloud_speech_v1p1beta1_RecognitionConfig_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.speech.v1p1beta1.RecognitionConfig.class,
            com.google.cloud.speech.v1p1beta1.RecognitionConfig.Builder.class);
  }

  /**
   *
   *
   * <pre>
   * The encoding of the audio data sent in the request.
   * All encodings support only 1 channel (mono) audio, unless the
   * `audio_channel_count` and `enable_separate_recognition_per_channel` fields
   * are set.
   * For best results, the audio source should be captured and transmitted using
   * a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
   * recognition can be reduced if lossy codecs are used to capture or transmit
   * audio, particularly if background noise is present. Lossy codecs include
   * `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, `MP3`,
   * and `WEBM_OPUS`.
   * The `FLAC` and `WAV` audio file formats include a header that describes the
   * included audio content. You can request recognition for `WAV` files that
   * contain either `LINEAR16` or `MULAW` encoded audio.
   * If you send audio in `FLAC` or `WAV` format in
   * your request, you do not need to specify an `AudioEncoding`; the audio
   * encoding format is determined from the file header. If you specify
   * an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
   * encoding configuration must match the encoding described in the audio
   * header; otherwise the request returns an
   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error
   * code.
   * </pre>
   *
   * Protobuf enum {@code google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding}
   */
  public enum AudioEncoding implements com.google.protobuf.ProtocolMessageEnum {
    /**
     *
     *
     * <pre>
     * Not specified.
     * </pre>
     *
     * <code>ENCODING_UNSPECIFIED = 0;</code>
     */
    ENCODING_UNSPECIFIED(0),
    /**
     *
     *
     * <pre>
     * Uncompressed 16-bit signed little-endian samples (Linear PCM).
     * </pre>
     *
     * <code>LINEAR16 = 1;</code>
     */
    LINEAR16(1),
    /**
     *
     *
     * <pre>
     * `FLAC` (Free Lossless Audio
     * Codec) is the recommended encoding because it is
     * lossless--therefore recognition is not compromised--and
     * requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
     * encoding supports 16-bit and 24-bit samples; however, not all fields in
     * `STREAMINFO` are supported.
     * </pre>
     *
     * <code>FLAC = 2;</code>
     */
    FLAC(2),
    /**
     *
     *
     * <pre>
     * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
     * </pre>
     *
     * <code>MULAW = 3;</code>
     */
    MULAW(3),
    /**
     *
     *
     * <pre>
     * Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
     * </pre>
     *
     * <code>AMR = 4;</code>
     */
    AMR(4),
    /**
     *
     *
     * <pre>
     * Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
     * </pre>
     *
     * <code>AMR_WB = 5;</code>
     */
    AMR_WB(5),
    /**
     *
     *
     * <pre>
     * Opus encoded audio frames in Ogg container
     * ([OggOpus](https://wiki.xiph.org/OggOpus)).
     * `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
     * </pre>
     *
     * <code>OGG_OPUS = 6;</code>
     */
    OGG_OPUS(6),
    /**
     *
     *
     * <pre>
     * Although the use of lossy encodings is not recommended, if a very low
     * bitrate encoding is required, `OGG_OPUS` is highly preferred over
     * Speex encoding. The [Speex](https://speex.org/) encoding supported by
     * Cloud Speech API has a header byte in each block, as in MIME type
     * `audio/x-speex-with-header-byte`.
     * It is a variant of the RTP Speex encoding defined in
     * [RFC 5574](https://tools.ietf.org/html/rfc5574).
     * The stream is a sequence of blocks, one block per RTP packet. Each block
     * starts with a byte containing the length of the block, in bytes, followed
     * by one or more frames of Speex data, padded to an integral number of
     * bytes (octets) as specified in RFC 5574. In other words, each RTP header
     * is replaced with a single byte containing the block length. Only Speex
     * wideband is supported. `sample_rate_hertz` must be 16000.
     * </pre>
     *
     * <code>SPEEX_WITH_HEADER_BYTE = 7;</code>
     */
    SPEEX_WITH_HEADER_BYTE(7),
    /**
     *
     *
     * <pre>
     * MP3 audio. MP3 encoding is a Beta feature and only available in
     * v1p1beta1. Supports all standard MP3 bitrates (which range from 32-320
     * kbps). When using this encoding, `sample_rate_hertz` has to match the
     * sample rate of the file being used.
     * </pre>
     *
     * <code>MP3 = 8;</code>
     */
    MP3(8),
    /**
     *
     *
     * <pre>
     * Opus encoded audio frames in WebM container
     * ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must be
     * one of 8000, 12000, 16000, 24000, or 48000.
     * </pre>
     *
     * <code>WEBM_OPUS = 9;</code>
     */
    WEBM_OPUS(9),
    UNRECOGNIZED(-1),
    ;

    /**
     *
     *
     * <pre>
     * Not specified.
     * </pre>
     *
     * <code>ENCODING_UNSPECIFIED = 0;</code>
     */
    public static final int ENCODING_UNSPECIFIED_VALUE = 0;
    /**
     *
     *
     * <pre>
     * Uncompressed 16-bit signed little-endian samples (Linear PCM).
     * </pre>
     *
     * <code>LINEAR16 = 1;</code>
     */
    public static final int LINEAR16_VALUE = 1;
    /**
     *
     *
     * <pre>
     * `FLAC` (Free Lossless Audio
     * Codec) is the recommended encoding because it is
     * lossless--therefore recognition is not compromised--and
     * requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
     * encoding supports 16-bit and 24-bit samples; however, not all fields in
     * `STREAMINFO` are supported.
     * </pre>
     *
     * <code>FLAC = 2;</code>
     */
    public static final int FLAC_VALUE = 2;
    /**
     *
     *
     * <pre>
     * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
     * </pre>
     *
     * <code>MULAW = 3;</code>
     */
    public static final int MULAW_VALUE = 3;
    /**
     *
     *
     * <pre>
     * Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
     * </pre>
     *
     * <code>AMR = 4;</code>
     */
    public static final int AMR_VALUE = 4;
    /**
     *
     *
     * <pre>
     * Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
     * </pre>
     *
     * <code>AMR_WB = 5;</code>
     */
    public static final int AMR_WB_VALUE = 5;
    /**
     *
     *
     * <pre>
     * Opus encoded audio frames in Ogg container
     * ([OggOpus](https://wiki.xiph.org/OggOpus)).
     * `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
     * </pre>
     *
     * <code>OGG_OPUS = 6;</code>
     */
    public static final int OGG_OPUS_VALUE = 6;
    /**
     *
     *
     * <pre>
     * Although the use of lossy encodings is not recommended, if a very low
     * bitrate encoding is required, `OGG_OPUS` is highly preferred over
     * Speex encoding. The [Speex](https://speex.org/) encoding supported by
     * Cloud Speech API has a header byte in each block, as in MIME type
     * `audio/x-speex-with-header-byte`.
     * It is a variant of the RTP Speex encoding defined in
     * [RFC 5574](https://tools.ietf.org/html/rfc5574).
     * The stream is a sequence of blocks, one block per RTP packet. Each block
     * starts with a byte containing the length of the block, in bytes, followed
     * by one or more frames of Speex data, padded to an integral number of
     * bytes (octets) as specified in RFC 5574. In other words, each RTP header
     * is replaced with a single byte containing the block length. Only Speex
     * wideband is supported. `sample_rate_hertz` must be 16000.
     * </pre>
     *
     * <code>SPEEX_WITH_HEADER_BYTE = 7;</code>
     */
    public static final int SPEEX_WITH_HEADER_BYTE_VALUE = 7;
    /**
     *
     *
     * <pre>
     * MP3 audio. MP3 encoding is a Beta feature and only available in
     * v1p1beta1. Supports all standard MP3 bitrates (which range from 32-320
     * kbps). When using this encoding, `sample_rate_hertz` has to match the
     * sample rate of the file being used.
     * </pre>
     *
     * <code>MP3 = 8;</code>
     */
    public static final int MP3_VALUE = 8;
    /**
     *
     *
     * <pre>
     * Opus encoded audio frames in WebM container
     * ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must be
     * one of 8000, 12000, 16000, 24000, or 48000.
     * </pre>
     *
     * <code>WEBM_OPUS = 9;</code>
     */
    public static final int WEBM_OPUS_VALUE = 9;

    public final int getNumber() {
      if (this == UNRECOGNIZED) {
        throw new java.lang.IllegalArgumentException(
            "Can't get the number of an unknown enum value.");
      }
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static AudioEncoding valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static AudioEncoding forNumber(int value) {
      switch (value) {
        case 0:
          return ENCODING_UNSPECIFIED;
        case 1:
          return LINEAR16;
        case 2:
          return FLAC;
        case 3:
          return MULAW;
        case 4:
          return AMR;
        case 5:
          return AMR_WB;
        case 6:
          return OGG_OPUS;
        case 7:
          return SPEEX_WITH_HEADER_BYTE;
        case 8:
          return MP3;
        case 9:
          return WEBM_OPUS;
        default:
          return null;
      }
    }
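
    // Illustrative sketch (not part of the generated code): forNumber returns
    // null for numbers this runtime does not know, so callers typically fall
    // back to UNRECOGNIZED, mirroring what the message's getEncoding() does.
    //
    //   AudioEncoding decode(int wireValue) {
    //     AudioEncoding e = AudioEncoding.forNumber(wireValue);
    //     return e == null ? AudioEncoding.UNRECOGNIZED : e;
    //   }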

    public static com.google.protobuf.Internal.EnumLiteMap<AudioEncoding> internalGetValueMap() {
      return internalValueMap;
    }

    private static final com.google.protobuf.Internal.EnumLiteMap<AudioEncoding> internalValueMap =
        new com.google.protobuf.Internal.EnumLiteMap<AudioEncoding>() {
          public AudioEncoding findValueByNumber(int number) {
            return AudioEncoding.forNumber(number);
          }
        };

    public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() {
      if (this == UNRECOGNIZED) {
        throw new java.lang.IllegalStateException(
            "Can't get the descriptor of an unrecognized enum value.");
      }
      return getDescriptor().getValues().get(ordinal());
    }

    public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() {
      return getDescriptor();
    }

    public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() {
      return com.google.cloud.speech.v1p1beta1.RecognitionConfig.getDescriptor()
          .getEnumTypes()
          .get(0);
    }

    private static final AudioEncoding[] VALUES = values();

    public static AudioEncoding valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type.");
      }
      if (desc.getIndex() == -1) {
        return UNRECOGNIZED;
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private AudioEncoding(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding)
  }

  public static final int ENCODING_FIELD_NUMBER = 1;
  private int encoding_ = 0;
  /**
   *
   *
   * <pre>
   * Encoding of audio data sent in all `RecognitionAudio` messages.
   * This field is optional for `FLAC` and `WAV` audio files and required
   * for all other audio formats. For details, see
   * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
   * </pre>
   *
   * <code>.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1;</code>
   *
   * @return The enum numeric value on the wire for encoding.
   */
  @java.lang.Override
  public int getEncodingValue() {
    return encoding_;
  }
  /**
   *
   *
   * <pre>
   * Encoding of audio data sent in all `RecognitionAudio` messages.
   * This field is optional for `FLAC` and `WAV` audio files and required
   * for all other audio formats. For details, see
   * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
   * </pre>
   *
   * <code>.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1;</code>
   *
   * @return The encoding.
   */
  @java.lang.Override
  public com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding getEncoding() {
    com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding result =
        com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding.forNumber(encoding_);
    return result == null
        ? com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding.UNRECOGNIZED
        : result;
  }

  public static final int SAMPLE_RATE_HERTZ_FIELD_NUMBER = 2;
  private int sampleRateHertz_ = 0;
  /**
   *
   *
   * <pre>
   * Sample rate in Hertz of the audio data sent in all
   * `RecognitionAudio` messages. Valid values are: 8000-48000.
   * 16000 is optimal. For best results, set the sampling rate of the audio
   * source to 16000 Hz. If that's not possible, use the native sample rate of
   * the audio source (instead of re-sampling).
   * This field is optional for FLAC and WAV audio files, but is
   * required for all other audio formats. For details, see
   * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
   * </pre>
   *
   * <code>int32 sample_rate_hertz = 2;</code>
   *
   * @return The sampleRateHertz.
   */
  @java.lang.Override
  public int getSampleRateHertz() {
    return sampleRateHertz_;
  }
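
  // Usage sketch (illustrative, not generated): a minimal configuration for
  // 16 kHz LINEAR16 mono audio, using the generated Builder setters for the
  // fields documented above. The language code and rate are example values.
  //
  //   RecognitionConfig config =
  //       RecognitionConfig.newBuilder()
  //           .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
  //           .setSampleRateHertz(16000)
  //           .setLanguageCode("en-US")
  //           .build();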

  public static final int AUDIO_CHANNEL_COUNT_FIELD_NUMBER = 7;
  private int audioChannelCount_ = 0;
  /**
   *
   *
   * <pre>
   * The number of channels in the input audio data.
   * ONLY set this for MULTI-CHANNEL recognition.
   * Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`.
   * The only valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is `1`.
   * If `0` or omitted, defaults to one channel (mono).
   * Note: We only recognize the first channel by default.
   * To perform independent recognition on each channel, set
   * `enable_separate_recognition_per_channel` to 'true'.
   * </pre>
   *
   * <code>int32 audio_channel_count = 7;</code>
   *
   * @return The audioChannelCount.
   */
  @java.lang.Override
  public int getAudioChannelCount() {
    return audioChannelCount_;
  }

  public static final int ENABLE_SEPARATE_RECOGNITION_PER_CHANNEL_FIELD_NUMBER = 12;
  private boolean enableSeparateRecognitionPerChannel_ = false;
  /**
   *
   *
   * <pre>
   * This must be set to `true` explicitly, with `audio_channel_count` &gt; 1,
   * to get each channel recognized separately. The recognition result will
   * contain a `channel_tag` field to state which channel that result belongs
   * to. If this is not true, we will only recognize the first channel. The
   * request is billed cumulatively for all channels recognized:
   * `audio_channel_count` multiplied by the length of the audio.
   * </pre>
   *
   * <code>bool enable_separate_recognition_per_channel = 12;</code>
   *
   * @return The enableSeparateRecognitionPerChannel.
   */
  @java.lang.Override
  public boolean getEnableSeparateRecognitionPerChannel() {
    return enableSeparateRecognitionPerChannel_;
  }
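
  // Multi-channel sketch (illustrative): as documented above, both fields must
  // be set together to recognize each channel separately, and the request is
  // billed for audio_channel_count times the audio length. Example values.
  //
  //   RecognitionConfig stereo =
  //       RecognitionConfig.newBuilder()
  //           .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
  //           .setSampleRateHertz(16000)
  //           .setLanguageCode("en-US")
  //           .setAudioChannelCount(2)
  //           .setEnableSeparateRecognitionPerChannel(true)
  //           .build();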

  public static final int LANGUAGE_CODE_FIELD_NUMBER = 3;

  @SuppressWarnings("serial")
  private volatile java.lang.Object languageCode_ = "";
  /**
   *
   *
   * <pre>
   * Required. The language of the supplied audio as a
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
   * Example: "en-US".
   * See [Language
   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
   * of the currently supported language codes.
   * </pre>
   *
   * <code>string language_code = 3 [(.google.api.field_behavior) = REQUIRED];</code>
   *
   * @return The languageCode.
   */
  @java.lang.Override
  public java.lang.String getLanguageCode() {
    java.lang.Object ref = languageCode_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      languageCode_ = s;
      return s;
    }
  }
  /**
   *
   *
   * <pre>
   * Required. The language of the supplied audio as a
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
   * Example: "en-US".
   * See [Language
   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
   * of the currently supported language codes.
   * </pre>
   *
   * <code>string language_code = 3 [(.google.api.field_behavior) = REQUIRED];</code>
   *
   * @return The bytes for languageCode.
   */
  @java.lang.Override
  public com.google.protobuf.ByteString getLanguageCodeBytes() {
    java.lang.Object ref = languageCode_;
    if (ref instanceof java.lang.String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
      languageCode_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }

  public static final int ALTERNATIVE_LANGUAGE_CODES_FIELD_NUMBER = 18;

  @SuppressWarnings("serial")
  private com.google.protobuf.LazyStringList alternativeLanguageCodes_;
  /**
   *
   *
   * <pre>
   * A list of up to 3 additional
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
   * listing possible alternative languages of the supplied audio.
   * See [Language
   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
   * of the currently supported language codes. If alternative languages are
   * listed, the recognition result will contain recognition in the most likely
   * language detected, including the main language_code. The recognition result
   * will include the language tag of the language detected in the audio. Note:
   * This feature is only supported for Voice Command and Voice Search use cases
   * and performance may vary for other use cases (e.g., phone call
   * transcription).
   * </pre>
   *
   * <code>repeated string alternative_language_codes = 18;</code>
   *
   * @return A list containing the alternativeLanguageCodes.
   */
  public com.google.protobuf.ProtocolStringList getAlternativeLanguageCodesList() {
    return alternativeLanguageCodes_;
  }
  /**
   *
   *
   * <pre>
   * A list of up to 3 additional
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
   * listing possible alternative languages of the supplied audio.
   * See [Language
   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
   * of the currently supported language codes. If alternative languages are
   * listed, the recognition result will contain recognition in the most likely
   * language detected, including the main language_code. The recognition result
   * will include the language tag of the language detected in the audio. Note:
   * This feature is only supported for Voice Command and Voice Search use cases
   * and performance may vary for other use cases (e.g., phone call
   * transcription).
   * </pre>
   *
   * <code>repeated string alternative_language_codes = 18;</code>
   *
   * @return The count of alternativeLanguageCodes.
   */
  public int getAlternativeLanguageCodesCount() {
    return alternativeLanguageCodes_.size();
  }
  /**
   *
   *
   * <pre>
   * A list of up to 3 additional
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
   * listing possible alternative languages of the supplied audio.
   * See [Language
   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
   * of the currently supported language codes. If alternative languages are
   * listed, the recognition result will contain recognition in the most likely
   * language detected, including the main language_code. The recognition result
   * will include the language tag of the language detected in the audio. Note:
   * This feature is only supported for Voice Command and Voice Search use cases
   * and performance may vary for other use cases (e.g., phone call
   * transcription).
   * </pre>
   *
   * <code>repeated string alternative_language_codes = 18;</code>
   *
   * @param index The index of the element to return.
   * @return The alternativeLanguageCodes at the given index.
   */
  public java.lang.String getAlternativeLanguageCodes(int index) {
    return alternativeLanguageCodes_.get(index);
  }
  /**
   *
   *
   * <pre>
   * A list of up to 3 additional
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
   * listing possible alternative languages of the supplied audio.
   * See [Language
   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
   * of the currently supported language codes. If alternative languages are
   * listed, the recognition result will contain recognition in the most likely
   * language detected, including the main language_code. The recognition result
   * will include the language tag of the language detected in the audio. Note:
   * This feature is only supported for Voice Command and Voice Search use cases
   * and performance may vary for other use cases (e.g., phone call
   * transcription).
   * </pre>
   *
   * <code>repeated string alternative_language_codes = 18;</code>
   *
   * @param index The index of the value to return.
   * @return The bytes of the alternativeLanguageCodes at the given index.
   */
  public com.google.protobuf.ByteString getAlternativeLanguageCodesBytes(int index) {
    return alternativeLanguageCodes_.getByteString(index);
  }
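
  // Illustrative sketch: supplying up to three alternative BCP-47 tags via the
  // generated repeated-field adder. The language tags are example values.
  //
  //   RecognitionConfig multiLanguage =
  //       RecognitionConfig.newBuilder()
  //           .setLanguageCode("en-US")
  //           .addAlternativeLanguageCodes("es-ES")
  //           .addAlternativeLanguageCodes("fr-FR")
  //           .build();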

  public static final int MAX_ALTERNATIVES_FIELD_NUMBER = 4;
  private int maxAlternatives_ = 0;
  /**
   *
   *
   * <pre>
   * Maximum number of recognition hypotheses to be returned.
   * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
   * within each `SpeechRecognitionResult`.
   * The server may return fewer than `max_alternatives`.
   * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
   * one. If omitted, a maximum of one will be returned.
   * </pre>
   *
   * <code>int32 max_alternatives = 4;</code>
   *
   * @return The maxAlternatives.
   */
  @java.lang.Override
  public int getMaxAlternatives() {
    return maxAlternatives_;
  }

  public static final int PROFANITY_FILTER_FIELD_NUMBER = 5;
  private boolean profanityFilter_ = false;
  /**
   *
   *
   * <pre>
   * If set to `true`, the server will attempt to filter out
   * profanities, replacing all but the initial character in each filtered word
   * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
   * won't be filtered out.
   * </pre>
   *
   * <code>bool profanity_filter = 5;</code>
   *
   * @return The profanityFilter.
   */
  @java.lang.Override
  public boolean getProfanityFilter() {
    return profanityFilter_;
  }

  public static final int ADAPTATION_FIELD_NUMBER = 20;
  private com.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation_;
  /**
   *
   *
   * <pre>
   * Speech adaptation configuration improves the accuracy of speech
   * recognition. For more information, see the [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
   * documentation.
   * When speech adaptation is set, it supersedes the `speech_contexts` field.
   * </pre>
   *
   * <code>.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation = 20;</code>
   *
   * @return Whether the adaptation field is set.
   */
  @java.lang.Override
  public boolean hasAdaptation() {
    return adaptation_ != null;
  }
  /**
   *
   *
   * <pre>
   * Speech adaptation configuration improves the accuracy of speech
   * recognition. For more information, see the [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
   * documentation.
   * When speech adaptation is set, it supersedes the `speech_contexts` field.
   * </pre>
   *
   * <code>.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation = 20;</code>
   *
   * @return The adaptation.
   */
  @java.lang.Override
  public com.google.cloud.speech.v1p1beta1.SpeechAdaptation getAdaptation() {
    return adaptation_ == null
        ? com.google.cloud.speech.v1p1beta1.SpeechAdaptation.getDefaultInstance()
        : adaptation_;
  }
  /**
   *
   *
   * <pre>
   * Speech adaptation configuration improves the accuracy of speech
   * recognition. For more information, see the [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
   * documentation.
   * When speech adaptation is set, it supersedes the `speech_contexts` field.
   * </pre>
   *
   * <code>.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation = 20;</code>
   */
  @java.lang.Override
  public com.google.cloud.speech.v1p1beta1.SpeechAdaptationOrBuilder getAdaptationOrBuilder() {
    return adaptation_ == null
        ? com.google.cloud.speech.v1p1beta1.SpeechAdaptation.getDefaultInstance()
        : adaptation_;
  }
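
  // Illustrative sketch: attaching speech adaptation, which, as noted above,
  // supersedes `speech_contexts` when set. The phrase-set resource name is an
  // assumed placeholder.
  //
  //   com.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation =
  //       com.google.cloud.speech.v1p1beta1.SpeechAdaptation.newBuilder()
  //           .addPhraseSetReferences(
  //               "projects/my-project/locations/global/phraseSets/my-phrase-set")
  //           .build();
  //   RecognitionConfig config =
  //       RecognitionConfig.newBuilder().setAdaptation(adaptation).build();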

  public static final int TRANSCRIPT_NORMALIZATION_FIELD_NUMBER = 24;
  private com.google.cloud.speech.v1p1beta1.TranscriptNormalization transcriptNormalization_;
  /**
   *
   *
   * <pre>
   * Use transcription normalization to automatically replace parts of the
   * transcript with phrases of your choosing. For StreamingRecognize, this
   * normalization only applies to stable partial transcripts (stability &gt; 0.8)
   * and final transcripts.
   * </pre>
   *
   * <code>.google.cloud.speech.v1p1beta1.TranscriptNormalization transcript_normalization = 24;
   * </code>
   *
   * @return Whether the transcriptNormalization field is set.
   */
  @java.lang.Override
  public boolean hasTranscriptNormalization() {
    return transcriptNormalization_ != null;
  }
  /**
   *
   *
   * <pre>
   * Use transcription normalization to automatically replace parts of the
   * transcript with phrases of your choosing. For StreamingRecognize, this
   * normalization only applies to stable partial transcripts (stability &gt; 0.8)
   * and final transcripts.
   * </pre>
   *
   * <code>.google.cloud.speech.v1p1beta1.TranscriptNormalization transcript_normalization = 24;
   * </code>
   *
   * @return The transcriptNormalization.
   */
  @java.lang.Override
  public com.google.cloud.speech.v1p1beta1.TranscriptNormalization getTranscriptNormalization() {
    return transcriptNormalization_ == null
        ? com.google.cloud.speech.v1p1beta1.TranscriptNormalization.getDefaultInstance()
        : transcriptNormalization_;
  }
  /**
   *
   *
   * <pre>
   * Use transcription normalization to automatically replace parts of the
   * transcript with phrases of your choosing. For StreamingRecognize, this
   * normalization only applies to stable partial transcripts (stability &gt; 0.8)
   * and final transcripts.
   * </pre>
   *
   * <code>.google.cloud.speech.v1p1beta1.TranscriptNormalization transcript_normalization = 24;
   * </code>
   */
  @java.lang.Override
  public com.google.cloud.speech.v1p1beta1.TranscriptNormalizationOrBuilder
      getTranscriptNormalizationOrBuilder() {
    return transcriptNormalization_ == null
        ? com.google.cloud.speech.v1p1beta1.TranscriptNormalization.getDefaultInstance()
        : transcriptNormalization_;
  }

  public static final int SPEECH_CONTEXTS_FIELD_NUMBER = 6;

  @SuppressWarnings("serial")
  private java.util.List<com.google.cloud.speech.v1p1beta1.SpeechContext> speechContexts_;
  /**
   *
   *
   * <pre>
   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see
   * [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
   * </pre>
   *
   * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
   */
  @java.lang.Override
  public java.util.List<com.google.cloud.speech.v1p1beta1.SpeechContext> getSpeechContextsList() {
    return speechContexts_;
  }
  /**
   *
   *
   * <pre>
   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see
   * [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
   * </pre>
   *
   * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
   */
  @java.lang.Override
  public java.util.List<? extends com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder>
      getSpeechContextsOrBuilderList() {
    return speechContexts_;
  }
  /**
   *
   *
   * <pre>
   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see
   * [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
   * </pre>
   *
   * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
   */
  @java.lang.Override
  public int getSpeechContextsCount() {
    return speechContexts_.size();
  }
  /**
   *
   *
   * <pre>
   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see
   * [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
   * </pre>
   *
   * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
   */
  @java.lang.Override
  public com.google.cloud.speech.v1p1beta1.SpeechContext getSpeechContexts(int index) {
    return speechContexts_.get(index);
  }
  /**
   *
   *
   * <pre>
   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see
   * [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
   * </pre>
   *
   * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
   */
  @java.lang.Override
  public com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder getSpeechContextsOrBuilder(
      int index) {
    return speechContexts_.get(index);
  }
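
  // Illustrative sketch: adding a SpeechContext hint via the generated
  // repeated-message adder. The phrase and boost are example values.
  //
  //   RecognitionConfig withHints =
  //       RecognitionConfig.newBuilder()
  //           .addSpeechContexts(
  //               com.google.cloud.speech.v1p1beta1.SpeechContext.newBuilder()
  //                   .addPhrases("weather")
  //                   .setBoost(20.0f))
  //           .build();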

  public static final int ENABLE_WORD_TIME_OFFSETS_FIELD_NUMBER = 8;
  private boolean enableWordTimeOffsets_ = false;
  /**
   *
   *
   * <pre>
   * If `true`, the top result includes a list of words and
   * the start and end time offsets (timestamps) for those words. If
   * `false`, no word-level time offset information is returned. The default is
   * `false`.
   * </pre>
   *
   * <code>bool enable_word_time_offsets = 8;</code>
   *
   * @return The enableWordTimeOffsets.
   */
  @java.lang.Override
  public boolean getEnableWordTimeOffsets() {
    return enableWordTimeOffsets_;
  }

  public static final int ENABLE_WORD_CONFIDENCE_FIELD_NUMBER = 15;
  private boolean enableWordConfidence_ = false;
  /**
   *
   *
   * <pre>
   * If `true`, the top result includes a list of words and the
   * confidence for those words. If `false`, no word-level confidence
   * information is returned. The default is `false`.
   * </pre>
   *
   * <code>bool enable_word_confidence = 15;</code>
   *
   * @return The enableWordConfidence.
   */
  @java.lang.Override
  public boolean getEnableWordConfidence() {
    return enableWordConfidence_;
  }

  public static final int ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER = 11;
  private boolean enableAutomaticPunctuation_ = false;
  /**
   *
   *
   * <pre>
   * If 'true', adds punctuation to recognition result hypotheses.
   * This feature is only available in select languages. Setting this for
   * requests in other languages has no effect at all.
   * The default 'false' value does not add punctuation to result hypotheses.
   * </pre>
   *
   * <code>bool enable_automatic_punctuation = 11;</code>
   *
   * @return The enableAutomaticPunctuation.
   */
  @java.lang.Override
  public boolean getEnableAutomaticPunctuation() {
    return enableAutomaticPunctuation_;
  }

  public static final int ENABLE_SPOKEN_PUNCTUATION_FIELD_NUMBER = 22;
  private com.google.protobuf.BoolValue enableSpokenPunctuation_;
  /**
   *
   *
   * <pre>
   * The spoken punctuation behavior for the call.
   * If not set, uses default behavior based on the model of choice;
   * e.g., command_and_search will enable spoken punctuation by default.
   * If 'true', replaces spoken punctuation with the corresponding symbols in
   * the request. For example, "how are you question mark" becomes "how are
   * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
   * for support. If 'false', spoken punctuation is not replaced.
   * </pre>
   *
   * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
   *
   * @return Whether the enableSpokenPunctuation field is set.
   */
  @java.lang.Override
  public boolean hasEnableSpokenPunctuation() {
    return enableSpokenPunctuation_ != null;
  }
  /**
   *
   *
   * <pre>
   * The spoken punctuation behavior for the call.
   * If not set, uses default behavior based on the model of choice;
   * e.g., command_and_search will enable spoken punctuation by default.
   * If 'true', replaces spoken punctuation with the corresponding symbols in
   * the request. For example, "how are you question mark" becomes "how are
   * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
   * for support. If 'false', spoken punctuation is not replaced.
   * </pre>
   *
   * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
   *
   * @return The enableSpokenPunctuation.
   */
  @java.lang.Override
  public com.google.protobuf.BoolValue getEnableSpokenPunctuation() {
    return enableSpokenPunctuation_ == null
        ? com.google.protobuf.BoolValue.getDefaultInstance()
        : enableSpokenPunctuation_;
  }
  /**
   *
   *
   * <pre>
   * The spoken punctuation behavior for the call.
   * If not set, uses default behavior based on the model of choice;
   * e.g., command_and_search will enable spoken punctuation by default.
   * If 'true', replaces spoken punctuation with the corresponding symbols in
   * the request. For example, "how are you question mark" becomes "how are
   * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
   * for support. If 'false', spoken punctuation is not replaced.
   * </pre>
   *
   * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
   */
  @java.lang.Override
  public com.google.protobuf.BoolValueOrBuilder getEnableSpokenPunctuationOrBuilder() {
    return enableSpokenPunctuation_ == null
        ? com.google.protobuf.BoolValue.getDefaultInstance()
        : enableSpokenPunctuation_;
  }
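
  // Illustrative sketch: because this field is a BoolValue wrapper rather than
  // a plain bool, leaving it unset ("use the model default") is
  // distinguishable from an explicit false.
  //
  //   RecognitionConfig config =
  //       RecognitionConfig.newBuilder()
  //           .setEnableSpokenPunctuation(
  //               com.google.protobuf.BoolValue.newBuilder().setValue(true))
  //           .build();
  //   boolean explicitlySet = config.hasEnableSpokenPunctuation(); // true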

  public static final int ENABLE_SPOKEN_EMOJIS_FIELD_NUMBER = 23;
  private com.google.protobuf.BoolValue enableSpokenEmojis_;
  /**
   *
   *
   * <pre>
   * The spoken emoji behavior for the call.
   * If not set, uses default behavior based on the model of choice.
   * If 'true', adds spoken emoji formatting for the request. This will replace
   * spoken emojis with the corresponding Unicode symbols in the final
   * transcript. If 'false', spoken emojis are not replaced.
   * </pre>
   *
   * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
   *
   * @return Whether the enableSpokenEmojis field is set.
   */
  @java.lang.Override
  public boolean hasEnableSpokenEmojis() {
    return enableSpokenEmojis_ != null;
  }
  /**
   *
   *
   * <pre>
   * The spoken emoji behavior for the call.
   * If not set, uses default behavior based on the model of choice.
   * If 'true', adds spoken emoji formatting for the request. This will replace
   * spoken emojis with the corresponding Unicode symbols in the final
   * transcript. If 'false', spoken emojis are not replaced.
   * </pre>
   *
   * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
   *
   * @return The enableSpokenEmojis.
   */
  @java.lang.Override
  public com.google.protobuf.BoolValue getEnableSpokenEmojis() {
    return enableSpokenEmojis_ == null
        ? com.google.protobuf.BoolValue.getDefaultInstance()
        : enableSpokenEmojis_;
  }
  /**
   *
   *
   * <pre>
   * The spoken emoji behavior for the call.
   * If not set, uses default behavior based on the model of choice.
   * If 'true', adds spoken emoji formatting for the request. This will replace
   * spoken emojis with the corresponding Unicode symbols in the final
   * transcript. If 'false', spoken emojis are not replaced.
   * </pre>
   *
   * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
   */
  @java.lang.Override
  public com.google.protobuf.BoolValueOrBuilder getEnableSpokenEmojisOrBuilder() {
    return enableSpokenEmojis_ == null
        ? com.google.protobuf.BoolValue.getDefaultInstance()
        : enableSpokenEmojis_;
  }

  public static final int ENABLE_SPEAKER_DIARIZATION_FIELD_NUMBER = 16;
  private boolean enableSpeakerDiarization_ = false;
  /**
   *
   *
   * <pre>
   * If 'true', enables speaker detection for each recognized word in
   * the top alternative of the recognition result using a speaker_tag provided
   * in the WordInfo.
   * Note: Use diarization_config instead.
   * </pre>
   *
   * <code>bool enable_speaker_diarization = 16 [deprecated = true];</code>
   *
   * @deprecated google.cloud.speech.v1p1beta1.RecognitionConfig.enable_speaker_diarization is
   *     deprecated. See google/cloud/speech/v1p1beta1/cloud_speech.proto;l=401
   * @return The enableSpeakerDiarization.
   */
  @java.lang.Override
  @java.lang.Deprecated
  public boolean getEnableSpeakerDiarization() {
    return enableSpeakerDiarization_;
  }

  public static final int DIARIZATION_SPEAKER_COUNT_FIELD_NUMBER = 17;
  private int diarizationSpeakerCount_ = 0;
  /**
   *
   *
   * <pre>
   * If set, specifies the estimated number of speakers in the conversation.
   * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
   * Note: Use diarization_config instead.
   * </pre>
   *
   * <code>int32 diarization_speaker_count = 17 [deprecated = true];</code>
   *
   * @deprecated google.cloud.speech.v1p1beta1.RecognitionConfig.diarization_speaker_count is
   *     deprecated. See google/cloud/speech/v1p1beta1/cloud_speech.proto;l=406
   * @return The diarizationSpeakerCount.
   */
  @java.lang.Override
  @java.lang.Deprecated
  public int getDiarizationSpeakerCount() {
    return diarizationSpeakerCount_;
  }

  public static final int DIARIZATION_CONFIG_FIELD_NUMBER = 19;
  private com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarizationConfig_;
  /**
   *
   *
   * <pre>
   * Config to enable speaker diarization and set additional
   * parameters to make diarization better suited for your application.
   * Note: When this is enabled, we send all the words from the beginning of the
   * audio for the top alternative in every consecutive STREAMING response.
   * This is done in order to improve our speaker tags as our models learn to
   * identify the speakers in the conversation over time.
   * For non-streaming requests, the diarization results will be provided only
   * in the top alternative of the FINAL SpeechRecognitionResult.
   * </pre>
   *
   * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
   *
   * @return Whether the diarizationConfig field is set.
   */
  @java.lang.Override
  public boolean hasDiarizationConfig() {
    return diarizationConfig_ != null;
  }
  /**
   *
   *
   * <pre>
   * Config to enable speaker diarization and set additional
   * parameters to make diarization better suited for your application.
   * Note: When this is enabled, we send all the words from the beginning of the
   * audio for the top alternative in every consecutive STREAMING response.
   * This is done in order to improve our speaker tags as our models learn to
   * identify the speakers in the conversation over time.
   * For non-streaming requests, the diarization results will be provided only
   * in the top alternative of the FINAL SpeechRecognitionResult.
   * </pre>
   *
   * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
   *
   * @return The diarizationConfig.
   */
  @java.lang.Override
  public com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig getDiarizationConfig() {
    return diarizationConfig_ == null
        ? com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.getDefaultInstance()
        : diarizationConfig_;
  }
  /**
   *
   *
   * <pre>
   * Config to enable speaker diarization and set additional
   * parameters to make diarization better suited for your application.
   * Note: When this is enabled, we send all the words from the beginning of the
   * audio for the top alternative in every consecutive STREAMING response.
   * This is done in order to improve our speaker tags as our models learn to
   * identify the speakers in the conversation over time.
   * For non-streaming requests, the diarization results will be provided only
   * in the top alternative of the FINAL SpeechRecognitionResult.
   * </pre>
   *
   * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
   */
  @java.lang.Override
  public com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfigOrBuilder
      getDiarizationConfigOrBuilder() {
    return diarizationConfig_ == null
        ? com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.getDefaultInstance()
        : diarizationConfig_;
  }
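
  // Illustrative sketch: per the deprecation notes above, prefer
  // `diarization_config` over the deprecated boolean and count fields. The
  // speaker counts are example values.
  //
  //   RecognitionConfig config =
  //       RecognitionConfig.newBuilder()
  //           .setDiarizationConfig(
  //               com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.newBuilder()
  //                   .setEnableSpeakerDiarization(true)
  //                   .setMinSpeakerCount(2)
  //                   .setMaxSpeakerCount(4))
  //           .build();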

  public static final int METADATA_FIELD_NUMBER = 9;
  private com.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata_;
  /**
   *
   *
   * <pre>
   * Metadata regarding this request.
   * </pre>
   *
   * <code>.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;</code>
   *
   * @return Whether the metadata field is set.
   */
  @java.lang.Override
  public boolean hasMetadata() {
    return metadata_ != null;
  }
  /**
   *
   *
   * <pre>
   * Metadata regarding this request.
   * </pre>
   *
   * <code>.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;</code>
   *
   * @return The metadata.
   */
  @java.lang.Override
  public com.google.cloud.speech.v1p1beta1.RecognitionMetadata getMetadata() {
    return metadata_ == null
        ? com.google.cloud.speech.v1p1beta1.RecognitionMetadata.getDefaultInstance()
        : metadata_;
  }
  /**
   *
   *
   * <pre>
   * Metadata regarding this request.
   * </pre>
   *
   * <code>.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;</code>
   */
  @java.lang.Override
  public com.google.cloud.speech.v1p1beta1.RecognitionMetadataOrBuilder getMetadataOrBuilder() {
    return metadata_ == null
        ? com.google.cloud.speech.v1p1beta1.RecognitionMetadata.getDefaultInstance()
        : metadata_;
  }

  public static final int MODEL_FIELD_NUMBER = 13;

  @SuppressWarnings("serial")
  private volatile java.lang.Object model_ = "";
  /**
   *
   *
   * <pre>
   * Which model to select for the given request. Select the model
   * best suited to your domain to get the best results. If a model is not
   * explicitly specified, then we auto-select a model based on the parameters
   * in the RecognitionConfig.
   * &lt;table&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
   *     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;latest_long&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for long form content like media or conversation.&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;latest_short&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for short form content like commands or single shot directed
   *     speech.&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for audio that originated from a phone call (typically
   *     recorded at an 8 kHz sampling rate).&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for audio that originated from video or includes multiple
   *         speakers. Ideally the audio is recorded at a 16 kHz or greater
   *         sampling rate. This is a premium model that costs more than the
   *         standard rate.&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for audio that is not one of the specific audio models.
   *         For example, long-form audio. Ideally the audio is high-fidelity,
   *         recorded at a 16 kHz or greater sampling rate.&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;medical_conversation&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for audio that originated from a conversation between a
   *         medical provider and patient.&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;medical_dictation&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for audio that originated from dictation notes by a medical
   *         provider.&lt;/td&gt;
   *   &lt;/tr&gt;
   * &lt;/table&gt;
   * </pre>
   *
   * <code>string model = 13;</code>
   *
   * @return The model.
   */
  @java.lang.Override
  public java.lang.String getModel() {
    java.lang.Object ref = model_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      model_ = s;
      return s;
    }
  }
  /**
   *
   *
   * <pre>
   * Which model to select for the given request. Select the model
   * best suited to your domain to get the best results. If a model is not
   * explicitly specified, then we auto-select a model based on the parameters
   * in the RecognitionConfig.
   * &lt;table&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
   *     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;latest_long&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for long form content like media or conversation.&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;latest_short&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for short form content like commands or single shot directed
   *     speech.&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for audio that originated from a phone call (typically
   *     recorded at an 8 kHz sampling rate).&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for audio that originated from video or includes multiple
   *         speakers. Ideally the audio is recorded at a 16 kHz or greater
   *         sampling rate. This is a premium model that costs more than the
   *         standard rate.&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for audio that is not one of the specific audio models.
   *         For example, long-form audio. Ideally the audio is high-fidelity,
   *         recorded at a 16 kHz or greater sampling rate.&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;medical_conversation&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for audio that originated from a conversation between a
   *         medical provider and patient.&lt;/td&gt;
   *   &lt;/tr&gt;
   *   &lt;tr&gt;
   *     &lt;td&gt;&lt;code&gt;medical_dictation&lt;/code&gt;&lt;/td&gt;
   *     &lt;td&gt;Best for audio that originated from dictation notes by a medical
   *         provider.&lt;/td&gt;
   *   &lt;/tr&gt;
   * &lt;/table&gt;
   * </pre>
   *
   * <code>string model = 13;</code>
   *
   * @return The bytes for model.
   */
  @java.lang.Override
  public com.google.protobuf.ByteString getModelBytes() {
    java.lang.Object ref = model_;
    if (ref instanceof java.lang.String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
      model_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }

  public static final int USE_ENHANCED_FIELD_NUMBER = 14;
  private boolean useEnhanced_ = false;
  /**
   *
   *
   * <pre>
   * Set to true to use an enhanced model for speech recognition.
   * If `use_enhanced` is set to true and the `model` field is not set, then
   * an appropriate enhanced model is chosen if an enhanced model exists for
   * the audio.
   * If `use_enhanced` is true and an enhanced version of the specified model
   * does not exist, then the speech is recognized using the standard version
   * of the specified model.
   * </pre>
   *
   * <code>bool use_enhanced = 14;</code>
   *
   * @return The useEnhanced.
   */
  @java.lang.Override
  public boolean getUseEnhanced() {
    return useEnhanced_;
  }
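
  // Illustrative sketch: selecting a model from the table above and opting
  // into an enhanced variant where one exists. The model name and language
  // code are example values.
  //
  //   RecognitionConfig config =
  //       RecognitionConfig.newBuilder()
  //           .setLanguageCode("en-US")
  //           .setModel("phone_call")
  //           .setUseEnhanced(true)
  //           .build();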

  private byte memoizedIsInitialized = -1;

  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
    if (encoding_
        != com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED
            .getNumber()) {
      output.writeEnum(1, encoding_);
    }
    if (sampleRateHertz_ != 0) {
      output.writeInt32(2, sampleRateHertz_);
    }
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
      com.google.protobuf.GeneratedMessageV3.writeString(output, 3, languageCode_);
    }
    if (maxAlternatives_ != 0) {
      output.writeInt32(4, maxAlternatives_);
    }
    if (profanityFilter_ != false) {
      output.writeBool(5, profanityFilter_);
    }
    for (int i = 0; i < speechContexts_.size(); i++) {
      output.writeMessage(6, speechContexts_.get(i));
    }
    if (audioChannelCount_ != 0) {
      output.writeInt32(7, audioChannelCount_);
    }
    if (enableWordTimeOffsets_ != false) {
      output.writeBool(8, enableWordTimeOffsets_);
    }
    if (metadata_ != null) {
      output.writeMessage(9, getMetadata());
    }
    if (enableAutomaticPunctuation_ != false) {
      output.writeBool(11, enableAutomaticPunctuation_);
    }
    if (enableSeparateRecognitionPerChannel_ != false) {
      output.writeBool(12, enableSeparateRecognitionPerChannel_);
    }
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) {
      com.google.protobuf.GeneratedMessageV3.writeString(output, 13, model_);
    }
    if (useEnhanced_ != false) {
      output.writeBool(14, useEnhanced_);
    }
    if (enableWordConfidence_ != false) {
      output.writeBool(15, enableWordConfidence_);
    }
    if (enableSpeakerDiarization_ != false) {
      output.writeBool(16, enableSpeakerDiarization_);
    }
    if (diarizationSpeakerCount_ != 0) {
      output.writeInt32(17, diarizationSpeakerCount_);
    }
    for (int i = 0; i < alternativeLanguageCodes_.size(); i++) {
      com.google.protobuf.GeneratedMessageV3.writeString(
          output, 18, alternativeLanguageCodes_.getRaw(i));
    }
    if (diarizationConfig_ != null) {
      output.writeMessage(19, getDiarizationConfig());
    }
    if (adaptation_ != null) {
      output.writeMessage(20, getAdaptation());
    }
    if (enableSpokenPunctuation_ != null) {
      output.writeMessage(22, getEnableSpokenPunctuation());
    }
    if (enableSpokenEmojis_ != null) {
      output.writeMessage(23, getEnableSpokenEmojis());
    }
    if (transcriptNormalization_ != null) {
      output.writeMessage(24, getTranscriptNormalization());
    }
    getUnknownFields().writeTo(output);
  }

  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    if (encoding_
        != com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED
            .getNumber()) {
      size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, encoding_);
    }
    if (sampleRateHertz_ != 0) {
      size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, sampleRateHertz_);
    }
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, languageCode_);
    }
    if (maxAlternatives_ != 0) {
      size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, maxAlternatives_);
    }
    if (profanityFilter_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(5, profanityFilter_);
    }
    for (int i = 0; i < speechContexts_.size(); i++) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, speechContexts_.get(i));
    }
    if (audioChannelCount_ != 0) {
      size += com.google.protobuf.CodedOutputStream.computeInt32Size(7, audioChannelCount_);
    }
    if (enableWordTimeOffsets_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(8, enableWordTimeOffsets_);
    }
    if (metadata_ != null) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, getMetadata());
    }
    if (enableAutomaticPunctuation_ != false) {
      size +=
          com.google.protobuf.CodedOutputStream.computeBoolSize(11, enableAutomaticPunctuation_);
    }
    if (enableSeparateRecognitionPerChannel_ != false) {
      size +=
          com.google.protobuf.CodedOutputStream.computeBoolSize(
              12, enableSeparateRecognitionPerChannel_);
    }
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) {
      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(13, model_);
    }
    if (useEnhanced_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(14, useEnhanced_);
    }
    if (enableWordConfidence_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(15, enableWordConfidence_);
    }
    if (enableSpeakerDiarization_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(16, enableSpeakerDiarization_);
    }
    if (diarizationSpeakerCount_ != 0) {
      size += com.google.protobuf.CodedOutputStream.computeInt32Size(17, diarizationSpeakerCount_);
    }
    {
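      // Repeated string field 18: every element carries its own tag, and the
      // tag value ((18 << 3) | 2 == 146) needs two varint bytes, hence the
      // "2 *" per-element cost added below.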
      int dataSize = 0;
      for (int i = 0; i < alternativeLanguageCodes_.size(); i++) {
        dataSize += computeStringSizeNoTag(alternativeLanguageCodes_.getRaw(i));
      }
      size += dataSize;
      size += 2 * getAlternativeLanguageCodesList().size();
    }
    if (diarizationConfig_ != null) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(19, getDiarizationConfig());
    }
    if (adaptation_ != null) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(20, getAdaptation());
    }
    if (enableSpokenPunctuation_ != null) {
      size +=
          com.google.protobuf.CodedOutputStream.computeMessageSize(
              22, getEnableSpokenPunctuation());
    }
    if (enableSpokenEmojis_ != null) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(23, getEnableSpokenEmojis());
    }
    if (transcriptNormalization_ != null) {
      size +=
          com.google.protobuf.CodedOutputStream.computeMessageSize(
              24, getTranscriptNormalization());
    }
    size += getUnknownFields().getSerializedSize();
    memoizedSize = size;
    return size;
  }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof com.google.cloud.speech.v1p1beta1.RecognitionConfig)) {
      return super.equals(obj);
    }
    com.google.cloud.speech.v1p1beta1.RecognitionConfig other =
        (com.google.cloud.speech.v1p1beta1.RecognitionConfig) obj;

    if (encoding_ != other.encoding_) return false;
    if (getSampleRateHertz() != other.getSampleRateHertz()) return false;
    if (getAudioChannelCount() != other.getAudioChannelCount()) return false;
    if (getEnableSeparateRecognitionPerChannel() != other.getEnableSeparateRecognitionPerChannel())
      return false;
    if (!getLanguageCode().equals(other.getLanguageCode())) return false;
    if (!getAlternativeLanguageCodesList().equals(other.getAlternativeLanguageCodesList()))
      return false;
    if (getMaxAlternatives() != other.getMaxAlternatives()) return false;
    if (getProfanityFilter() != other.getProfanityFilter()) return false;
    if (hasAdaptation() != other.hasAdaptation()) return false;
    if (hasAdaptation()) {
      if (!getAdaptation().equals(other.getAdaptation())) return false;
    }
    if (hasTranscriptNormalization() != other.hasTranscriptNormalization()) return false;
    if (hasTranscriptNormalization()) {
      if (!getTranscriptNormalization().equals(other.getTranscriptNormalization())) return false;
    }
    if (!getSpeechContextsList().equals(other.getSpeechContextsList())) return false;
    if (getEnableWordTimeOffsets() != other.getEnableWordTimeOffsets()) return false;
    if (getEnableWordConfidence() != other.getEnableWordConfidence()) return false;
    if (getEnableAutomaticPunctuation() != other.getEnableAutomaticPunctuation()) return false;
    if (hasEnableSpokenPunctuation() != other.hasEnableSpokenPunctuation()) return false;
    if (hasEnableSpokenPunctuation()) {
      if (!getEnableSpokenPunctuation().equals(other.getEnableSpokenPunctuation())) return false;
    }
    if (hasEnableSpokenEmojis() != other.hasEnableSpokenEmojis()) return false;
    if (hasEnableSpokenEmojis()) {
      if (!getEnableSpokenEmojis().equals(other.getEnableSpokenEmojis())) return false;
    }
    if (getEnableSpeakerDiarization() != other.getEnableSpeakerDiarization()) return false;
    if (getDiarizationSpeakerCount() != other.getDiarizationSpeakerCount()) return false;
    if (hasDiarizationConfig() != other.hasDiarizationConfig()) return false;
    if (hasDiarizationConfig()) {
      if (!getDiarizationConfig().equals(other.getDiarizationConfig())) return false;
    }
    if (hasMetadata() != other.hasMetadata()) return false;
    if (hasMetadata()) {
      if (!getMetadata().equals(other.getMetadata())) return false;
    }
    if (!getModel().equals(other.getModel())) return false;
    if (getUseEnhanced() != other.getUseEnhanced()) return false;
    if (!getUnknownFields().equals(other.getUnknownFields())) return false;
    return true;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    hash = (37 * hash) + ENCODING_FIELD_NUMBER;
    hash = (53 * hash) + encoding_;
    hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER;
    hash = (53 * hash) + getSampleRateHertz();
    hash = (37 * hash) + AUDIO_CHANNEL_COUNT_FIELD_NUMBER;
    hash = (53 * hash) + getAudioChannelCount();
    hash = (37 * hash) + ENABLE_SEPARATE_RECOGNITION_PER_CHANNEL_FIELD_NUMBER;
    hash =
        (53 * hash)
            + com.google.protobuf.Internal.hashBoolean(getEnableSeparateRecognitionPerChannel());
    hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER;
    hash = (53 * hash) + getLanguageCode().hashCode();
    if (getAlternativeLanguageCodesCount() > 0) {
      hash = (37 * hash) + ALTERNATIVE_LANGUAGE_CODES_FIELD_NUMBER;
      hash = (53 * hash) + getAlternativeLanguageCodesList().hashCode();
    }
    hash = (37 * hash) + MAX_ALTERNATIVES_FIELD_NUMBER;
    hash = (53 * hash) + getMaxAlternatives();
    hash = (37 * hash) + PROFANITY_FILTER_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getProfanityFilter());
    if (hasAdaptation()) {
      hash = (37 * hash) + ADAPTATION_FIELD_NUMBER;
      hash = (53 * hash) + getAdaptation().hashCode();
    }
    if (hasTranscriptNormalization()) {
      hash = (37 * hash) + TRANSCRIPT_NORMALIZATION_FIELD_NUMBER;
      hash = (53 * hash) + getTranscriptNormalization().hashCode();
    }
    if (getSpeechContextsCount() > 0) {
      hash = (37 * hash) + SPEECH_CONTEXTS_FIELD_NUMBER;
      hash = (53 * hash) + getSpeechContextsList().hashCode();
    }
    hash = (37 * hash) + ENABLE_WORD_TIME_OFFSETS_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordTimeOffsets());
    hash = (37 * hash) + ENABLE_WORD_CONFIDENCE_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordConfidence());
    hash = (37 * hash) + ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableAutomaticPunctuation());
    if (hasEnableSpokenPunctuation()) {
      hash = (37 * hash) + ENABLE_SPOKEN_PUNCTUATION_FIELD_NUMBER;
      hash = (53 * hash) + getEnableSpokenPunctuation().hashCode();
    }
    if (hasEnableSpokenEmojis()) {
      hash = (37 * hash) + ENABLE_SPOKEN_EMOJIS_FIELD_NUMBER;
      hash = (53 * hash) + getEnableSpokenEmojis().hashCode();
    }
    hash = (37 * hash) + ENABLE_SPEAKER_DIARIZATION_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableSpeakerDiarization());
    hash = (37 * hash) + DIARIZATION_SPEAKER_COUNT_FIELD_NUMBER;
    hash = (53 * hash) + getDiarizationSpeakerCount();
    if (hasDiarizationConfig()) {
      hash = (37 * hash) + DIARIZATION_CONFIG_FIELD_NUMBER;
      hash = (53 * hash) + getDiarizationConfig().hashCode();
    }
    if (hasMetadata()) {
      hash = (37 * hash) + METADATA_FIELD_NUMBER;
      hash = (53 * hash) + getMetadata().hashCode();
    }
    hash = (37 * hash) + MODEL_FIELD_NUMBER;
    hash = (53 * hash) + getModel().hashCode();
    hash = (37 * hash) + USE_ENHANCED_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getUseEnhanced());
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom(
      java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseDelimitedFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }
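
  // Illustrative sketch (not part of the generated API): round-tripping a
  // config through the wire format using the parseFrom(byte[]) overload
  // above. The field values shown are arbitrary.
  //
  //   RecognitionConfig config =
  //       RecognitionConfig.newBuilder()
  //           .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
  //           .setSampleRateHertz(16000)
  //           .setLanguageCode("en-US")
  //           .build();
  //   byte[] bytes = config.toByteArray();
  //   RecognitionConfig roundTripped = RecognitionConfig.parseFrom(bytes);
  //   assert roundTripped.equals(config); // equals() compares field by field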

  @java.lang.Override
  public Builder newBuilderForType() {
    return newBuilder();
  }

  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }

  public static Builder newBuilder(com.google.cloud.speech.v1p1beta1.RecognitionConfig prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }

  @java.lang.Override
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
  }
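
  // Illustrative sketch (not part of the generated API): toBuilder() copies
  // this message into a fresh Builder, so one field can be changed without
  // rebuilding the rest; `existing` is a hypothetical, previously built
  // RecognitionConfig.
  //
  //   RecognitionConfig updated = existing.toBuilder().setUseEnhanced(true).build();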

  @java.lang.Override
  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   *
   *
   * <pre>
   * Provides information to the recognizer that specifies how to process the
   * request.
   * </pre>
   *
   * Protobuf type {@code google.cloud.speech.v1p1beta1.RecognitionConfig}
   */
  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
      implements
      // @@protoc_insertion_point(builder_implements:google.cloud.speech.v1p1beta1.RecognitionConfig)
      com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return com.google.cloud.speech.v1p1beta1.SpeechProto
          .internal_static_google_cloud_speech_v1p1beta1_RecognitionConfig_descriptor;
    }

    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.cloud.speech.v1p1beta1.SpeechProto
          .internal_static_google_cloud_speech_v1p1beta1_RecognitionConfig_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.cloud.speech.v1p1beta1.RecognitionConfig.class,
              com.google.cloud.speech.v1p1beta1.RecognitionConfig.Builder.class);
    }

    // Construct using com.google.cloud.speech.v1p1beta1.RecognitionConfig.newBuilder()
    private Builder() {}

    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
    }

    @java.lang.Override
    public Builder clear() {
      super.clear();
      bitField0_ = 0;
      encoding_ = 0;
      sampleRateHertz_ = 0;
      audioChannelCount_ = 0;
      enableSeparateRecognitionPerChannel_ = false;
      languageCode_ = "";
      alternativeLanguageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
      bitField0_ = (bitField0_ & ~0x00000020);
      maxAlternatives_ = 0;
      profanityFilter_ = false;
      adaptation_ = null;
      if (adaptationBuilder_ != null) {
        adaptationBuilder_.dispose();
        adaptationBuilder_ = null;
      }
      transcriptNormalization_ = null;
      if (transcriptNormalizationBuilder_ != null) {
        transcriptNormalizationBuilder_.dispose();
        transcriptNormalizationBuilder_ = null;
      }
      if (speechContextsBuilder_ == null) {
        speechContexts_ = java.util.Collections.emptyList();
      } else {
        speechContexts_ = null;
        speechContextsBuilder_.clear();
      }
      bitField0_ = (bitField0_ & ~0x00000400);
      enableWordTimeOffsets_ = false;
      enableWordConfidence_ = false;
      enableAutomaticPunctuation_ = false;
      enableSpokenPunctuation_ = null;
      if (enableSpokenPunctuationBuilder_ != null) {
        enableSpokenPunctuationBuilder_.dispose();
        enableSpokenPunctuationBuilder_ = null;
      }
      enableSpokenEmojis_ = null;
      if (enableSpokenEmojisBuilder_ != null) {
        enableSpokenEmojisBuilder_.dispose();
        enableSpokenEmojisBuilder_ = null;
      }
      enableSpeakerDiarization_ = false;
      diarizationSpeakerCount_ = 0;
      diarizationConfig_ = null;
      if (diarizationConfigBuilder_ != null) {
        diarizationConfigBuilder_.dispose();
        diarizationConfigBuilder_ = null;
      }
      metadata_ = null;
      if (metadataBuilder_ != null) {
        metadataBuilder_.dispose();
        metadataBuilder_ = null;
      }
      model_ = "";
      useEnhanced_ = false;
      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.cloud.speech.v1p1beta1.SpeechProto
          .internal_static_google_cloud_speech_v1p1beta1_RecognitionConfig_descriptor;
    }

    @java.lang.Override
    public com.google.cloud.speech.v1p1beta1.RecognitionConfig getDefaultInstanceForType() {
      return com.google.cloud.speech.v1p1beta1.RecognitionConfig.getDefaultInstance();
    }

    @java.lang.Override
    public com.google.cloud.speech.v1p1beta1.RecognitionConfig build() {
      com.google.cloud.speech.v1p1beta1.RecognitionConfig result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public com.google.cloud.speech.v1p1beta1.RecognitionConfig buildPartial() {
      com.google.cloud.speech.v1p1beta1.RecognitionConfig result =
          new com.google.cloud.speech.v1p1beta1.RecognitionConfig(this);
      buildPartialRepeatedFields(result);
      if (bitField0_ != 0) {
        buildPartial0(result);
      }
      onBuilt();
      return result;
    }

    private void buildPartialRepeatedFields(
        com.google.cloud.speech.v1p1beta1.RecognitionConfig result) {
      if (((bitField0_ & 0x00000020) != 0)) {
        alternativeLanguageCodes_ = alternativeLanguageCodes_.getUnmodifiableView();
        bitField0_ = (bitField0_ & ~0x00000020);
      }
      result.alternativeLanguageCodes_ = alternativeLanguageCodes_;
      if (speechContextsBuilder_ == null) {
        if (((bitField0_ & 0x00000400) != 0)) {
          speechContexts_ = java.util.Collections.unmodifiableList(speechContexts_);
          bitField0_ = (bitField0_ & ~0x00000400);
        }
        result.speechContexts_ = speechContexts_;
      } else {
        result.speechContexts_ = speechContextsBuilder_.build();
      }
    }

    private void buildPartial0(com.google.cloud.speech.v1p1beta1.RecognitionConfig result) {
      int from_bitField0_ = bitField0_;
      if (((from_bitField0_ & 0x00000001) != 0)) {
        result.encoding_ = encoding_;
      }
      if (((from_bitField0_ & 0x00000002) != 0)) {
        result.sampleRateHertz_ = sampleRateHertz_;
      }
      if (((from_bitField0_ & 0x00000004) != 0)) {
        result.audioChannelCount_ = audioChannelCount_;
      }
      if (((from_bitField0_ & 0x00000008) != 0)) {
        result.enableSeparateRecognitionPerChannel_ = enableSeparateRecognitionPerChannel_;
      }
      if (((from_bitField0_ & 0x00000010) != 0)) {
        result.languageCode_ = languageCode_;
      }
      if (((from_bitField0_ & 0x00000040) != 0)) {
        result.maxAlternatives_ = maxAlternatives_;
      }
      if (((from_bitField0_ & 0x00000080) != 0)) {
        result.profanityFilter_ = profanityFilter_;
      }
      if (((from_bitField0_ & 0x00000100) != 0)) {
        result.adaptation_ = adaptationBuilder_ == null ? adaptation_ : adaptationBuilder_.build();
      }
      if (((from_bitField0_ & 0x00000200) != 0)) {
        result.transcriptNormalization_ =
            transcriptNormalizationBuilder_ == null
                ? transcriptNormalization_
                : transcriptNormalizationBuilder_.build();
      }
      if (((from_bitField0_ & 0x00000800) != 0)) {
        result.enableWordTimeOffsets_ = enableWordTimeOffsets_;
      }
      if (((from_bitField0_ & 0x00001000) != 0)) {
        result.enableWordConfidence_ = enableWordConfidence_;
      }
      if (((from_bitField0_ & 0x00002000) != 0)) {
        result.enableAutomaticPunctuation_ = enableAutomaticPunctuation_;
      }
      if (((from_bitField0_ & 0x00004000) != 0)) {
        result.enableSpokenPunctuation_ =
            enableSpokenPunctuationBuilder_ == null
                ? enableSpokenPunctuation_
                : enableSpokenPunctuationBuilder_.build();
      }
      if (((from_bitField0_ & 0x00008000) != 0)) {
        result.enableSpokenEmojis_ =
            enableSpokenEmojisBuilder_ == null
                ? enableSpokenEmojis_
                : enableSpokenEmojisBuilder_.build();
      }
      if (((from_bitField0_ & 0x00010000) != 0)) {
        result.enableSpeakerDiarization_ = enableSpeakerDiarization_;
      }
      if (((from_bitField0_ & 0x00020000) != 0)) {
        result.diarizationSpeakerCount_ = diarizationSpeakerCount_;
      }
      if (((from_bitField0_ & 0x00040000) != 0)) {
        result.diarizationConfig_ =
            diarizationConfigBuilder_ == null
                ? diarizationConfig_
                : diarizationConfigBuilder_.build();
      }
      if (((from_bitField0_ & 0x00080000) != 0)) {
        result.metadata_ = metadataBuilder_ == null ? metadata_ : metadataBuilder_.build();
      }
      if (((from_bitField0_ & 0x00100000) != 0)) {
        result.model_ = model_;
      }
      if (((from_bitField0_ & 0x00200000) != 0)) {
        result.useEnhanced_ = useEnhanced_;
      }
    }

    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }

    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.setField(field, value);
    }

    @java.lang.Override
    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }

    @java.lang.Override
    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }

    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }

    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }

    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.cloud.speech.v1p1beta1.RecognitionConfig) {
        return mergeFrom((com.google.cloud.speech.v1p1beta1.RecognitionConfig) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(com.google.cloud.speech.v1p1beta1.RecognitionConfig other) {
      if (other == com.google.cloud.speech.v1p1beta1.RecognitionConfig.getDefaultInstance())
        return this;
      if (other.encoding_ != 0) {
        setEncodingValue(other.getEncodingValue());
      }
      if (other.getSampleRateHertz() != 0) {
        setSampleRateHertz(other.getSampleRateHertz());
      }
      if (other.getAudioChannelCount() != 0) {
        setAudioChannelCount(other.getAudioChannelCount());
      }
      if (other.getEnableSeparateRecognitionPerChannel() != false) {
        setEnableSeparateRecognitionPerChannel(other.getEnableSeparateRecognitionPerChannel());
      }
      if (!other.getLanguageCode().isEmpty()) {
        languageCode_ = other.languageCode_;
        bitField0_ |= 0x00000010;
        onChanged();
      }
      if (!other.alternativeLanguageCodes_.isEmpty()) {
        if (alternativeLanguageCodes_.isEmpty()) {
          alternativeLanguageCodes_ = other.alternativeLanguageCodes_;
          bitField0_ = (bitField0_ & ~0x00000020);
        } else {
          ensureAlternativeLanguageCodesIsMutable();
          alternativeLanguageCodes_.addAll(other.alternativeLanguageCodes_);
        }
        onChanged();
      }
      if (other.getMaxAlternatives() != 0) {
        setMaxAlternatives(other.getMaxAlternatives());
      }
      if (other.getProfanityFilter() != false) {
        setProfanityFilter(other.getProfanityFilter());
      }
      if (other.hasAdaptation()) {
        mergeAdaptation(other.getAdaptation());
      }
      if (other.hasTranscriptNormalization()) {
        mergeTranscriptNormalization(other.getTranscriptNormalization());
      }
      if (speechContextsBuilder_ == null) {
        if (!other.speechContexts_.isEmpty()) {
          if (speechContexts_.isEmpty()) {
            speechContexts_ = other.speechContexts_;
            bitField0_ = (bitField0_ & ~0x00000400);
          } else {
            ensureSpeechContextsIsMutable();
            speechContexts_.addAll(other.speechContexts_);
          }
          onChanged();
        }
      } else {
        if (!other.speechContexts_.isEmpty()) {
          if (speechContextsBuilder_.isEmpty()) {
            speechContextsBuilder_.dispose();
            speechContextsBuilder_ = null;
            speechContexts_ = other.speechContexts_;
            bitField0_ = (bitField0_ & ~0x00000400);
            speechContextsBuilder_ =
                com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
                    ? getSpeechContextsFieldBuilder()
                    : null;
          } else {
            speechContextsBuilder_.addAllMessages(other.speechContexts_);
          }
        }
      }
      if (other.getEnableWordTimeOffsets() != false) {
        setEnableWordTimeOffsets(other.getEnableWordTimeOffsets());
      }
      if (other.getEnableWordConfidence() != false) {
        setEnableWordConfidence(other.getEnableWordConfidence());
      }
      if (other.getEnableAutomaticPunctuation() != false) {
        setEnableAutomaticPunctuation(other.getEnableAutomaticPunctuation());
      }
      if (other.hasEnableSpokenPunctuation()) {
        mergeEnableSpokenPunctuation(other.getEnableSpokenPunctuation());
      }
      if (other.hasEnableSpokenEmojis()) {
        mergeEnableSpokenEmojis(other.getEnableSpokenEmojis());
      }
      if (other.getEnableSpeakerDiarization() != false) {
        setEnableSpeakerDiarization(other.getEnableSpeakerDiarization());
      }
      if (other.getDiarizationSpeakerCount() != 0) {
        setDiarizationSpeakerCount(other.getDiarizationSpeakerCount());
      }
      if (other.hasDiarizationConfig()) {
        mergeDiarizationConfig(other.getDiarizationConfig());
      }
      if (other.hasMetadata()) {
        mergeMetadata(other.getMetadata());
      }
      if (!other.getModel().isEmpty()) {
        model_ = other.model_;
        bitField0_ |= 0x00100000;
        onChanged();
      }
      if (other.getUseEnhanced() != false) {
        setUseEnhanced(other.getUseEnhanced());
      }
      this.mergeUnknownFields(other.getUnknownFields());
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
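            // Each tag is (field_number << 3) | wire_type: e.g. 8 is field 1
            // as a varint, 26 is field 3 length-delimited, and 146 is field 18
            // length-delimited.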
            case 0:
              done = true;
              break;
            case 8:
              {
                encoding_ = input.readEnum();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
            case 16:
              {
                sampleRateHertz_ = input.readInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
            case 26:
              {
                languageCode_ = input.readStringRequireUtf8();
                bitField0_ |= 0x00000010;
                break;
              } // case 26
            case 32:
              {
                maxAlternatives_ = input.readInt32();
                bitField0_ |= 0x00000040;
                break;
              } // case 32
            case 40:
              {
                profanityFilter_ = input.readBool();
                bitField0_ |= 0x00000080;
                break;
              } // case 40
            case 50:
              {
                com.google.cloud.speech.v1p1beta1.SpeechContext m =
                    input.readMessage(
                        com.google.cloud.speech.v1p1beta1.SpeechContext.parser(),
                        extensionRegistry);
                if (speechContextsBuilder_ == null) {
                  ensureSpeechContextsIsMutable();
                  speechContexts_.add(m);
                } else {
                  speechContextsBuilder_.addMessage(m);
                }
                break;
              } // case 50
            case 56:
              {
                audioChannelCount_ = input.readInt32();
                bitField0_ |= 0x00000004;
                break;
              } // case 56
            case 64:
              {
                enableWordTimeOffsets_ = input.readBool();
                bitField0_ |= 0x00000800;
                break;
              } // case 64
            case 74:
              {
                input.readMessage(getMetadataFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00080000;
                break;
              } // case 74
            case 88:
              {
                enableAutomaticPunctuation_ = input.readBool();
                bitField0_ |= 0x00002000;
                break;
              } // case 88
            case 96:
              {
                enableSeparateRecognitionPerChannel_ = input.readBool();
                bitField0_ |= 0x00000008;
                break;
              } // case 96
            case 106:
              {
                model_ = input.readStringRequireUtf8();
                bitField0_ |= 0x00100000;
                break;
              } // case 106
            case 112:
              {
                useEnhanced_ = input.readBool();
                bitField0_ |= 0x00200000;
                break;
              } // case 112
            case 120:
              {
                enableWordConfidence_ = input.readBool();
                bitField0_ |= 0x00001000;
                break;
              } // case 120
            case 128:
              {
                enableSpeakerDiarization_ = input.readBool();
                bitField0_ |= 0x00010000;
                break;
              } // case 128
            case 136:
              {
                diarizationSpeakerCount_ = input.readInt32();
                bitField0_ |= 0x00020000;
                break;
              } // case 136
            case 146:
              {
                java.lang.String s = input.readStringRequireUtf8();
                ensureAlternativeLanguageCodesIsMutable();
                alternativeLanguageCodes_.add(s);
                break;
              } // case 146
            case 154:
              {
                input.readMessage(
                    getDiarizationConfigFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00040000;
                break;
              } // case 154
            case 162:
              {
                input.readMessage(getAdaptationFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00000100;
                break;
              } // case 162
            case 178:
              {
                input.readMessage(
                    getEnableSpokenPunctuationFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00004000;
                break;
              } // case 178
            case 186:
              {
                input.readMessage(
                    getEnableSpokenEmojisFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00008000;
                break;
              } // case 186
            case 194:
              {
                input.readMessage(
                    getTranscriptNormalizationFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00000200;
                break;
              } // case 194
            default:
              {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
          } // switch (tag)
        } // while (!done)
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.unwrapIOException();
      } finally {
        onChanged();
      } // finally
      return this;
    }

    private int bitField0_;
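    // Each bit of bitField0_ records whether the corresponding field was
    // explicitly set on this Builder; buildPartial0 copies only those fields
    // into the result.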

    private int encoding_ = 0;
    /**
     *
     *
     * <pre>
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1;</code>
     *
     * @return The enum numeric value on the wire for encoding.
     */
    @java.lang.Override
    public int getEncodingValue() {
      return encoding_;
    }
    /**
     *
     *
     * <pre>
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1;</code>
     *
     * @param value The enum numeric value on the wire for encoding to set.
     * @return This builder for chaining.
     */
    public Builder setEncodingValue(int value) {
      encoding_ = value;
      bitField0_ |= 0x00000001;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1;</code>
     *
     * @return The encoding.
     */
    @java.lang.Override
    public com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding getEncoding() {
      com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding result =
          com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding.forNumber(encoding_);
      return result == null
          ? com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding.UNRECOGNIZED
          : result;
    }
    /**
     *
     *
     * <pre>
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1;</code>
     *
     * @param value The encoding to set.
     * @return This builder for chaining.
     */
    public Builder setEncoding(
        com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding value) {
      if (value == null) {
        throw new NullPointerException();
      }
      bitField0_ |= 0x00000001;
      encoding_ = value.getNumber();
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEncoding() {
      bitField0_ = (bitField0_ & ~0x00000001);
      encoding_ = 0;
      onChanged();
      return this;
    }
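
    // Note: setEncoding(...) and setEncodingValue(int) write the same field.
    // The enum setter rejects null; the raw-value setter also accepts numbers
    // unknown to this runtime, which getEncoding() then reports as
    // UNRECOGNIZED.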

    private int sampleRateHertz_;
    /**
     *
     *
     * <pre>
     * Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * This field is optional for FLAC and WAV audio files, but is
     * required for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 2;</code>
     *
     * @return The sampleRateHertz.
     */
    @java.lang.Override
    public int getSampleRateHertz() {
      return sampleRateHertz_;
    }
    /**
     *
     *
     * <pre>
     * Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * This field is optional for FLAC and WAV audio files, but is
     * required for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 2;</code>
     *
     * @param value The sampleRateHertz to set.
     * @return This builder for chaining.
     */
    public Builder setSampleRateHertz(int value) {
      sampleRateHertz_ = value;
      bitField0_ |= 0x00000002;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * This field is optional for FLAC and WAV audio files, but is
     * required for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 2;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearSampleRateHertz() {
      bitField0_ = (bitField0_ & ~0x00000002);
      sampleRateHertz_ = 0;
      onChanged();
      return this;
    }

    private int audioChannelCount_;
    /**
     *
     *
     * <pre>
     * The number of channels in the input audio data.
     * ONLY set this for MULTI-CHANNEL recognition.
     * Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`.
     * The only valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is `1`.
     * If `0` or omitted, defaults to one channel (mono).
     * Note: We only recognize the first channel by default.
     * To perform independent recognition on each channel set
     * `enable_separate_recognition_per_channel` to 'true'.
     * </pre>
     *
     * <code>int32 audio_channel_count = 7;</code>
     *
     * @return The audioChannelCount.
     */
    @java.lang.Override
    public int getAudioChannelCount() {
      return audioChannelCount_;
    }
    /**
     *
     *
     * <pre>
     * The number of channels in the input audio data.
     * ONLY set this for MULTI-CHANNEL recognition.
     * Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`.
     * The only valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is `1`.
     * If `0` or omitted, defaults to one channel (mono).
     * Note: We only recognize the first channel by default.
     * To perform independent recognition on each channel set
     * `enable_separate_recognition_per_channel` to 'true'.
     * </pre>
     *
     * <code>int32 audio_channel_count = 7;</code>
     *
     * @param value The audioChannelCount to set.
     * @return This builder for chaining.
     */
    public Builder setAudioChannelCount(int value) {
      audioChannelCount_ = value;
      bitField0_ |= 0x00000004;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * The number of channels in the input audio data.
     * ONLY set this for MULTI-CHANNEL recognition.
     * Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`.
     * The only valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is `1`.
     * If `0` or omitted, defaults to one channel (mono).
     * Note: We only recognize the first channel by default.
     * To perform independent recognition on each channel set
     * `enable_separate_recognition_per_channel` to 'true'.
     * </pre>
     *
     * <code>int32 audio_channel_count = 7;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearAudioChannelCount() {
      bitField0_ = (bitField0_ & ~0x00000004);
      audioChannelCount_ = 0;
      onChanged();
      return this;
    }

    private boolean enableSeparateRecognitionPerChannel_;
    /**
     *
     *
     * <pre>
     * This needs to be set to `true` explicitly, with `audio_channel_count` &gt; 1,
     * to get each channel recognized separately. The recognition result will
     * contain a `channel_tag` field to state which channel that result belongs
     * to. If this is not true, we will only recognize the first channel. The
     * request is billed cumulatively for all channels recognized:
     * `audio_channel_count` multiplied by the length of the audio.
     * </pre>
     *
     * <code>bool enable_separate_recognition_per_channel = 12;</code>
     *
     * @return The enableSeparateRecognitionPerChannel.
     */
    @java.lang.Override
    public boolean getEnableSeparateRecognitionPerChannel() {
      return enableSeparateRecognitionPerChannel_;
    }
    /**
     *
     *
     * <pre>
     * This needs to be set to `true` explicitly, with `audio_channel_count` &gt; 1,
     * to get each channel recognized separately. The recognition result will
     * contain a `channel_tag` field to state which channel that result belongs
     * to. If this is not true, we will only recognize the first channel. The
     * request is billed cumulatively for all channels recognized:
     * `audio_channel_count` multiplied by the length of the audio.
     * </pre>
     *
     * <code>bool enable_separate_recognition_per_channel = 12;</code>
     *
     * @param value The enableSeparateRecognitionPerChannel to set.
     * @return This builder for chaining.
     */
    public Builder setEnableSeparateRecognitionPerChannel(boolean value) {
      enableSeparateRecognitionPerChannel_ = value;
      bitField0_ |= 0x00000008;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * This needs to be set to `true` explicitly, with `audio_channel_count` &gt; 1,
     * to get each channel recognized separately. The recognition result will
     * contain a `channel_tag` field to state which channel that result belongs
     * to. If this is not true, we will only recognize the first channel. The
     * request is billed cumulatively for all channels recognized:
     * `audio_channel_count` multiplied by the length of the audio.
     * </pre>
     *
     * <code>bool enable_separate_recognition_per_channel = 12;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableSeparateRecognitionPerChannel() {
      bitField0_ = (bitField0_ & ~0x00000008);
      enableSeparateRecognitionPerChannel_ = false;
      onChanged();
      return this;
    }
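
    // Illustrative sketch (not part of the generated API): multi-channel
    // recognition requires the two channel fields above to be set together.
    // The channel count shown is arbitrary.
    //
    //   RecognitionConfig stereo =
    //       RecognitionConfig.newBuilder()
    //           .setAudioChannelCount(2)
    //           .setEnableSeparateRecognitionPerChannel(true)
    //           .setLanguageCode("en-US")
    //           .build();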

    private java.lang.Object languageCode_ = "";
    /**
     *
     *
     * <pre>
     * Required. The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes.
     * </pre>
     *
     * <code>string language_code = 3 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return The languageCode.
     */
    public java.lang.String getLanguageCode() {
      java.lang.Object ref = languageCode_;
      if (!(ref instanceof java.lang.String)) {
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        languageCode_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Required. The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes.
     * </pre>
     *
     * <code>string language_code = 3 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return The bytes for languageCode.
     */
    public com.google.protobuf.ByteString getLanguageCodeBytes() {
      java.lang.Object ref = languageCode_;
      if (ref instanceof java.lang.String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        languageCode_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Required. The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes.
     * </pre>
     *
     * <code>string language_code = 3 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @param value The languageCode to set.
     * @return This builder for chaining.
     */
    public Builder setLanguageCode(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      languageCode_ = value;
      bitField0_ |= 0x00000010;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes.
     * </pre>
     *
     * <code>string language_code = 3 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearLanguageCode() {
      languageCode_ = getDefaultInstance().getLanguageCode();
      bitField0_ = (bitField0_ & ~0x00000010);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes.
     * </pre>
     *
     * <code>string language_code = 3 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @param value The bytes for languageCode to set.
     * @return This builder for chaining.
     */
    public Builder setLanguageCodeBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      languageCode_ = value;
      bitField0_ |= 0x00000010;
      onChanged();
      return this;
    }
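
    // Illustrative sketch (not part of the generated API): the required main
    // language plus up to three alternatives; the language tags shown are
    // examples only.
    //
    //   RecognitionConfig.Builder b =
    //       RecognitionConfig.newBuilder()
    //           .setLanguageCode("en-US")
    //           .addAlternativeLanguageCodes("es-US")
    //           .addAlternativeLanguageCodes("fr-FR");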

    private com.google.protobuf.LazyStringList alternativeLanguageCodes_ =
        com.google.protobuf.LazyStringArrayList.EMPTY;

    private void ensureAlternativeLanguageCodesIsMutable() {
      if (!((bitField0_ & 0x00000020) != 0)) {
        alternativeLanguageCodes_ =
            new com.google.protobuf.LazyStringArrayList(alternativeLanguageCodes_);
        bitField0_ |= 0x00000020;
      }
    }
    /**
     *
     *
     * <pre>
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, the recognition result will contain recognition in the most likely
     * language detected, including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * </pre>
     *
     * <code>repeated string alternative_language_codes = 18;</code>
     *
     * @return A list containing the alternativeLanguageCodes.
     */
    public com.google.protobuf.ProtocolStringList getAlternativeLanguageCodesList() {
      return alternativeLanguageCodes_.getUnmodifiableView();
    }
    /**
     *
     *
     * <pre>
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, the recognition result will contain recognition in the most likely
     * language detected, including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * </pre>
     *
     * <code>repeated string alternative_language_codes = 18;</code>
     *
     * @return The count of alternativeLanguageCodes.
     */
    public int getAlternativeLanguageCodesCount() {
      return alternativeLanguageCodes_.size();
    }
    /**
     *
     *
     * <pre>
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, the recognition result will contain recognition in the most likely
     * language detected, including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * </pre>
     *
     * <code>repeated string alternative_language_codes = 18;</code>
     *
     * @param index The index of the element to return.
     * @return The alternativeLanguageCodes at the given index.
     */
    public java.lang.String getAlternativeLanguageCodes(int index) {
      return alternativeLanguageCodes_.get(index);
    }
    /**
     *
     *
     * <pre>
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, the recognition result will contain recognition in the most likely
     * language detected, including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * </pre>
     *
     * <code>repeated string alternative_language_codes = 18;</code>
     *
     * @param index The index of the value to return.
     * @return The bytes of the alternativeLanguageCodes at the given index.
     */
    public com.google.protobuf.ByteString getAlternativeLanguageCodesBytes(int index) {
      return alternativeLanguageCodes_.getByteString(index);
    }
    /**
     *
     *
     * <pre>
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, the recognition result will contain recognition in the most likely
     * language detected, including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * </pre>
     *
     * <code>repeated string alternative_language_codes = 18;</code>
     *
     * @param index The index to set the value at.
     * @param value The alternativeLanguageCodes to set.
     * @return This builder for chaining.
     */
    public Builder setAlternativeLanguageCodes(int index, java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      ensureAlternativeLanguageCodesIsMutable();
      alternativeLanguageCodes_.set(index, value);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, the recognition result will contain recognition in the most likely
     * language detected, including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * </pre>
     *
     * <code>repeated string alternative_language_codes = 18;</code>
     *
     * @param value The alternativeLanguageCodes to add.
     * @return This builder for chaining.
     */
    public Builder addAlternativeLanguageCodes(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      ensureAlternativeLanguageCodesIsMutable();
      alternativeLanguageCodes_.add(value);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, the recognition result will contain recognition in the most likely
     * language detected, including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * </pre>
     *
     * <code>repeated string alternative_language_codes = 18;</code>
     *
     * @param values The alternativeLanguageCodes to add.
     * @return This builder for chaining.
     */
    public Builder addAllAlternativeLanguageCodes(java.lang.Iterable<java.lang.String> values) {
      ensureAlternativeLanguageCodesIsMutable();
      com.google.protobuf.AbstractMessageLite.Builder.addAll(values, alternativeLanguageCodes_);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, the recognition result will contain recognition in the most likely
     * language detected, including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * </pre>
     *
     * <code>repeated string alternative_language_codes = 18;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearAlternativeLanguageCodes() {
      alternativeLanguageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
      bitField0_ = (bitField0_ & ~0x00000020);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, the recognition result will contain recognition in the most likely
     * language detected, including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * </pre>
     *
     * <code>repeated string alternative_language_codes = 18;</code>
     *
     * @param value The bytes of the alternativeLanguageCodes to add.
     * @return This builder for chaining.
     */
    public Builder addAlternativeLanguageCodesBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      ensureAlternativeLanguageCodesIsMutable();
      alternativeLanguageCodes_.add(value);
      onChanged();
      return this;
    }
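
    // Usage sketch for this field: a primary language plus up to three
    // alternatives. setLanguageCode is assumed from earlier in this builder,
    // and the language tags shown are illustrative values, not defaults.
    //
    //   RecognitionConfig config =
    //       RecognitionConfig.newBuilder()
    //           .setLanguageCode("en-US")
    //           .addAlternativeLanguageCodes("es-ES")
    //           .addAlternativeLanguageCodes("fr-FR")
    //           .build();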

    private int maxAlternatives_;
    /**
     *
     *
     * <pre>
     * Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, the server will return a maximum of one.
     * </pre>
     *
     * <code>int32 max_alternatives = 4;</code>
     *
     * @return The maxAlternatives.
     */
    @java.lang.Override
    public int getMaxAlternatives() {
      return maxAlternatives_;
    }
    /**
     *
     *
     * <pre>
     * Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, the server will return a maximum of one.
     * </pre>
     *
     * <code>int32 max_alternatives = 4;</code>
     *
     * @param value The maxAlternatives to set.
     * @return This builder for chaining.
     */
    public Builder setMaxAlternatives(int value) {
      maxAlternatives_ = value;
      bitField0_ |= 0x00000040;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, the server will return a maximum of one.
     * </pre>
     *
     * <code>int32 max_alternatives = 4;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearMaxAlternatives() {
      bitField0_ = (bitField0_ & ~0x00000040);
      maxAlternatives_ = 0;
      onChanged();
      return this;
    }
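
    // Usage sketch: requesting several hypotheses per result; 5 is an
    // arbitrary illustrative value within the documented 0-30 range.
    //
    //   RecognitionConfig config =
    //       RecognitionConfig.newBuilder()
    //           .setMaxAlternatives(5)
    //           .build();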

    private boolean profanityFilter_;
    /**
     *
     *
     * <pre>
     * If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * </pre>
     *
     * <code>bool profanity_filter = 5;</code>
     *
     * @return The profanityFilter.
     */
    @java.lang.Override
    public boolean getProfanityFilter() {
      return profanityFilter_;
    }
    /**
     *
     *
     * <pre>
     * If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * </pre>
     *
     * <code>bool profanity_filter = 5;</code>
     *
     * @param value The profanityFilter to set.
     * @return This builder for chaining.
     */
    public Builder setProfanityFilter(boolean value) {
      profanityFilter_ = value;
      bitField0_ |= 0x00000080;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * </pre>
     *
     * <code>bool profanity_filter = 5;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearProfanityFilter() {
      bitField0_ = (bitField0_ & ~0x00000080);
      profanityFilter_ = false;
      onChanged();
      return this;
    }
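
    // Usage sketch: enabling the profanity filter so that filtered words are
    // masked with asterisks in the returned transcripts.
    //
    //   RecognitionConfig config =
    //       RecognitionConfig.newBuilder()
    //           .setProfanityFilter(true)
    //           .build();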

    private com.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1p1beta1.SpeechAdaptation,
            com.google.cloud.speech.v1p1beta1.SpeechAdaptation.Builder,
            com.google.cloud.speech.v1p1beta1.SpeechAdaptationOrBuilder>
        adaptationBuilder_;
    /**
     *
     *
     * <pre>
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set, it supersedes the `speech_contexts` field.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation = 20;</code>
     *
     * @return Whether the adaptation field is set.
     */
    public boolean hasAdaptation() {
      return ((bitField0_ & 0x00000100) != 0);
    }
    /**
     *
     *
     * <pre>
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set, it supersedes the `speech_contexts` field.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation = 20;</code>
     *
     * @return The adaptation.
     */
    public com.google.cloud.speech.v1p1beta1.SpeechAdaptation getAdaptation() {
      if (adaptationBuilder_ == null) {
        return adaptation_ == null
            ? com.google.cloud.speech.v1p1beta1.SpeechAdaptation.getDefaultInstance()
            : adaptation_;
      } else {
        return adaptationBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set, it supersedes the `speech_contexts` field.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation = 20;</code>
     */
    public Builder setAdaptation(com.google.cloud.speech.v1p1beta1.SpeechAdaptation value) {
      if (adaptationBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        adaptation_ = value;
      } else {
        adaptationBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00000100;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set, it supersedes the `speech_contexts` field.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation = 20;</code>
     */
    public Builder setAdaptation(
        com.google.cloud.speech.v1p1beta1.SpeechAdaptation.Builder builderForValue) {
      if (adaptationBuilder_ == null) {
        adaptation_ = builderForValue.build();
      } else {
        adaptationBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00000100;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set, it supersedes the `speech_contexts` field.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation = 20;</code>
     */
    public Builder mergeAdaptation(com.google.cloud.speech.v1p1beta1.SpeechAdaptation value) {
      if (adaptationBuilder_ == null) {
        if (((bitField0_ & 0x00000100) != 0)
            && adaptation_ != null
            && adaptation_
                != com.google.cloud.speech.v1p1beta1.SpeechAdaptation.getDefaultInstance()) {
          getAdaptationBuilder().mergeFrom(value);
        } else {
          adaptation_ = value;
        }
      } else {
        adaptationBuilder_.mergeFrom(value);
      }
      bitField0_ |= 0x00000100;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set, it supersedes the `speech_contexts` field.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation = 20;</code>
     */
    public Builder clearAdaptation() {
      bitField0_ = (bitField0_ & ~0x00000100);
      adaptation_ = null;
      if (adaptationBuilder_ != null) {
        adaptationBuilder_.dispose();
        adaptationBuilder_ = null;
      }
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set, it supersedes the `speech_contexts` field.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation = 20;</code>
     */
    public com.google.cloud.speech.v1p1beta1.SpeechAdaptation.Builder getAdaptationBuilder() {
      bitField0_ |= 0x00000100;
      onChanged();
      return getAdaptationFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set, it supersedes the `speech_contexts` field.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation = 20;</code>
     */
    public com.google.cloud.speech.v1p1beta1.SpeechAdaptationOrBuilder getAdaptationOrBuilder() {
      if (adaptationBuilder_ != null) {
        return adaptationBuilder_.getMessageOrBuilder();
      } else {
        return adaptation_ == null
            ? com.google.cloud.speech.v1p1beta1.SpeechAdaptation.getDefaultInstance()
            : adaptation_;
      }
    }
    /**
     *
     *
     * <pre>
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set, it supersedes the `speech_contexts` field.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeechAdaptation adaptation = 20;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1p1beta1.SpeechAdaptation,
            com.google.cloud.speech.v1p1beta1.SpeechAdaptation.Builder,
            com.google.cloud.speech.v1p1beta1.SpeechAdaptationOrBuilder>
        getAdaptationFieldBuilder() {
      if (adaptationBuilder_ == null) {
        adaptationBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.speech.v1p1beta1.SpeechAdaptation,
                com.google.cloud.speech.v1p1beta1.SpeechAdaptation.Builder,
                com.google.cloud.speech.v1p1beta1.SpeechAdaptationOrBuilder>(
                getAdaptation(), getParentForChildren(), isClean());
        adaptation_ = null;
      }
      return adaptationBuilder_;
    }
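
    // Usage sketch: attaching a SpeechAdaptation message, which supersedes
    // speech_contexts when set. The adaptation content itself (phrase sets,
    // custom classes) is elided rather than guessed at here.
    //
    //   RecognitionConfig config =
    //       RecognitionConfig.newBuilder()
    //           .setAdaptation(
    //               com.google.cloud.speech.v1p1beta1.SpeechAdaptation.newBuilder()
    //                   // ... populate per the speech adaptation docs ...
    //                   .build())
    //           .build();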

    private com.google.cloud.speech.v1p1beta1.TranscriptNormalization transcriptNormalization_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1p1beta1.TranscriptNormalization,
            com.google.cloud.speech.v1p1beta1.TranscriptNormalization.Builder,
            com.google.cloud.speech.v1p1beta1.TranscriptNormalizationOrBuilder>
        transcriptNormalizationBuilder_;
    /**
     *
     *
     * <pre>
     * Use transcription normalization to automatically replace parts of the
     * transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability &gt; 0.8)
     * and final transcripts.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.TranscriptNormalization transcript_normalization = 24;
     * </code>
     *
     * @return Whether the transcriptNormalization field is set.
     */
    public boolean hasTranscriptNormalization() {
      return ((bitField0_ & 0x00000200) != 0);
    }
    /**
     *
     *
     * <pre>
     * Use transcription normalization to automatically replace parts of the
     * transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability &gt; 0.8)
     * and final transcripts.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.TranscriptNormalization transcript_normalization = 24;
     * </code>
     *
     * @return The transcriptNormalization.
     */
    public com.google.cloud.speech.v1p1beta1.TranscriptNormalization getTranscriptNormalization() {
      if (transcriptNormalizationBuilder_ == null) {
        return transcriptNormalization_ == null
            ? com.google.cloud.speech.v1p1beta1.TranscriptNormalization.getDefaultInstance()
            : transcriptNormalization_;
      } else {
        return transcriptNormalizationBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * Use transcription normalization to automatically replace parts of the
     * transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability &gt; 0.8)
     * and final transcripts.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.TranscriptNormalization transcript_normalization = 24;
     * </code>
     */
    public Builder setTranscriptNormalization(
        com.google.cloud.speech.v1p1beta1.TranscriptNormalization value) {
      if (transcriptNormalizationBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        transcriptNormalization_ = value;
      } else {
        transcriptNormalizationBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00000200;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Use transcription normalization to automatically replace parts of the
     * transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability &gt; 0.8)
     * and final transcripts.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.TranscriptNormalization transcript_normalization = 24;
     * </code>
     */
    public Builder setTranscriptNormalization(
        com.google.cloud.speech.v1p1beta1.TranscriptNormalization.Builder builderForValue) {
      if (transcriptNormalizationBuilder_ == null) {
        transcriptNormalization_ = builderForValue.build();
      } else {
        transcriptNormalizationBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00000200;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Use transcription normalization to automatically replace parts of the
     * transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability &gt; 0.8)
     * and final transcripts.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.TranscriptNormalization transcript_normalization = 24;
     * </code>
     */
    public Builder mergeTranscriptNormalization(
        com.google.cloud.speech.v1p1beta1.TranscriptNormalization value) {
      if (transcriptNormalizationBuilder_ == null) {
        if (((bitField0_ & 0x00000200) != 0)
            && transcriptNormalization_ != null
            && transcriptNormalization_
                != com.google.cloud.speech.v1p1beta1.TranscriptNormalization.getDefaultInstance()) {
          getTranscriptNormalizationBuilder().mergeFrom(value);
        } else {
          transcriptNormalization_ = value;
        }
      } else {
        transcriptNormalizationBuilder_.mergeFrom(value);
      }
      bitField0_ |= 0x00000200;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Use transcription normalization to automatically replace parts of the
     * transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability &gt; 0.8)
     * and final transcripts.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.TranscriptNormalization transcript_normalization = 24;
     * </code>
     */
    public Builder clearTranscriptNormalization() {
      bitField0_ = (bitField0_ & ~0x00000200);
      transcriptNormalization_ = null;
      if (transcriptNormalizationBuilder_ != null) {
        transcriptNormalizationBuilder_.dispose();
        transcriptNormalizationBuilder_ = null;
      }
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Use transcription normalization to automatically replace parts of the
     * transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability &gt; 0.8)
     * and final transcripts.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.TranscriptNormalization transcript_normalization = 24;
     * </code>
     */
    public com.google.cloud.speech.v1p1beta1.TranscriptNormalization.Builder
        getTranscriptNormalizationBuilder() {
      bitField0_ |= 0x00000200;
      onChanged();
      return getTranscriptNormalizationFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * Use transcription normalization to automatically replace parts of the
     * transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability &gt; 0.8)
     * and final transcripts.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.TranscriptNormalization transcript_normalization = 24;
     * </code>
     */
    public com.google.cloud.speech.v1p1beta1.TranscriptNormalizationOrBuilder
        getTranscriptNormalizationOrBuilder() {
      if (transcriptNormalizationBuilder_ != null) {
        return transcriptNormalizationBuilder_.getMessageOrBuilder();
      } else {
        return transcriptNormalization_ == null
            ? com.google.cloud.speech.v1p1beta1.TranscriptNormalization.getDefaultInstance()
            : transcriptNormalization_;
      }
    }
    /**
     *
     *
     * <pre>
     * Use transcription normalization to automatically replace parts of the
     * transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability &gt; 0.8)
     * and final transcripts.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.TranscriptNormalization transcript_normalization = 24;
     * </code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1p1beta1.TranscriptNormalization,
            com.google.cloud.speech.v1p1beta1.TranscriptNormalization.Builder,
            com.google.cloud.speech.v1p1beta1.TranscriptNormalizationOrBuilder>
        getTranscriptNormalizationFieldBuilder() {
      if (transcriptNormalizationBuilder_ == null) {
        transcriptNormalizationBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.speech.v1p1beta1.TranscriptNormalization,
                com.google.cloud.speech.v1p1beta1.TranscriptNormalization.Builder,
                com.google.cloud.speech.v1p1beta1.TranscriptNormalizationOrBuilder>(
                getTranscriptNormalization(), getParentForChildren(), isClean());
        transcriptNormalization_ = null;
      }
      return transcriptNormalizationBuilder_;
    }
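
    // Usage sketch: supplying a TranscriptNormalization message; the entries
    // defining the search/replace rules are elided rather than guessed at.
    //
    //   RecognitionConfig config =
    //       RecognitionConfig.newBuilder()
    //           .setTranscriptNormalization(
    //               com.google.cloud.speech.v1p1beta1.TranscriptNormalization.newBuilder()
    //                   // ... add replacement entries here ...
    //                   .build())
    //           .build();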

    private java.util.List<com.google.cloud.speech.v1p1beta1.SpeechContext> speechContexts_ =
        java.util.Collections.emptyList();

    private void ensureSpeechContextsIsMutable() {
      if (!((bitField0_ & 0x00000400) != 0)) {
        speechContexts_ =
            new java.util.ArrayList<com.google.cloud.speech.v1p1beta1.SpeechContext>(
                speechContexts_);
        bitField0_ |= 0x00000400;
      }
    }

    private com.google.protobuf.RepeatedFieldBuilderV3<
            com.google.cloud.speech.v1p1beta1.SpeechContext,
            com.google.cloud.speech.v1p1beta1.SpeechContext.Builder,
            com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder>
        speechContextsBuilder_;

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public java.util.List<com.google.cloud.speech.v1p1beta1.SpeechContext> getSpeechContextsList() {
      if (speechContextsBuilder_ == null) {
        return java.util.Collections.unmodifiableList(speechContexts_);
      } else {
        return speechContextsBuilder_.getMessageList();
      }
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public int getSpeechContextsCount() {
      if (speechContextsBuilder_ == null) {
        return speechContexts_.size();
      } else {
        return speechContextsBuilder_.getCount();
      }
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public com.google.cloud.speech.v1p1beta1.SpeechContext getSpeechContexts(int index) {
      if (speechContextsBuilder_ == null) {
        return speechContexts_.get(index);
      } else {
        return speechContextsBuilder_.getMessage(index);
      }
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder setSpeechContexts(
        int index, com.google.cloud.speech.v1p1beta1.SpeechContext value) {
      if (speechContextsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureSpeechContextsIsMutable();
        speechContexts_.set(index, value);
        onChanged();
      } else {
        speechContextsBuilder_.setMessage(index, value);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder setSpeechContexts(
        int index, com.google.cloud.speech.v1p1beta1.SpeechContext.Builder builderForValue) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        speechContexts_.set(index, builderForValue.build());
        onChanged();
      } else {
        speechContextsBuilder_.setMessage(index, builderForValue.build());
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder addSpeechContexts(com.google.cloud.speech.v1p1beta1.SpeechContext value) {
      if (speechContextsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureSpeechContextsIsMutable();
        speechContexts_.add(value);
        onChanged();
      } else {
        speechContextsBuilder_.addMessage(value);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder addSpeechContexts(
        int index, com.google.cloud.speech.v1p1beta1.SpeechContext value) {
      if (speechContextsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureSpeechContextsIsMutable();
        speechContexts_.add(index, value);
        onChanged();
      } else {
        speechContextsBuilder_.addMessage(index, value);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder addSpeechContexts(
        com.google.cloud.speech.v1p1beta1.SpeechContext.Builder builderForValue) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        speechContexts_.add(builderForValue.build());
        onChanged();
      } else {
        speechContextsBuilder_.addMessage(builderForValue.build());
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder addSpeechContexts(
        int index, com.google.cloud.speech.v1p1beta1.SpeechContext.Builder builderForValue) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        speechContexts_.add(index, builderForValue.build());
        onChanged();
      } else {
        speechContextsBuilder_.addMessage(index, builderForValue.build());
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder addAllSpeechContexts(
        java.lang.Iterable<? extends com.google.cloud.speech.v1p1beta1.SpeechContext> values) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        com.google.protobuf.AbstractMessageLite.Builder.addAll(values, speechContexts_);
        onChanged();
      } else {
        speechContextsBuilder_.addAllMessages(values);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder clearSpeechContexts() {
      if (speechContextsBuilder_ == null) {
        speechContexts_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000400);
        onChanged();
      } else {
        speechContextsBuilder_.clear();
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder removeSpeechContexts(int index) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        speechContexts_.remove(index);
        onChanged();
      } else {
        speechContextsBuilder_.remove(index);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public com.google.cloud.speech.v1p1beta1.SpeechContext.Builder getSpeechContextsBuilder(
        int index) {
      return getSpeechContextsFieldBuilder().getBuilder(index);
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder getSpeechContextsOrBuilder(
        int index) {
      if (speechContextsBuilder_ == null) {
        return speechContexts_.get(index);
      } else {
        return speechContextsBuilder_.getMessageOrBuilder(index);
      }
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public java.util.List<? extends com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder>
        getSpeechContextsOrBuilderList() {
      if (speechContextsBuilder_ != null) {
        return speechContextsBuilder_.getMessageOrBuilderList();
      } else {
        return java.util.Collections.unmodifiableList(speechContexts_);
      }
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public com.google.cloud.speech.v1p1beta1.SpeechContext.Builder addSpeechContextsBuilder() {
      return getSpeechContextsFieldBuilder()
          .addBuilder(com.google.cloud.speech.v1p1beta1.SpeechContext.getDefaultInstance());
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public com.google.cloud.speech.v1p1beta1.SpeechContext.Builder addSpeechContextsBuilder(
        int index) {
      return getSpeechContextsFieldBuilder()
          .addBuilder(index, com.google.cloud.speech.v1p1beta1.SpeechContext.getDefaultInstance());
    }
    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;</code>
     */
    public java.util.List<com.google.cloud.speech.v1p1beta1.SpeechContext.Builder>
        getSpeechContextsBuilderList() {
      return getSpeechContextsFieldBuilder().getBuilderList();
    }

    private com.google.protobuf.RepeatedFieldBuilderV3<
            com.google.cloud.speech.v1p1beta1.SpeechContext,
            com.google.cloud.speech.v1p1beta1.SpeechContext.Builder,
            com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder>
        getSpeechContextsFieldBuilder() {
      if (speechContextsBuilder_ == null) {
        speechContextsBuilder_ =
            new com.google.protobuf.RepeatedFieldBuilderV3<
                com.google.cloud.speech.v1p1beta1.SpeechContext,
                com.google.cloud.speech.v1p1beta1.SpeechContext.Builder,
                com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder>(
                speechContexts_,
                ((bitField0_ & 0x00000400) != 0),
                getParentForChildren(),
                isClean());
        speechContexts_ = null;
      }
      return speechContextsBuilder_;
    }
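
    // Usage sketch: adding a SpeechContext carrying hint phrases; addPhrases
    // is assumed from the SpeechContext builder, and the phrases shown are
    // illustrative.
    //
    //   RecognitionConfig config =
    //       RecognitionConfig.newBuilder()
    //           .addSpeechContexts(
    //               com.google.cloud.speech.v1p1beta1.SpeechContext.newBuilder()
    //                   .addPhrases("weather forecast")
    //                   .addPhrases("traffic report"))
    //           .build();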

    private boolean enableWordTimeOffsets_;
    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and
     * the start and end time offsets (timestamps) for those words. If
     * `false`, no word-level time offset information is returned. The default is
     * `false`.
     * </pre>
     *
     * <code>bool enable_word_time_offsets = 8;</code>
     *
     * @return The enableWordTimeOffsets.
     */
    @java.lang.Override
    public boolean getEnableWordTimeOffsets() {
      return enableWordTimeOffsets_;
    }
    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and
     * the start and end time offsets (timestamps) for those words. If
     * `false`, no word-level time offset information is returned. The default is
     * `false`.
     * </pre>
     *
     * <code>bool enable_word_time_offsets = 8;</code>
     *
     * @param value The enableWordTimeOffsets to set.
     * @return This builder for chaining.
     */
    public Builder setEnableWordTimeOffsets(boolean value) {
      enableWordTimeOffsets_ = value;
      bitField0_ |= 0x00000800;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and
     * the start and end time offsets (timestamps) for those words. If
     * `false`, no word-level time offset information is returned. The default is
     * `false`.
     * </pre>
     *
     * <code>bool enable_word_time_offsets = 8;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableWordTimeOffsets() {
      bitField0_ = (bitField0_ & ~0x00000800);
      enableWordTimeOffsets_ = false;
      onChanged();
      return this;
    }
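
    // Usage sketch: turning on word-level start/end timestamps for the top
    // result.
    //
    //   RecognitionConfig config =
    //       RecognitionConfig.newBuilder()
    //           .setEnableWordTimeOffsets(true)
    //           .build();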

    private boolean enableWordConfidence_;
    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and the
     * confidence for those words. If `false`, no word-level confidence
     * information is returned. The default is `false`.
     * </pre>
     *
     * <code>bool enable_word_confidence = 15;</code>
     *
     * @return The enableWordConfidence.
     */
    @java.lang.Override
    public boolean getEnableWordConfidence() {
      return enableWordConfidence_;
    }
    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and the
     * confidence for those words. If `false`, no word-level confidence
     * information is returned. The default is `false`.
     * </pre>
     *
     * <code>bool enable_word_confidence = 15;</code>
     *
     * @param value The enableWordConfidence to set.
     * @return This builder for chaining.
     */
    public Builder setEnableWordConfidence(boolean value) {
      enableWordConfidence_ = value;
      bitField0_ |= 0x00001000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and the
     * confidence for those words. If `false`, no word-level confidence
     * information is returned. The default is `false`.
     * </pre>
     *
     * <code>bool enable_word_confidence = 15;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableWordConfidence() {
      bitField0_ = (bitField0_ & ~0x00001000);
      enableWordConfidence_ = false;
      onChanged();
      return this;
    }
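
    // Usage sketch: requesting per-word confidence scores in the top result.
    //
    //   RecognitionConfig config =
    //       RecognitionConfig.newBuilder()
    //           .setEnableWordConfidence(true)
    //           .build();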

    private boolean enableAutomaticPunctuation_;
    /**
     *
     *
     * <pre>
     * If 'true', adds punctuation to recognition result hypotheses.
     * This feature is only available in select languages. Setting this for
     * requests in other languages has no effect at all.
     * The default 'false' value does not add punctuation to result hypotheses.
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 11;</code>
     *
     * @return The enableAutomaticPunctuation.
     */
    @java.lang.Override
    public boolean getEnableAutomaticPunctuation() {
      return enableAutomaticPunctuation_;
    }
    /**
     *
     *
     * <pre>
     * If 'true', adds punctuation to recognition result hypotheses.
     * This feature is only available in select languages. Setting this for
     * requests in other languages has no effect at all.
     * The default 'false' value does not add punctuation to result hypotheses.
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 11;</code>
     *
     * @param value The enableAutomaticPunctuation to set.
     * @return This builder for chaining.
     */
    public Builder setEnableAutomaticPunctuation(boolean value) {
      enableAutomaticPunctuation_ = value;
      bitField0_ |= 0x00002000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * If 'true', adds punctuation to recognition result hypotheses.
     * This feature is only available in select languages. Setting this for
     * requests in other languages has no effect at all.
     * The default 'false' value does not add punctuation to result hypotheses.
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 11;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableAutomaticPunctuation() {
      bitField0_ = (bitField0_ & ~0x00002000);
      enableAutomaticPunctuation_ = false;
      onChanged();
      return this;
    }
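
    // Usage sketch: enabling automatic punctuation; this only has an effect in
    // the languages that support it.
    //
    //   RecognitionConfig config =
    //       RecognitionConfig.newBuilder()
    //           .setEnableAutomaticPunctuation(true)
    //           .build();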

    private com.google.protobuf.BoolValue enableSpokenPunctuation_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.BoolValue,
            com.google.protobuf.BoolValue.Builder,
            com.google.protobuf.BoolValueOrBuilder>
        enableSpokenPunctuationBuilder_;
    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g., command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     *
     * @return Whether the enableSpokenPunctuation field is set.
     */
    public boolean hasEnableSpokenPunctuation() {
      return ((bitField0_ & 0x00004000) != 0);
    }
    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g., command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     *
     * @return The enableSpokenPunctuation.
     */
    public com.google.protobuf.BoolValue getEnableSpokenPunctuation() {
      if (enableSpokenPunctuationBuilder_ == null) {
        return enableSpokenPunctuation_ == null
            ? com.google.protobuf.BoolValue.getDefaultInstance()
            : enableSpokenPunctuation_;
      } else {
        return enableSpokenPunctuationBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g., command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     */
    public Builder setEnableSpokenPunctuation(com.google.protobuf.BoolValue value) {
      if (enableSpokenPunctuationBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        enableSpokenPunctuation_ = value;
      } else {
        enableSpokenPunctuationBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00004000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g., command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     */
    public Builder setEnableSpokenPunctuation(
        com.google.protobuf.BoolValue.Builder builderForValue) {
      if (enableSpokenPunctuationBuilder_ == null) {
        enableSpokenPunctuation_ = builderForValue.build();
      } else {
        enableSpokenPunctuationBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00004000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g., command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     */
    public Builder mergeEnableSpokenPunctuation(com.google.protobuf.BoolValue value) {
      if (enableSpokenPunctuationBuilder_ == null) {
        if (((bitField0_ & 0x00004000) != 0)
            && enableSpokenPunctuation_ != null
            && enableSpokenPunctuation_ != com.google.protobuf.BoolValue.getDefaultInstance()) {
          getEnableSpokenPunctuationBuilder().mergeFrom(value);
        } else {
          enableSpokenPunctuation_ = value;
        }
      } else {
        enableSpokenPunctuationBuilder_.mergeFrom(value);
      }
      bitField0_ |= 0x00004000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g., command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     */
    public Builder clearEnableSpokenPunctuation() {
      bitField0_ = (bitField0_ & ~0x00004000);
      enableSpokenPunctuation_ = null;
      if (enableSpokenPunctuationBuilder_ != null) {
        enableSpokenPunctuationBuilder_.dispose();
        enableSpokenPunctuationBuilder_ = null;
      }
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g., command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     */
    public com.google.protobuf.BoolValue.Builder getEnableSpokenPunctuationBuilder() {
      bitField0_ |= 0x00004000;
      onChanged();
      return getEnableSpokenPunctuationFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g., command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     */
    public com.google.protobuf.BoolValueOrBuilder getEnableSpokenPunctuationOrBuilder() {
      if (enableSpokenPunctuationBuilder_ != null) {
        return enableSpokenPunctuationBuilder_.getMessageOrBuilder();
      } else {
        return enableSpokenPunctuation_ == null
            ? com.google.protobuf.BoolValue.getDefaultInstance()
            : enableSpokenPunctuation_;
      }
    }
    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g., command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.BoolValue,
            com.google.protobuf.BoolValue.Builder,
            com.google.protobuf.BoolValueOrBuilder>
        getEnableSpokenPunctuationFieldBuilder() {
      if (enableSpokenPunctuationBuilder_ == null) {
        enableSpokenPunctuationBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.protobuf.BoolValue,
                com.google.protobuf.BoolValue.Builder,
                com.google.protobuf.BoolValueOrBuilder>(
                getEnableSpokenPunctuation(), getParentForChildren(), isClean());
        enableSpokenPunctuation_ = null;
      }
      return enableSpokenPunctuationBuilder_;
    }
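
    // Usage sketch: explicitly forcing spoken punctuation on, overriding the
    // model default. The BoolValue wrapper is built explicitly because this
    // field is a message, not a bare bool.
    //
    //   RecognitionConfig config =
    //       RecognitionConfig.newBuilder()
    //           .setEnableSpokenPunctuation(
    //               com.google.protobuf.BoolValue.newBuilder().setValue(true).build())
    //           .build();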

    private com.google.protobuf.BoolValue enableSpokenEmojis_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.BoolValue,
            com.google.protobuf.BoolValue.Builder,
            com.google.protobuf.BoolValueOrBuilder>
        enableSpokenEmojisBuilder_;
    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses the default behavior based on the model of choice.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     *
     * @return Whether the enableSpokenEmojis field is set.
     */
    public boolean hasEnableSpokenEmojis() {
      return ((bitField0_ & 0x00008000) != 0);
    }
    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses the default behavior based on the model of choice.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     *
     * @return The enableSpokenEmojis.
     */
    public com.google.protobuf.BoolValue getEnableSpokenEmojis() {
      if (enableSpokenEmojisBuilder_ == null) {
        return enableSpokenEmojis_ == null
            ? com.google.protobuf.BoolValue.getDefaultInstance()
            : enableSpokenEmojis_;
      } else {
        return enableSpokenEmojisBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses the default behavior based on the model of choice.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     */
    public Builder setEnableSpokenEmojis(com.google.protobuf.BoolValue value) {
      if (enableSpokenEmojisBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        enableSpokenEmojis_ = value;
      } else {
        enableSpokenEmojisBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00008000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses default behavior based on the chosen model.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     */
    public Builder setEnableSpokenEmojis(com.google.protobuf.BoolValue.Builder builderForValue) {
      if (enableSpokenEmojisBuilder_ == null) {
        enableSpokenEmojis_ = builderForValue.build();
      } else {
        enableSpokenEmojisBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00008000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses default behavior based on the chosen model.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     */
    public Builder mergeEnableSpokenEmojis(com.google.protobuf.BoolValue value) {
      if (enableSpokenEmojisBuilder_ == null) {
        if (((bitField0_ & 0x00008000) != 0)
            && enableSpokenEmojis_ != null
            && enableSpokenEmojis_ != com.google.protobuf.BoolValue.getDefaultInstance()) {
          getEnableSpokenEmojisBuilder().mergeFrom(value);
        } else {
          enableSpokenEmojis_ = value;
        }
      } else {
        enableSpokenEmojisBuilder_.mergeFrom(value);
      }
      bitField0_ |= 0x00008000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses default behavior based on the chosen model.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     */
    public Builder clearEnableSpokenEmojis() {
      bitField0_ = (bitField0_ & ~0x00008000);
      enableSpokenEmojis_ = null;
      if (enableSpokenEmojisBuilder_ != null) {
        enableSpokenEmojisBuilder_.dispose();
        enableSpokenEmojisBuilder_ = null;
      }
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses default behavior based on the chosen model.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     */
    public com.google.protobuf.BoolValue.Builder getEnableSpokenEmojisBuilder() {
      bitField0_ |= 0x00008000;
      onChanged();
      return getEnableSpokenEmojisFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses default behavior based on the chosen model.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     */
    public com.google.protobuf.BoolValueOrBuilder getEnableSpokenEmojisOrBuilder() {
      if (enableSpokenEmojisBuilder_ != null) {
        return enableSpokenEmojisBuilder_.getMessageOrBuilder();
      } else {
        return enableSpokenEmojis_ == null
            ? com.google.protobuf.BoolValue.getDefaultInstance()
            : enableSpokenEmojis_;
      }
    }
    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses default behavior based on the chosen model.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.BoolValue,
            com.google.protobuf.BoolValue.Builder,
            com.google.protobuf.BoolValueOrBuilder>
        getEnableSpokenEmojisFieldBuilder() {
      if (enableSpokenEmojisBuilder_ == null) {
        enableSpokenEmojisBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.protobuf.BoolValue,
                com.google.protobuf.BoolValue.Builder,
                com.google.protobuf.BoolValueOrBuilder>(
                getEnableSpokenEmojis(), getParentForChildren(), isClean());
        enableSpokenEmojis_ = null;
      }
      return enableSpokenEmojisBuilder_;
    }
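
    // Illustrative usage sketch: like spoken punctuation, enable_spoken_emojis
    // is a BoolValue wrapper; setting it to true asks the service to replace
    // spoken emojis with their Unicode symbols in the final transcript.
    //
    //   RecognitionConfig config =
    //       RecognitionConfig.newBuilder()
    //           .setEnableSpokenEmojis(com.google.protobuf.BoolValue.of(true))
    //           .build();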

    private boolean enableSpeakerDiarization_;
    /**
     *
     *
     * <pre>
     * If 'true', enables speaker detection for each recognized word in
     * the top alternative of the recognition result using a speaker_tag provided
     * in the WordInfo.
     * Note: Use diarization_config instead.
     * </pre>
     *
     * <code>bool enable_speaker_diarization = 16 [deprecated = true];</code>
     *
     * @deprecated google.cloud.speech.v1p1beta1.RecognitionConfig.enable_speaker_diarization is
     *     deprecated. See google/cloud/speech/v1p1beta1/cloud_speech.proto;l=401
     * @return The enableSpeakerDiarization.
     */
    @java.lang.Override
    @java.lang.Deprecated
    public boolean getEnableSpeakerDiarization() {
      return enableSpeakerDiarization_;
    }
    /**
     *
     *
     * <pre>
     * If 'true', enables speaker detection for each recognized word in
     * the top alternative of the recognition result using a speaker_tag provided
     * in the WordInfo.
     * Note: Use diarization_config instead.
     * </pre>
     *
     * <code>bool enable_speaker_diarization = 16 [deprecated = true];</code>
     *
     * @deprecated google.cloud.speech.v1p1beta1.RecognitionConfig.enable_speaker_diarization is
     *     deprecated. See google/cloud/speech/v1p1beta1/cloud_speech.proto;l=401
     * @param value The enableSpeakerDiarization to set.
     * @return This builder for chaining.
     */
    @java.lang.Deprecated
    public Builder setEnableSpeakerDiarization(boolean value) {
      enableSpeakerDiarization_ = value;
      bitField0_ |= 0x00010000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * If 'true', enables speaker detection for each recognized word in
     * the top alternative of the recognition result using a speaker_tag provided
     * in the WordInfo.
     * Note: Use diarization_config instead.
     * </pre>
     *
     * <code>bool enable_speaker_diarization = 16 [deprecated = true];</code>
     *
     * @deprecated google.cloud.speech.v1p1beta1.RecognitionConfig.enable_speaker_diarization is
     *     deprecated. See google/cloud/speech/v1p1beta1/cloud_speech.proto;l=401
     * @return This builder for chaining.
     */
    @java.lang.Deprecated
    public Builder clearEnableSpeakerDiarization() {
      bitField0_ = (bitField0_ & ~0x00010000);
      enableSpeakerDiarization_ = false;
      onChanged();
      return this;
    }

    private int diarizationSpeakerCount_;
    /**
     *
     *
     * <pre>
     * If set, specifies the estimated number of speakers in the conversation.
     * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
     * Note: Use diarization_config instead.
     * </pre>
     *
     * <code>int32 diarization_speaker_count = 17 [deprecated = true];</code>
     *
     * @deprecated google.cloud.speech.v1p1beta1.RecognitionConfig.diarization_speaker_count is
     *     deprecated. See google/cloud/speech/v1p1beta1/cloud_speech.proto;l=406
     * @return The diarizationSpeakerCount.
     */
    @java.lang.Override
    @java.lang.Deprecated
    public int getDiarizationSpeakerCount() {
      return diarizationSpeakerCount_;
    }
    /**
     *
     *
     * <pre>
     * If set, specifies the estimated number of speakers in the conversation.
     * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
     * Note: Use diarization_config instead.
     * </pre>
     *
     * <code>int32 diarization_speaker_count = 17 [deprecated = true];</code>
     *
     * @deprecated google.cloud.speech.v1p1beta1.RecognitionConfig.diarization_speaker_count is
     *     deprecated. See google/cloud/speech/v1p1beta1/cloud_speech.proto;l=406
     * @param value The diarizationSpeakerCount to set.
     * @return This builder for chaining.
     */
    @java.lang.Deprecated
    public Builder setDiarizationSpeakerCount(int value) {
      diarizationSpeakerCount_ = value;
      bitField0_ |= 0x00020000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * If set, specifies the estimated number of speakers in the conversation.
     * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
     * Note: Use diarization_config instead.
     * </pre>
     *
     * <code>int32 diarization_speaker_count = 17 [deprecated = true];</code>
     *
     * @deprecated google.cloud.speech.v1p1beta1.RecognitionConfig.diarization_speaker_count is
     *     deprecated. See google/cloud/speech/v1p1beta1/cloud_speech.proto;l=406
     * @return This builder for chaining.
     */
    @java.lang.Deprecated
    public Builder clearDiarizationSpeakerCount() {
      bitField0_ = (bitField0_ & ~0x00020000);
      diarizationSpeakerCount_ = 0;
      onChanged();
      return this;
    }
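
    // Illustrative migration sketch: enable_speaker_diarization and
    // diarization_speaker_count above are deprecated in favor of
    // diarization_config below. This assumes SpeakerDiarizationConfig exposes
    // setEnableSpeakerDiarization/setMinSpeakerCount/setMaxSpeakerCount as in
    // the public proto; those setter names are not shown in this file.
    //
    //   // Deprecated:
    //   //   builder.setEnableSpeakerDiarization(true).setDiarizationSpeakerCount(2);
    //   // Preferred:
    //   builder.setDiarizationConfig(
    //       com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.newBuilder()
    //           .setEnableSpeakerDiarization(true)
    //           .setMinSpeakerCount(2)
    //           .setMaxSpeakerCount(2)
    //           .build());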

    private com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarizationConfig_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig,
            com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.Builder,
            com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfigOrBuilder>
        diarizationConfigBuilder_;
    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
     *
     * @return Whether the diarizationConfig field is set.
     */
    public boolean hasDiarizationConfig() {
      return ((bitField0_ & 0x00040000) != 0);
    }
    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
     *
     * @return The diarizationConfig.
     */
    public com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig getDiarizationConfig() {
      if (diarizationConfigBuilder_ == null) {
        return diarizationConfig_ == null
            ? com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.getDefaultInstance()
            : diarizationConfig_;
      } else {
        return diarizationConfigBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
     */
    public Builder setDiarizationConfig(
        com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig value) {
      if (diarizationConfigBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        diarizationConfig_ = value;
      } else {
        diarizationConfigBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00040000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
     */
    public Builder setDiarizationConfig(
        com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.Builder builderForValue) {
      if (diarizationConfigBuilder_ == null) {
        diarizationConfig_ = builderForValue.build();
      } else {
        diarizationConfigBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00040000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
     */
    public Builder mergeDiarizationConfig(
        com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig value) {
      if (diarizationConfigBuilder_ == null) {
        if (((bitField0_ & 0x00040000) != 0)
            && diarizationConfig_ != null
            && diarizationConfig_
                != com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig
                    .getDefaultInstance()) {
          getDiarizationConfigBuilder().mergeFrom(value);
        } else {
          diarizationConfig_ = value;
        }
      } else {
        diarizationConfigBuilder_.mergeFrom(value);
      }
      bitField0_ |= 0x00040000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
     */
    public Builder clearDiarizationConfig() {
      bitField0_ = (bitField0_ & ~0x00040000);
      diarizationConfig_ = null;
      if (diarizationConfigBuilder_ != null) {
        diarizationConfigBuilder_.dispose();
        diarizationConfigBuilder_ = null;
      }
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
     */
    public com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.Builder
        getDiarizationConfigBuilder() {
      bitField0_ |= 0x00040000;
      onChanged();
      return getDiarizationConfigFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
     */
    public com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfigOrBuilder
        getDiarizationConfigOrBuilder() {
      if (diarizationConfigBuilder_ != null) {
        return diarizationConfigBuilder_.getMessageOrBuilder();
      } else {
        return diarizationConfig_ == null
            ? com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.getDefaultInstance()
            : diarizationConfig_;
      }
    }
    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig,
            com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.Builder,
            com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfigOrBuilder>
        getDiarizationConfigFieldBuilder() {
      if (diarizationConfigBuilder_ == null) {
        diarizationConfigBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig,
                com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.Builder,
                com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfigOrBuilder>(
                getDiarizationConfig(), getParentForChildren(), isClean());
        diarizationConfig_ = null;
      }
      return diarizationConfigBuilder_;
    }
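
    // Illustrative usage sketch of the nested-builder path: calling
    // getDiarizationConfigBuilder() marks the field as set and returns a child
    // builder that the SingleFieldBuilderV3 above keeps in sync with this
    // parent builder, so in-place mutation is safe. The child setter name is
    // assumed from the public proto.
    //
    //   RecognitionConfig.Builder builder = RecognitionConfig.newBuilder();
    //   builder.getDiarizationConfigBuilder().setEnableSpeakerDiarization(true);
    //   RecognitionConfig config = builder.build();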

    private com.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1p1beta1.RecognitionMetadata,
            com.google.cloud.speech.v1p1beta1.RecognitionMetadata.Builder,
            com.google.cloud.speech.v1p1beta1.RecognitionMetadataOrBuilder>
        metadataBuilder_;
    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;</code>
     *
     * @return Whether the metadata field is set.
     */
    public boolean hasMetadata() {
      return ((bitField0_ & 0x00080000) != 0);
    }
    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;</code>
     *
     * @return The metadata.
     */
    public com.google.cloud.speech.v1p1beta1.RecognitionMetadata getMetadata() {
      if (metadataBuilder_ == null) {
        return metadata_ == null
            ? com.google.cloud.speech.v1p1beta1.RecognitionMetadata.getDefaultInstance()
            : metadata_;
      } else {
        return metadataBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;</code>
     */
    public Builder setMetadata(com.google.cloud.speech.v1p1beta1.RecognitionMetadata value) {
      if (metadataBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        metadata_ = value;
      } else {
        metadataBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00080000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;</code>
     */
    public Builder setMetadata(
        com.google.cloud.speech.v1p1beta1.RecognitionMetadata.Builder builderForValue) {
      if (metadataBuilder_ == null) {
        metadata_ = builderForValue.build();
      } else {
        metadataBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00080000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;</code>
     */
    public Builder mergeMetadata(com.google.cloud.speech.v1p1beta1.RecognitionMetadata value) {
      if (metadataBuilder_ == null) {
        if (((bitField0_ & 0x00080000) != 0)
            && metadata_ != null
            && metadata_
                != com.google.cloud.speech.v1p1beta1.RecognitionMetadata.getDefaultInstance()) {
          getMetadataBuilder().mergeFrom(value);
        } else {
          metadata_ = value;
        }
      } else {
        metadataBuilder_.mergeFrom(value);
      }
      bitField0_ |= 0x00080000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;</code>
     */
    public Builder clearMetadata() {
      bitField0_ = (bitField0_ & ~0x00080000);
      metadata_ = null;
      if (metadataBuilder_ != null) {
        metadataBuilder_.dispose();
        metadataBuilder_ = null;
      }
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;</code>
     */
    public com.google.cloud.speech.v1p1beta1.RecognitionMetadata.Builder getMetadataBuilder() {
      bitField0_ |= 0x00080000;
      onChanged();
      return getMetadataFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;</code>
     */
    public com.google.cloud.speech.v1p1beta1.RecognitionMetadataOrBuilder getMetadataOrBuilder() {
      if (metadataBuilder_ != null) {
        return metadataBuilder_.getMessageOrBuilder();
      } else {
        return metadata_ == null
            ? com.google.cloud.speech.v1p1beta1.RecognitionMetadata.getDefaultInstance()
            : metadata_;
      }
    }
    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1p1beta1.RecognitionMetadata,
            com.google.cloud.speech.v1p1beta1.RecognitionMetadata.Builder,
            com.google.cloud.speech.v1p1beta1.RecognitionMetadataOrBuilder>
        getMetadataFieldBuilder() {
      if (metadataBuilder_ == null) {
        metadataBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.speech.v1p1beta1.RecognitionMetadata,
                com.google.cloud.speech.v1p1beta1.RecognitionMetadata.Builder,
                com.google.cloud.speech.v1p1beta1.RecognitionMetadataOrBuilder>(
                getMetadata(), getParentForChildren(), isClean());
        metadata_ = null;
      }
      return metadataBuilder_;
    }
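
    // Illustrative usage sketch: attaching RecognitionMetadata to the request.
    // The InteractionType enum and its PHONE_CALL value are assumptions about
    // the generated RecognitionMetadata message, which is not shown here.
    //
    //   RecognitionConfig config =
    //       RecognitionConfig.newBuilder()
    //           .setMetadata(
    //               com.google.cloud.speech.v1p1beta1.RecognitionMetadata.newBuilder()
    //                   .setInteractionType(
    //                       com.google.cloud.speech.v1p1beta1.RecognitionMetadata
    //                           .InteractionType.PHONE_CALL)
    //                   .build())
    //           .build();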

    private java.lang.Object model_ = "";
    /**
     *
     *
     * <pre>
     * Which model to select for the given request. Select the model
     * best suited to your domain to get the best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * &lt;table&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
     *     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;latest_long&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for long form content like media or conversation.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;latest_short&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for short form content like commands or single shot directed
     *     speech.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from a phone call (typically
     *     recorded at an 8 kHz sampling rate).&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16 kHz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16 kHz or greater sampling rate.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;medical_conversation&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from a conversation between a
     *         medical provider and patient.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;medical_dictation&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from dictation notes by a medical
     *         provider.&lt;/td&gt;
     *   &lt;/tr&gt;
     * &lt;/table&gt;
     * </pre>
     *
     * <code>string model = 13;</code>
     *
     * @return The model.
     */
    public java.lang.String getModel() {
      java.lang.Object ref = model_;
      if (!(ref instanceof java.lang.String)) {
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        model_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Which model to select for the given request. Select the model
     * best suited to your domain to get the best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * &lt;table&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
     *     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;latest_long&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for long form content like media or conversation.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;latest_short&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for short form content like commands or single shot directed
     *     speech.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from a phone call (typically
     *     recorded at an 8 kHz sampling rate).&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16 kHz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16 kHz or greater sampling rate.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;medical_conversation&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from a conversation between a
     *         medical provider and patient.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;medical_dictation&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from dictation notes by a medical
     *         provider.&lt;/td&gt;
     *   &lt;/tr&gt;
     * &lt;/table&gt;
     * </pre>
     *
     * <code>string model = 13;</code>
     *
     * @return The bytes for model.
     */
    public com.google.protobuf.ByteString getModelBytes() {
      java.lang.Object ref = model_;
      if (ref instanceof java.lang.String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        model_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Which model to select for the given request. Select the model
     * best suited to your domain to get the best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * &lt;table&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
     *     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;latest_long&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for long form content like media or conversation.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;latest_short&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for short form content like commands or single shot directed
     *     speech.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from a phone call (typically
     *     recorded at an 8 kHz sampling rate).&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16 kHz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16 kHz or greater sampling rate.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;medical_conversation&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from a conversation between a
     *         medical provider and patient.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;medical_dictation&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from dictation notes by a medical
     *         provider.&lt;/td&gt;
     *   &lt;/tr&gt;
     * &lt;/table&gt;
     * </pre>
     *
     * <code>string model = 13;</code>
     *
     * @param value The model to set.
     * @return This builder for chaining.
     */
    public Builder setModel(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      model_ = value;
      bitField0_ |= 0x00100000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Which model to select for the given request. Select the model
     * best suited to your domain to get the best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * &lt;table&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
     *     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;latest_long&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for long form content like media or conversation.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;latest_short&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for short form content like commands or single shot directed
     *     speech.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from a phone call (typically
     *     recorded at an 8 kHz sampling rate).&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16 kHz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16 kHz or greater sampling rate.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;medical_conversation&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from a conversation between a
     *         medical provider and patient.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;medical_dictation&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from dictation notes by a medical
     *         provider.&lt;/td&gt;
     *   &lt;/tr&gt;
     * &lt;/table&gt;
     * </pre>
     *
     * <code>string model = 13;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearModel() {
      model_ = getDefaultInstance().getModel();
      bitField0_ = (bitField0_ & ~0x00100000);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Which model to select for the given request. Select the model
     * best suited to your domain to get the best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * &lt;table&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
     *     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;latest_long&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for long form content like media or conversation.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;latest_short&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for short form content like commands or single shot directed
     *     speech.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from a phone call (typically
     *     recorded at an 8 kHz sampling rate).&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16 kHz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16 kHz or greater sampling rate.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;medical_conversation&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from a conversation between a
     *         medical provider and patient.&lt;/td&gt;
     *   &lt;/tr&gt;
     *   &lt;tr&gt;
     *     &lt;td&gt;&lt;code&gt;medical_dictation&lt;/code&gt;&lt;/td&gt;
     *     &lt;td&gt;Best for audio that originated from dictation notes by a medical
     *         provider.&lt;/td&gt;
     *   &lt;/tr&gt;
     * &lt;/table&gt;
     * </pre>
     *
     * <code>string model = 13;</code>
     *
     * @param value The bytes for model to set.
     * @return This builder for chaining.
     */
    public Builder setModelBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      model_ = value;
      bitField0_ |= 0x00100000;
      onChanged();
      return this;
    }
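
    // Illustrative usage sketch: selecting a model from the table documented
    // above. setLanguageCode is another generated setter on this builder,
    // shown only to make the example a plausible, complete request.
    //
    //   RecognitionConfig config =
    //       RecognitionConfig.newBuilder()
    //           .setLanguageCode("en-US")
    //           .setModel("phone_call")
    //           .build();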

    private boolean useEnhanced_;
    /**
     *
     *
     * <pre>
     * Set to true to use an enhanced model for speech recognition.
     * If `use_enhanced` is set to true and the `model` field is not set, then
     * an appropriate enhanced model is chosen if an enhanced model exists for
     * the audio.
     * If `use_enhanced` is true and an enhanced version of the specified model
     * does not exist, then the speech is recognized using the standard version
     * of the specified model.
     * </pre>
     *
     * <code>bool use_enhanced = 14;</code>
     *
     * @return The useEnhanced.
     */
    @java.lang.Override
    public boolean getUseEnhanced() {
      return useEnhanced_;
    }
    /**
     *
     *
     * <pre>
     * Set to true to use an enhanced model for speech recognition.
     * If `use_enhanced` is set to true and the `model` field is not set, then
     * an appropriate enhanced model is chosen if an enhanced model exists for
     * the audio.
     * If `use_enhanced` is true and an enhanced version of the specified model
     * does not exist, then the speech is recognized using the standard version
     * of the specified model.
     * </pre>
     *
     * <code>bool use_enhanced = 14;</code>
     *
     * @param value The useEnhanced to set.
     * @return This builder for chaining.
     */
    public Builder setUseEnhanced(boolean value) {
      useEnhanced_ = value;
      bitField0_ |= 0x00200000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Set to true to use an enhanced model for speech recognition.
     * If `use_enhanced` is set to true and the `model` field is not set, then
     * an appropriate enhanced model is chosen if an enhanced model exists for
     * the audio.
     * If `use_enhanced` is true and an enhanced version of the specified model
     * does not exist, then the speech is recognized using the standard version
     * of the specified model.
     * </pre>
     *
     * <code>bool use_enhanced = 14;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearUseEnhanced() {
      bitField0_ = (bitField0_ & ~0x00200000);
      useEnhanced_ = false;
      onChanged();
      return this;
    }
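
    // Illustrative usage sketch: use_enhanced pairs with model selection. Per
    // the docs above, if no enhanced variant exists for the chosen model the
    // service falls back to the standard version, so setting it is safe.
    //
    //   RecognitionConfig config =
    //       RecognitionConfig.newBuilder()
    //           .setModel("phone_call")
    //           .setUseEnhanced(true)
    //           .build();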

    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.speech.v1p1beta1.RecognitionConfig)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.RecognitionConfig)
  private static final com.google.cloud.speech.v1p1beta1.RecognitionConfig DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.speech.v1p1beta1.RecognitionConfig();
  }

  public static com.google.cloud.speech.v1p1beta1.RecognitionConfig getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }
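
  // Illustrative note: getDefaultInstance() returns the one canonical empty
  // message, which is why the builder's merge methods above can compare a
  // field against it with reference inequality (!=) to detect a value that is
  // still the default.
  //
  //   boolean unconfigured =
  //       config.equals(RecognitionConfig.getDefaultInstance());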

  private static final com.google.protobuf.Parser<RecognitionConfig> PARSER =
      new com.google.protobuf.AbstractParser<RecognitionConfig>() {
        @java.lang.Override
        public RecognitionConfig parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (com.google.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new com.google.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

  public static com.google.protobuf.Parser<RecognitionConfig> parser() {
    return PARSER;
  }
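
  // Illustrative usage sketch: round-tripping a config through its wire format
  // with the generated parser; parseFrom(byte[]) throws
  // InvalidProtocolBufferException on malformed input.
  //
  //   byte[] bytes = config.toByteArray();
  //   RecognitionConfig parsed = RecognitionConfig.parser().parseFrom(bytes);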

  @java.lang.Override
  public com.google.protobuf.Parser<RecognitionConfig> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.cloud.speech.v1p1beta1.RecognitionConfig getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
