/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/dialogflow/v2/audio_config.proto

package com.google.cloud.dialogflow.v2;

/**
 *
 *
 * <pre>
 * Instructs the speech recognizer how to process the audio content.
 * </pre>
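 *
 * <p>A minimal hand-written usage sketch (illustrative only; the encoding, sample rate, and
 * language values below are assumptions, not part of the generated code):
 *
 * <pre>{@code
 * InputAudioConfig config =
 *     InputAudioConfig.newBuilder()
 *         .setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16)
 *         .setSampleRateHertz(16000)
 *         .setLanguageCode("en-US")
 *         .build();
 * }</pre>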
 *
 * Protobuf type {@code google.cloud.dialogflow.v2.InputAudioConfig}
 */
public final class InputAudioConfig extends com.google.protobuf.GeneratedMessageV3
    implements
    // @@protoc_insertion_point(message_implements:google.cloud.dialogflow.v2.InputAudioConfig)
    InputAudioConfigOrBuilder {
  private static final long serialVersionUID = 0L;
  // Use InputAudioConfig.newBuilder() to construct.
  private InputAudioConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }

  private InputAudioConfig() {
    audioEncoding_ = 0;
    languageCode_ = "";
    phraseHints_ = com.google.protobuf.LazyStringArrayList.EMPTY;
    speechContexts_ = java.util.Collections.emptyList();
    model_ = "";
    modelVariant_ = 0;
  }

  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
    return new InputAudioConfig();
  }

  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
    return this.unknownFields;
  }

  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return com.google.cloud.dialogflow.v2.AudioConfigProto
        .internal_static_google_cloud_dialogflow_v2_InputAudioConfig_descriptor;
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.google.cloud.dialogflow.v2.AudioConfigProto
        .internal_static_google_cloud_dialogflow_v2_InputAudioConfig_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.dialogflow.v2.InputAudioConfig.class,
            com.google.cloud.dialogflow.v2.InputAudioConfig.Builder.class);
  }

  public static final int AUDIO_ENCODING_FIELD_NUMBER = 1;
  private int audioEncoding_ = 0;
  /**
   *
   *
   * <pre>
   * Required. Audio encoding of the audio content to process.
   * </pre>
   *
   * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1;</code>
   *
   * @return The enum numeric value on the wire for audioEncoding.
   */
  @java.lang.Override
  public int getAudioEncodingValue() {
    return audioEncoding_;
  }
  /**
   *
   *
   * <pre>
   * Required. Audio encoding of the audio content to process.
   * </pre>
   *
   * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1;</code>
   *
   * @return The audioEncoding.
   */
  @java.lang.Override
  public com.google.cloud.dialogflow.v2.AudioEncoding getAudioEncoding() {
    com.google.cloud.dialogflow.v2.AudioEncoding result =
        com.google.cloud.dialogflow.v2.AudioEncoding.forNumber(audioEncoding_);
    return result == null ? com.google.cloud.dialogflow.v2.AudioEncoding.UNRECOGNIZED : result;
  }

  public static final int SAMPLE_RATE_HERTZ_FIELD_NUMBER = 2;
  private int sampleRateHertz_ = 0;
  /**
   *
   *
   * <pre>
   * Required. Sample rate (in Hertz) of the audio content sent in the query.
   * Refer to
   * [Cloud Speech API
   * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
   * more details.
   * </pre>
   *
   * <code>int32 sample_rate_hertz = 2;</code>
   *
   * @return The sampleRateHertz.
   */
  @java.lang.Override
  public int getSampleRateHertz() {
    return sampleRateHertz_;
  }

  public static final int LANGUAGE_CODE_FIELD_NUMBER = 3;

  @SuppressWarnings("serial")
  private volatile java.lang.Object languageCode_ = "";
  /**
   *
   *
   * <pre>
   * Required. The language of the supplied audio. Dialogflow does not do
   * translations. See [Language
   * Support](https://cloud.google.com/dialogflow/docs/reference/language)
   * for a list of the currently supported language codes. Note that queries in
   * the same session do not necessarily need to specify the same language.
   * </pre>
   *
   * <code>string language_code = 3;</code>
   *
   * @return The languageCode.
   */
  @java.lang.Override
  public java.lang.String getLanguageCode() {
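    // The field lazily holds either a String or a UTF-8 ByteString (as parsed from the
    // wire); the first String access decodes the bytes and caches the String form.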
    java.lang.Object ref = languageCode_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      languageCode_ = s;
      return s;
    }
  }
  /**
   *
   *
   * <pre>
   * Required. The language of the supplied audio. Dialogflow does not do
   * translations. See [Language
   * Support](https://cloud.google.com/dialogflow/docs/reference/language)
   * for a list of the currently supported language codes. Note that queries in
   * the same session do not necessarily need to specify the same language.
   * </pre>
   *
   * <code>string language_code = 3;</code>
   *
   * @return The bytes for languageCode.
   */
  @java.lang.Override
  public com.google.protobuf.ByteString getLanguageCodeBytes() {
    java.lang.Object ref = languageCode_;
    if (ref instanceof java.lang.String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
      languageCode_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }

  public static final int ENABLE_WORD_INFO_FIELD_NUMBER = 13;
  private boolean enableWordInfo_ = false;
  /**
   *
   *
   * <pre>
   * If `true`, Dialogflow returns
   * [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
   * [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
   * with information about the recognized speech words, e.g. start and end time
   * offsets. If false or unspecified, Speech doesn't return any word-level
   * information.
   * </pre>
   *
   * <code>bool enable_word_info = 13;</code>
   *
   * @return The enableWordInfo.
   */
  @java.lang.Override
  public boolean getEnableWordInfo() {
    return enableWordInfo_;
  }

  public static final int PHRASE_HINTS_FIELD_NUMBER = 4;

  @SuppressWarnings("serial")
  private com.google.protobuf.LazyStringList phraseHints_;
  /**
   *
   *
   * <pre>
   * A list of strings containing words and phrases that the speech
   * recognizer should recognize with higher likelihood.
   * See [the Cloud Speech
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
   * for more details.
   * This field is deprecated. Please use [speech_contexts]() instead. If you
   * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
   * treat the [phrase_hints]() as a single additional [SpeechContext]().
   * </pre>
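   *
   * <p>Hand-written migration sketch (illustrative; the phrase value is an assumption):
   * rather than phrase hints, attach the phrases through a {@code SpeechContext}:
   *
   * <pre>{@code
   * InputAudioConfig config =
   *     InputAudioConfig.newBuilder()
   *         .addSpeechContexts(
   *             SpeechContext.newBuilder().addPhrases("weather forecast").build())
   *         .build();
   * }</pre>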
   *
   * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
   *
   * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
   *     google/cloud/dialogflow/v2/audio_config.proto;l=223
   * @return A list containing the phraseHints.
   */
  @java.lang.Deprecated
  public com.google.protobuf.ProtocolStringList getPhraseHintsList() {
    return phraseHints_;
  }
  /**
   *
   *
   * <pre>
   * A list of strings containing words and phrases that the speech
   * recognizer should recognize with higher likelihood.
   * See [the Cloud Speech
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
   * for more details.
   * This field is deprecated. Please use [speech_contexts]() instead. If you
   * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
   * treat the [phrase_hints]() as a single additional [SpeechContext]().
   * </pre>
   *
   * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
   *
   * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
   *     google/cloud/dialogflow/v2/audio_config.proto;l=223
   * @return The count of phraseHints.
   */
  @java.lang.Deprecated
  public int getPhraseHintsCount() {
    return phraseHints_.size();
  }
  /**
   *
   *
   * <pre>
   * A list of strings containing words and phrases that the speech
   * recognizer should recognize with higher likelihood.
   * See [the Cloud Speech
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
   * for more details.
   * This field is deprecated. Please use [speech_contexts]() instead. If you
   * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
   * treat the [phrase_hints]() as a single additional [SpeechContext]().
   * </pre>
   *
   * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
   *
   * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
   *     google/cloud/dialogflow/v2/audio_config.proto;l=223
   * @param index The index of the element to return.
   * @return The phraseHints at the given index.
   */
  @java.lang.Deprecated
  public java.lang.String getPhraseHints(int index) {
    return phraseHints_.get(index);
  }
  /**
   *
   *
   * <pre>
   * A list of strings containing words and phrases that the speech
   * recognizer should recognize with higher likelihood.
   * See [the Cloud Speech
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
   * for more details.
   * This field is deprecated. Please use [speech_contexts]() instead. If you
   * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
   * treat the [phrase_hints]() as a single additional [SpeechContext]().
   * </pre>
   *
   * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
   *
   * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
   *     google/cloud/dialogflow/v2/audio_config.proto;l=223
   * @param index The index of the value to return.
   * @return The bytes of the phraseHints at the given index.
   */
  @java.lang.Deprecated
  public com.google.protobuf.ByteString getPhraseHintsBytes(int index) {
    return phraseHints_.getByteString(index);
  }

  public static final int SPEECH_CONTEXTS_FIELD_NUMBER = 11;

  @SuppressWarnings("serial")
  private java.util.List<com.google.cloud.dialogflow.v2.SpeechContext> speechContexts_;
  /**
   *
   *
   * <pre>
   * Context information to assist speech recognition.
   * See [the Cloud Speech
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
   * for more details.
   * </pre>
   *
   * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
   */
  @java.lang.Override
  public java.util.List<com.google.cloud.dialogflow.v2.SpeechContext> getSpeechContextsList() {
    return speechContexts_;
  }
  /**
   *
   *
   * <pre>
   * Context information to assist speech recognition.
   * See [the Cloud Speech
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
   * for more details.
   * </pre>
   *
   * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
   */
  @java.lang.Override
  public java.util.List<? extends com.google.cloud.dialogflow.v2.SpeechContextOrBuilder>
      getSpeechContextsOrBuilderList() {
    return speechContexts_;
  }
  /**
   *
   *
   * <pre>
   * Context information to assist speech recognition.
   * See [the Cloud Speech
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
   * for more details.
   * </pre>
   *
   * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
   */
  @java.lang.Override
  public int getSpeechContextsCount() {
    return speechContexts_.size();
  }
  /**
   *
   *
   * <pre>
   * Context information to assist speech recognition.
   * See [the Cloud Speech
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
   * for more details.
   * </pre>
   *
   * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
   */
  @java.lang.Override
  public com.google.cloud.dialogflow.v2.SpeechContext getSpeechContexts(int index) {
    return speechContexts_.get(index);
  }
  /**
   *
   *
   * <pre>
   * Context information to assist speech recognition.
   * See [the Cloud Speech
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
   * for more details.
   * </pre>
   *
   * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
   */
  @java.lang.Override
  public com.google.cloud.dialogflow.v2.SpeechContextOrBuilder getSpeechContextsOrBuilder(
      int index) {
    return speechContexts_.get(index);
  }

  public static final int MODEL_FIELD_NUMBER = 7;

  @SuppressWarnings("serial")
  private volatile java.lang.Object model_ = "";
  /**
   *
   *
   * <pre>
   * Which Speech model to select for the given request. Select the
   * model best suited to your domain to get the best results. If a model is
   * not explicitly specified, then we auto-select a model based on the
   * parameters in the InputAudioConfig.
   * If an enhanced speech model is enabled for the agent and an enhanced
   * version of the specified model for the language does not exist, then the
   * speech is recognized using the standard version of the specified model.
   * Refer to
   * [Cloud Speech API
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
   * for more details.
   * If you specify a model, the following models typically have the best
   * performance:
   * - phone_call (best for Agent Assist and telephony)
   * - latest_short (best for Dialogflow non-telephony)
   * - command_and_search (best for very short utterances and commands)
   * </pre>
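   *
   * <p>For example (hand-written sketch; {@code "phone_call"} is one of the model names
   * listed above):
   *
   * <pre>{@code
   * InputAudioConfig config =
   *     InputAudioConfig.newBuilder().setModel("phone_call").build();
   * }</pre>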
   *
   * <code>string model = 7;</code>
   *
   * @return The model.
   */
  @java.lang.Override
  public java.lang.String getModel() {
    java.lang.Object ref = model_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      model_ = s;
      return s;
    }
  }
  /**
   *
   *
   * <pre>
   * Which Speech model to select for the given request. Select the
   * model best suited to your domain to get the best results. If a model is
   * not explicitly specified, then we auto-select a model based on the
   * parameters in the InputAudioConfig.
   * If an enhanced speech model is enabled for the agent and an enhanced
   * version of the specified model for the language does not exist, then the
   * speech is recognized using the standard version of the specified model.
   * Refer to
   * [Cloud Speech API
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
   * for more details.
   * If you specify a model, the following models typically have the best
   * performance:
   * - phone_call (best for Agent Assist and telephony)
   * - latest_short (best for Dialogflow non-telephony)
   * - command_and_search (best for very short utterances and commands)
   * </pre>
   *
   * <code>string model = 7;</code>
   *
   * @return The bytes for model.
   */
  @java.lang.Override
  public com.google.protobuf.ByteString getModelBytes() {
    java.lang.Object ref = model_;
    if (ref instanceof java.lang.String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
      model_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }

  public static final int MODEL_VARIANT_FIELD_NUMBER = 10;
  private int modelVariant_ = 0;
  /**
   *
   *
   * <pre>
   * Which variant of the [Speech
   * model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
   * </pre>
   *
   * <code>.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;</code>
   *
   * @return The enum numeric value on the wire for modelVariant.
   */
  @java.lang.Override
  public int getModelVariantValue() {
    return modelVariant_;
  }
  /**
   *
   *
   * <pre>
   * Which variant of the [Speech
   * model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
   * </pre>
   *
   * <code>.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;</code>
   *
   * @return The modelVariant.
   */
  @java.lang.Override
  public com.google.cloud.dialogflow.v2.SpeechModelVariant getModelVariant() {
    com.google.cloud.dialogflow.v2.SpeechModelVariant result =
        com.google.cloud.dialogflow.v2.SpeechModelVariant.forNumber(modelVariant_);
    return result == null ? com.google.cloud.dialogflow.v2.SpeechModelVariant.UNRECOGNIZED : result;
  }

  public static final int SINGLE_UTTERANCE_FIELD_NUMBER = 8;
  private boolean singleUtterance_ = false;
  /**
   *
   *
   * <pre>
   * If `false` (default), recognition does not cease until the
   * client closes the stream.
   * If `true`, the recognizer will detect a single spoken utterance in input
   * audio. Recognition ceases when it detects that the speaker's voice has
   * stopped or paused. In this case, once a detected intent is received, the
   * client should close the stream and start a new request with a new stream as
   * needed.
   * Note: This setting is relevant only for streaming methods.
   * Note: When specified, InputAudioConfig.single_utterance takes precedence
   * over StreamingDetectIntentRequest.single_utterance.
   * </pre>
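   *
   * <p>Hand-written sketch (illustrative; the encoding and language values are assumptions)
   * enabling single-utterance detection for a streaming request:
   *
   * <pre>{@code
   * InputAudioConfig config =
   *     InputAudioConfig.newBuilder()
   *         .setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16)
   *         .setSampleRateHertz(16000)
   *         .setLanguageCode("en-US")
   *         .setSingleUtterance(true)
   *         .build();
   * }</pre>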
   *
   * <code>bool single_utterance = 8;</code>
   *
   * @return The singleUtterance.
   */
  @java.lang.Override
  public boolean getSingleUtterance() {
    return singleUtterance_;
  }

  public static final int DISABLE_NO_SPEECH_RECOGNIZED_EVENT_FIELD_NUMBER = 14;
  private boolean disableNoSpeechRecognizedEvent_ = false;
  /**
   *
   *
   * <pre>
   * Only used in
   * [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
   * and
   * [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent].
   * If `false` and recognition doesn't return any result, a
   * `NO_SPEECH_RECOGNIZED` event is triggered and sent to the Dialogflow agent.
   * </pre>
   *
   * <code>bool disable_no_speech_recognized_event = 14;</code>
   *
   * @return The disableNoSpeechRecognizedEvent.
   */
  @java.lang.Override
  public boolean getDisableNoSpeechRecognizedEvent() {
    return disableNoSpeechRecognizedEvent_;
  }

  public static final int ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER = 17;
  private boolean enableAutomaticPunctuation_ = false;
  /**
   *
   *
   * <pre>
   * Enables the automatic punctuation option at the speech backend.
   * </pre>
   *
   * <code>bool enable_automatic_punctuation = 17;</code>
   *
   * @return The enableAutomaticPunctuation.
   */
  @java.lang.Override
  public boolean getEnableAutomaticPunctuation() {
    return enableAutomaticPunctuation_;
  }

  private byte memoizedIsInitialized = -1;

  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
    if (audioEncoding_
        != com.google.cloud.dialogflow.v2.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED.getNumber()) {
      output.writeEnum(1, audioEncoding_);
    }
    if (sampleRateHertz_ != 0) {
      output.writeInt32(2, sampleRateHertz_);
    }
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
      com.google.protobuf.GeneratedMessageV3.writeString(output, 3, languageCode_);
    }
    for (int i = 0; i < phraseHints_.size(); i++) {
      com.google.protobuf.GeneratedMessageV3.writeString(output, 4, phraseHints_.getRaw(i));
    }
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) {
      com.google.protobuf.GeneratedMessageV3.writeString(output, 7, model_);
    }
    if (singleUtterance_ != false) {
      output.writeBool(8, singleUtterance_);
    }
    if (modelVariant_
        != com.google.cloud.dialogflow.v2.SpeechModelVariant.SPEECH_MODEL_VARIANT_UNSPECIFIED
            .getNumber()) {
      output.writeEnum(10, modelVariant_);
    }
    for (int i = 0; i < speechContexts_.size(); i++) {
      output.writeMessage(11, speechContexts_.get(i));
    }
    if (enableWordInfo_ != false) {
      output.writeBool(13, enableWordInfo_);
    }
    if (disableNoSpeechRecognizedEvent_ != false) {
      output.writeBool(14, disableNoSpeechRecognizedEvent_);
    }
    if (enableAutomaticPunctuation_ != false) {
      output.writeBool(17, enableAutomaticPunctuation_);
    }
    getUnknownFields().writeTo(output);
  }

  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    if (audioEncoding_
        != com.google.cloud.dialogflow.v2.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED.getNumber()) {
      size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, audioEncoding_);
    }
    if (sampleRateHertz_ != 0) {
      size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, sampleRateHertz_);
    }
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, languageCode_);
    }
    {
      int dataSize = 0;
      for (int i = 0; i < phraseHints_.size(); i++) {
        dataSize += computeStringSizeNoTag(phraseHints_.getRaw(i));
      }
      size += dataSize;
      size += 1 * getPhraseHintsList().size();
    }
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) {
      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, model_);
    }
    if (singleUtterance_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(8, singleUtterance_);
    }
    if (modelVariant_
        != com.google.cloud.dialogflow.v2.SpeechModelVariant.SPEECH_MODEL_VARIANT_UNSPECIFIED
            .getNumber()) {
      size += com.google.protobuf.CodedOutputStream.computeEnumSize(10, modelVariant_);
    }
    for (int i = 0; i < speechContexts_.size(); i++) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(11, speechContexts_.get(i));
    }
    if (enableWordInfo_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(13, enableWordInfo_);
    }
    if (disableNoSpeechRecognizedEvent_ != false) {
      size +=
          com.google.protobuf.CodedOutputStream.computeBoolSize(
              14, disableNoSpeechRecognizedEvent_);
    }
    if (enableAutomaticPunctuation_ != false) {
      size +=
          com.google.protobuf.CodedOutputStream.computeBoolSize(17, enableAutomaticPunctuation_);
    }
    size += getUnknownFields().getSerializedSize();
    memoizedSize = size;
    return size;
  }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof com.google.cloud.dialogflow.v2.InputAudioConfig)) {
      return super.equals(obj);
    }
    com.google.cloud.dialogflow.v2.InputAudioConfig other =
        (com.google.cloud.dialogflow.v2.InputAudioConfig) obj;

    if (audioEncoding_ != other.audioEncoding_) return false;
    if (getSampleRateHertz() != other.getSampleRateHertz()) return false;
    if (!getLanguageCode().equals(other.getLanguageCode())) return false;
    if (getEnableWordInfo() != other.getEnableWordInfo()) return false;
    if (!getPhraseHintsList().equals(other.getPhraseHintsList())) return false;
    if (!getSpeechContextsList().equals(other.getSpeechContextsList())) return false;
    if (!getModel().equals(other.getModel())) return false;
    if (modelVariant_ != other.modelVariant_) return false;
    if (getSingleUtterance() != other.getSingleUtterance()) return false;
    if (getDisableNoSpeechRecognizedEvent() != other.getDisableNoSpeechRecognizedEvent())
      return false;
    if (getEnableAutomaticPunctuation() != other.getEnableAutomaticPunctuation()) return false;
    if (!getUnknownFields().equals(other.getUnknownFields())) return false;
    return true;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    hash = (37 * hash) + AUDIO_ENCODING_FIELD_NUMBER;
    hash = (53 * hash) + audioEncoding_;
    hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER;
    hash = (53 * hash) + getSampleRateHertz();
    hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER;
    hash = (53 * hash) + getLanguageCode().hashCode();
    hash = (37 * hash) + ENABLE_WORD_INFO_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordInfo());
    if (getPhraseHintsCount() > 0) {
      hash = (37 * hash) + PHRASE_HINTS_FIELD_NUMBER;
      hash = (53 * hash) + getPhraseHintsList().hashCode();
    }
    if (getSpeechContextsCount() > 0) {
      hash = (37 * hash) + SPEECH_CONTEXTS_FIELD_NUMBER;
      hash = (53 * hash) + getSpeechContextsList().hashCode();
    }
    hash = (37 * hash) + MODEL_FIELD_NUMBER;
    hash = (53 * hash) + getModel().hashCode();
    hash = (37 * hash) + MODEL_VARIANT_FIELD_NUMBER;
    hash = (53 * hash) + modelVariant_;
    hash = (37 * hash) + SINGLE_UTTERANCE_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSingleUtterance());
    hash = (37 * hash) + DISABLE_NO_SPEECH_RECOGNIZED_EVENT_FIELD_NUMBER;
    hash =
        (53 * hash) + com.google.protobuf.Internal.hashBoolean(getDisableNoSpeechRecognizedEvent());
    hash = (37 * hash) + ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableAutomaticPunctuation());
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(java.nio.ByteBuffer data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.dialogflow.v2.InputAudioConfig parseDelimitedFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
  }

  public static com.google.cloud.dialogflow.v2.InputAudioConfig parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  @java.lang.Override
  public Builder newBuilderForType() {
    return newBuilder();
  }

  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }

  public static Builder newBuilder(com.google.cloud.dialogflow.v2.InputAudioConfig prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }

  @java.lang.Override
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   *
   *
   * <pre>
   * Instructs the speech recognizer how to process the audio content.
   * </pre>
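   *
   * <p>Hand-written sketch (illustrative; the new sample rate is an assumption) showing how
   * an existing message can be copied and modified via {@code toBuilder()}:
   *
   * <pre>{@code
   * InputAudioConfig updated = config.toBuilder().setSampleRateHertz(8000).build();
   * }</pre>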
   *
   * Protobuf type {@code google.cloud.dialogflow.v2.InputAudioConfig}
   */
  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
      implements
      // @@protoc_insertion_point(builder_implements:google.cloud.dialogflow.v2.InputAudioConfig)
      com.google.cloud.dialogflow.v2.InputAudioConfigOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return com.google.cloud.dialogflow.v2.AudioConfigProto
          .internal_static_google_cloud_dialogflow_v2_InputAudioConfig_descriptor;
    }

    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.cloud.dialogflow.v2.AudioConfigProto
          .internal_static_google_cloud_dialogflow_v2_InputAudioConfig_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.cloud.dialogflow.v2.InputAudioConfig.class,
              com.google.cloud.dialogflow.v2.InputAudioConfig.Builder.class);
    }

    // Construct using com.google.cloud.dialogflow.v2.InputAudioConfig.newBuilder()
    private Builder() {}

    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
    }

    @java.lang.Override
    public Builder clear() {
      super.clear();
      bitField0_ = 0;
      audioEncoding_ = 0;
      sampleRateHertz_ = 0;
      languageCode_ = "";
      enableWordInfo_ = false;
      phraseHints_ = com.google.protobuf.LazyStringArrayList.EMPTY;
      bitField0_ = (bitField0_ & ~0x00000010);
      if (speechContextsBuilder_ == null) {
        speechContexts_ = java.util.Collections.emptyList();
      } else {
        speechContexts_ = null;
        speechContextsBuilder_.clear();
      }
      bitField0_ = (bitField0_ & ~0x00000020);
      model_ = "";
      modelVariant_ = 0;
      singleUtterance_ = false;
      disableNoSpeechRecognizedEvent_ = false;
      enableAutomaticPunctuation_ = false;
      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.cloud.dialogflow.v2.AudioConfigProto
          .internal_static_google_cloud_dialogflow_v2_InputAudioConfig_descriptor;
    }

    @java.lang.Override
    public com.google.cloud.dialogflow.v2.InputAudioConfig getDefaultInstanceForType() {
      return com.google.cloud.dialogflow.v2.InputAudioConfig.getDefaultInstance();
    }

    @java.lang.Override
    public com.google.cloud.dialogflow.v2.InputAudioConfig build() {
      com.google.cloud.dialogflow.v2.InputAudioConfig result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public com.google.cloud.dialogflow.v2.InputAudioConfig buildPartial() {
      com.google.cloud.dialogflow.v2.InputAudioConfig result =
          new com.google.cloud.dialogflow.v2.InputAudioConfig(this);
      buildPartialRepeatedFields(result);
      if (bitField0_ != 0) {
        buildPartial0(result);
      }
      onBuilt();
      return result;
    }

    private void buildPartialRepeatedFields(
        com.google.cloud.dialogflow.v2.InputAudioConfig result) {
      if (((bitField0_ & 0x00000010) != 0)) {
        phraseHints_ = phraseHints_.getUnmodifiableView();
        bitField0_ = (bitField0_ & ~0x00000010);
      }
      result.phraseHints_ = phraseHints_;
      if (speechContextsBuilder_ == null) {
        if (((bitField0_ & 0x00000020) != 0)) {
          speechContexts_ = java.util.Collections.unmodifiableList(speechContexts_);
          bitField0_ = (bitField0_ & ~0x00000020);
        }
        result.speechContexts_ = speechContexts_;
      } else {
        result.speechContexts_ = speechContextsBuilder_.build();
      }
    }

    private void buildPartial0(com.google.cloud.dialogflow.v2.InputAudioConfig result) {
      int from_bitField0_ = bitField0_;
      if (((from_bitField0_ & 0x00000001) != 0)) {
        result.audioEncoding_ = audioEncoding_;
      }
      if (((from_bitField0_ & 0x00000002) != 0)) {
        result.sampleRateHertz_ = sampleRateHertz_;
      }
      if (((from_bitField0_ & 0x00000004) != 0)) {
        result.languageCode_ = languageCode_;
      }
      if (((from_bitField0_ & 0x00000008) != 0)) {
        result.enableWordInfo_ = enableWordInfo_;
      }
      if (((from_bitField0_ & 0x00000040) != 0)) {
        result.model_ = model_;
      }
      if (((from_bitField0_ & 0x00000080) != 0)) {
        result.modelVariant_ = modelVariant_;
      }
      if (((from_bitField0_ & 0x00000100) != 0)) {
        result.singleUtterance_ = singleUtterance_;
      }
      if (((from_bitField0_ & 0x00000200) != 0)) {
        result.disableNoSpeechRecognizedEvent_ = disableNoSpeechRecognizedEvent_;
      }
      if (((from_bitField0_ & 0x00000400) != 0)) {
        result.enableAutomaticPunctuation_ = enableAutomaticPunctuation_;
      }
    }

    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }

    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.setField(field, value);
    }

    @java.lang.Override
    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }

    @java.lang.Override
    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }

    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }

    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }

    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.cloud.dialogflow.v2.InputAudioConfig) {
        return mergeFrom((com.google.cloud.dialogflow.v2.InputAudioConfig) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(com.google.cloud.dialogflow.v2.InputAudioConfig other) {
      if (other == com.google.cloud.dialogflow.v2.InputAudioConfig.getDefaultInstance())
        return this;
      if (other.audioEncoding_ != 0) {
        setAudioEncodingValue(other.getAudioEncodingValue());
      }
      if (other.getSampleRateHertz() != 0) {
        setSampleRateHertz(other.getSampleRateHertz());
      }
      if (!other.getLanguageCode().isEmpty()) {
        languageCode_ = other.languageCode_;
        bitField0_ |= 0x00000004;
        onChanged();
      }
      if (other.getEnableWordInfo() != false) {
        setEnableWordInfo(other.getEnableWordInfo());
      }
      if (!other.phraseHints_.isEmpty()) {
        if (phraseHints_.isEmpty()) {
          phraseHints_ = other.phraseHints_;
          bitField0_ = (bitField0_ & ~0x00000010);
        } else {
          ensurePhraseHintsIsMutable();
          phraseHints_.addAll(other.phraseHints_);
        }
        onChanged();
      }
      if (speechContextsBuilder_ == null) {
        if (!other.speechContexts_.isEmpty()) {
          if (speechContexts_.isEmpty()) {
            speechContexts_ = other.speechContexts_;
            bitField0_ = (bitField0_ & ~0x00000020);
          } else {
            ensureSpeechContextsIsMutable();
            speechContexts_.addAll(other.speechContexts_);
          }
          onChanged();
        }
      } else {
        if (!other.speechContexts_.isEmpty()) {
          if (speechContextsBuilder_.isEmpty()) {
            speechContextsBuilder_.dispose();
            speechContextsBuilder_ = null;
            speechContexts_ = other.speechContexts_;
            bitField0_ = (bitField0_ & ~0x00000020);
            speechContextsBuilder_ =
                com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
                    ? getSpeechContextsFieldBuilder()
                    : null;
          } else {
            speechContextsBuilder_.addAllMessages(other.speechContexts_);
          }
        }
      }
      if (!other.getModel().isEmpty()) {
        model_ = other.model_;
        bitField0_ |= 0x00000040;
        onChanged();
      }
      if (other.modelVariant_ != 0) {
        setModelVariantValue(other.getModelVariantValue());
      }
      if (other.getSingleUtterance() != false) {
        setSingleUtterance(other.getSingleUtterance());
      }
      if (other.getDisableNoSpeechRecognizedEvent() != false) {
        setDisableNoSpeechRecognizedEvent(other.getDisableNoSpeechRecognizedEvent());
      }
      if (other.getEnableAutomaticPunctuation() != false) {
        setEnableAutomaticPunctuation(other.getEnableAutomaticPunctuation());
      }
      this.mergeUnknownFields(other.getUnknownFields());
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
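          // Each tag value is (field_number << 3) | wire_type; e.g. 26 == (3 << 3) | 2
          // (field 3, length-delimited) for language_code, and 0 signals end of input.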
          switch (tag) {
            case 0:
              done = true;
              break;
            case 8:
              {
                audioEncoding_ = input.readEnum();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
            case 16:
              {
                sampleRateHertz_ = input.readInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
            case 26:
              {
                languageCode_ = input.readStringRequireUtf8();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
            case 34:
              {
                java.lang.String s = input.readStringRequireUtf8();
                ensurePhraseHintsIsMutable();
                phraseHints_.add(s);
                break;
              } // case 34
            case 58:
              {
                model_ = input.readStringRequireUtf8();
                bitField0_ |= 0x00000040;
                break;
              } // case 58
            case 64:
              {
                singleUtterance_ = input.readBool();
                bitField0_ |= 0x00000100;
                break;
              } // case 64
            case 80:
              {
                modelVariant_ = input.readEnum();
                bitField0_ |= 0x00000080;
                break;
              } // case 80
            case 90:
              {
                com.google.cloud.dialogflow.v2.SpeechContext m =
                    input.readMessage(
                        com.google.cloud.dialogflow.v2.SpeechContext.parser(), extensionRegistry);
                if (speechContextsBuilder_ == null) {
                  ensureSpeechContextsIsMutable();
                  speechContexts_.add(m);
                } else {
                  speechContextsBuilder_.addMessage(m);
                }
                break;
              } // case 90
            case 104:
              {
                enableWordInfo_ = input.readBool();
                bitField0_ |= 0x00000008;
                break;
              } // case 104
            case 112:
              {
                disableNoSpeechRecognizedEvent_ = input.readBool();
                bitField0_ |= 0x00000200;
                break;
              } // case 112
            case 136:
              {
                enableAutomaticPunctuation_ = input.readBool();
                bitField0_ |= 0x00000400;
                break;
              } // case 136
            default:
              {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
          } // switch (tag)
        } // while (!done)
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.unwrapIOException();
      } finally {
        onChanged();
      } // finally
      return this;
    }

    private int bitField0_;

    private int audioEncoding_ = 0;
    /**
     *
     *
     * <pre>
     * Required. Audio encoding of the audio content to process.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1;</code>
     *
     * @return The enum numeric value on the wire for audioEncoding.
     */
    @java.lang.Override
    public int getAudioEncodingValue() {
      return audioEncoding_;
    }
    /**
     *
     *
     * <pre>
     * Required. Audio encoding of the audio content to process.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1;</code>
     *
     * @param value The enum numeric value on the wire for audioEncoding to set.
     * @return This builder for chaining.
     */
    public Builder setAudioEncodingValue(int value) {
      audioEncoding_ = value;
      bitField0_ |= 0x00000001;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. Audio encoding of the audio content to process.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1;</code>
     *
     * @return The audioEncoding.
     */
    @java.lang.Override
    public com.google.cloud.dialogflow.v2.AudioEncoding getAudioEncoding() {
      com.google.cloud.dialogflow.v2.AudioEncoding result =
          com.google.cloud.dialogflow.v2.AudioEncoding.forNumber(audioEncoding_);
      return result == null ? com.google.cloud.dialogflow.v2.AudioEncoding.UNRECOGNIZED : result;
    }
    /**
     *
     *
     * <pre>
     * Required. Audio encoding of the audio content to process.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1;</code>
     *
     * @param value The audioEncoding to set.
     * @return This builder for chaining.
     */
    public Builder setAudioEncoding(com.google.cloud.dialogflow.v2.AudioEncoding value) {
      if (value == null) {
        throw new NullPointerException();
      }
      bitField0_ |= 0x00000001;
      audioEncoding_ = value.getNumber();
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. Audio encoding of the audio content to process.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearAudioEncoding() {
      bitField0_ = (bitField0_ & ~0x00000001);
      audioEncoding_ = 0;
      onChanged();
      return this;
    }

    private int sampleRateHertz_;
    /**
     *
     *
     * <pre>
     * Required. Sample rate (in Hertz) of the audio content sent in the query.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
     * more details.
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 2;</code>
     *
     * @return The sampleRateHertz.
     */
    @java.lang.Override
    public int getSampleRateHertz() {
      return sampleRateHertz_;
    }
    /**
     *
     *
     * <pre>
     * Required. Sample rate (in Hertz) of the audio content sent in the query.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
     * more details.
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 2;</code>
     *
     * @param value The sampleRateHertz to set.
     * @return This builder for chaining.
     */
    public Builder setSampleRateHertz(int value) {
      sampleRateHertz_ = value;
      bitField0_ |= 0x00000002;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. Sample rate (in Hertz) of the audio content sent in the query.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
     * more details.
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 2;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearSampleRateHertz() {
      bitField0_ = (bitField0_ & ~0x00000002);
      sampleRateHertz_ = 0;
      onChanged();
      return this;
    }

    private java.lang.Object languageCode_ = "";
    /**
     *
     *
     * <pre>
     * Required. The language of the supplied audio. Dialogflow does not do
     * translations. See [Language
     * Support](https://cloud.google.com/dialogflow/docs/reference/language)
     * for a list of the currently supported language codes. Note that queries in
     * the same session do not necessarily need to specify the same language.
     * </pre>
     *
     * <code>string language_code = 3;</code>
     *
     * @return The languageCode.
     */
    public java.lang.String getLanguageCode() {
      java.lang.Object ref = languageCode_;
      if (!(ref instanceof java.lang.String)) {
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        languageCode_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Required. The language of the supplied audio. Dialogflow does not do
     * translations. See [Language
     * Support](https://cloud.google.com/dialogflow/docs/reference/language)
     * for a list of the currently supported language codes. Note that queries in
     * the same session do not necessarily need to specify the same language.
     * </pre>
     *
     * <code>string language_code = 3;</code>
     *
     * @return The bytes for languageCode.
     */
    public com.google.protobuf.ByteString getLanguageCodeBytes() {
      java.lang.Object ref = languageCode_;
      if (ref instanceof java.lang.String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        languageCode_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Required. The language of the supplied audio. Dialogflow does not do
     * translations. See [Language
     * Support](https://cloud.google.com/dialogflow/docs/reference/language)
     * for a list of the currently supported language codes. Note that queries in
     * the same session do not necessarily need to specify the same language.
     * </pre>
     *
     * <code>string language_code = 3;</code>
     *
     * @param value The languageCode to set.
     * @return This builder for chaining.
     */
    public Builder setLanguageCode(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      languageCode_ = value;
      bitField0_ |= 0x00000004;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. The language of the supplied audio. Dialogflow does not do
     * translations. See [Language
     * Support](https://cloud.google.com/dialogflow/docs/reference/language)
     * for a list of the currently supported language codes. Note that queries in
     * the same session do not necessarily need to specify the same language.
     * </pre>
     *
     * <code>string language_code = 3;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearLanguageCode() {
      languageCode_ = getDefaultInstance().getLanguageCode();
      bitField0_ = (bitField0_ & ~0x00000004);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. The language of the supplied audio. Dialogflow does not do
     * translations. See [Language
     * Support](https://cloud.google.com/dialogflow/docs/reference/language)
     * for a list of the currently supported language codes. Note that queries in
     * the same session do not necessarily need to specify the same language.
     * </pre>
     *
     * <code>string language_code = 3;</code>
     *
     * @param value The bytes for languageCode to set.
     * @return This builder for chaining.
     */
    public Builder setLanguageCodeBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      languageCode_ = value;
      bitField0_ |= 0x00000004;
      onChanged();
      return this;
    }

    private boolean enableWordInfo_;
    /**
     *
     *
     * <pre>
     * If `true`, Dialogflow returns
     * [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
     * [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
     * with information about the recognized speech words, e.g. start and end time
     * offsets. If false or unspecified, Speech doesn't return any word-level
     * information.
     * </pre>
     *
     * <code>bool enable_word_info = 13;</code>
     *
     * @return The enableWordInfo.
     */
    @java.lang.Override
    public boolean getEnableWordInfo() {
      return enableWordInfo_;
    }
    /**
     *
     *
     * <pre>
     * If `true`, Dialogflow returns
     * [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
     * [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
     * with information about the recognized speech words, e.g. start and end time
     * offsets. If false or unspecified, Speech doesn't return any word-level
     * information.
     * </pre>
     *
     * <code>bool enable_word_info = 13;</code>
     *
     * @param value The enableWordInfo to set.
     * @return This builder for chaining.
     */
    public Builder setEnableWordInfo(boolean value) {
      enableWordInfo_ = value;
      bitField0_ |= 0x00000008;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * If `true`, Dialogflow returns
     * [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
     * [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
     * with information about the recognized speech words, e.g. start and end time
     * offsets. If false or unspecified, Speech doesn't return any word-level
     * information.
     * </pre>
     *
     * <code>bool enable_word_info = 13;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableWordInfo() {
      bitField0_ = (bitField0_ & ~0x00000008);
      enableWordInfo_ = false;
      onChanged();
      return this;
    }

    private com.google.protobuf.LazyStringList phraseHints_ =
        com.google.protobuf.LazyStringArrayList.EMPTY;

    private void ensurePhraseHintsIsMutable() {
      if (!((bitField0_ & 0x00000010) != 0)) {
        phraseHints_ = new com.google.protobuf.LazyStringArrayList(phraseHints_);
        bitField0_ |= 0x00000010;
      }
    }
    /**
     *
     *
     * <pre>
     * A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * This field is deprecated. Please use [speech_contexts]() instead. If you
     * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
     * treat the [phrase_hints]() as a single additional [SpeechContext]().
     * </pre>
     *
     * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
     *
     * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
     *     google/cloud/dialogflow/v2/audio_config.proto;l=223
     * @return A list containing the phraseHints.
     */
    @java.lang.Deprecated
    public com.google.protobuf.ProtocolStringList getPhraseHintsList() {
      return phraseHints_.getUnmodifiableView();
    }
    /**
     *
     *
     * <pre>
     * A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * This field is deprecated. Please use [speech_contexts]() instead. If you
     * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
     * treat the [phrase_hints]() as a single additional [SpeechContext]().
     * </pre>
     *
     * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
     *
     * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
     *     google/cloud/dialogflow/v2/audio_config.proto;l=223
     * @return The count of phraseHints.
     */
    @java.lang.Deprecated
    public int getPhraseHintsCount() {
      return phraseHints_.size();
    }
    /**
     *
     *
     * <pre>
     * A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * This field is deprecated. Please use [speech_contexts]() instead. If you
     * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
     * treat the [phrase_hints]() as a single additional [SpeechContext]().
     * </pre>
     *
     * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
     *
     * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
     *     google/cloud/dialogflow/v2/audio_config.proto;l=223
     * @param index The index of the element to return.
     * @return The phraseHints at the given index.
     */
    @java.lang.Deprecated
    public java.lang.String getPhraseHints(int index) {
      return phraseHints_.get(index);
    }
    /**
     *
     *
     * <pre>
     * A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * This field is deprecated. Please use
     * [speech_contexts][google.cloud.dialogflow.v2.InputAudioConfig.speech_contexts]
     * instead. If you specify both
     * [phrase_hints][google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints] and
     * [speech_contexts][google.cloud.dialogflow.v2.InputAudioConfig.speech_contexts],
     * Dialogflow treats the phrase_hints as a single additional
     * [SpeechContext][google.cloud.dialogflow.v2.SpeechContext].
     * </pre>
     *
     * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
     *
     * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
     *     google/cloud/dialogflow/v2/audio_config.proto;l=223
     * @param index The index of the value to return.
     * @return The bytes of the phraseHints at the given index.
     */
    @java.lang.Deprecated
    public com.google.protobuf.ByteString getPhraseHintsBytes(int index) {
      return phraseHints_.getByteString(index);
    }
    /**
     *
     *
     * <pre>
     * A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * This field is deprecated. Please use
     * [speech_contexts][google.cloud.dialogflow.v2.InputAudioConfig.speech_contexts]
     * instead. If you specify both
     * [phrase_hints][google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints] and
     * [speech_contexts][google.cloud.dialogflow.v2.InputAudioConfig.speech_contexts],
     * Dialogflow treats the phrase_hints as a single additional
     * [SpeechContext][google.cloud.dialogflow.v2.SpeechContext].
     * </pre>
     *
     * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
     *
     * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
     *     google/cloud/dialogflow/v2/audio_config.proto;l=223
     * @param index The index to set the value at.
     * @param value The phraseHints to set.
     * @return This builder for chaining.
     */
    @java.lang.Deprecated
    public Builder setPhraseHints(int index, java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      ensurePhraseHintsIsMutable();
      phraseHints_.set(index, value);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * This field is deprecated. Please use
     * [speech_contexts][google.cloud.dialogflow.v2.InputAudioConfig.speech_contexts]
     * instead. If you specify both
     * [phrase_hints][google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints] and
     * [speech_contexts][google.cloud.dialogflow.v2.InputAudioConfig.speech_contexts],
     * Dialogflow treats the phrase_hints as a single additional
     * [SpeechContext][google.cloud.dialogflow.v2.SpeechContext].
     * </pre>
     *
     * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
     *
     * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
     *     google/cloud/dialogflow/v2/audio_config.proto;l=223
     * @param value The phraseHints to add.
     * @return This builder for chaining.
     */
    @java.lang.Deprecated
    public Builder addPhraseHints(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      ensurePhraseHintsIsMutable();
      phraseHints_.add(value);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * This field is deprecated. Please use
     * [speech_contexts][google.cloud.dialogflow.v2.InputAudioConfig.speech_contexts]
     * instead. If you specify both
     * [phrase_hints][google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints] and
     * [speech_contexts][google.cloud.dialogflow.v2.InputAudioConfig.speech_contexts],
     * Dialogflow treats the phrase_hints as a single additional
     * [SpeechContext][google.cloud.dialogflow.v2.SpeechContext].
     * </pre>
     *
     * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
     *
     * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
     *     google/cloud/dialogflow/v2/audio_config.proto;l=223
     * @param values The phraseHints to add.
     * @return This builder for chaining.
     */
    @java.lang.Deprecated
    public Builder addAllPhraseHints(java.lang.Iterable<java.lang.String> values) {
      ensurePhraseHintsIsMutable();
      com.google.protobuf.AbstractMessageLite.Builder.addAll(values, phraseHints_);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * This field is deprecated. Please use
     * [speech_contexts][google.cloud.dialogflow.v2.InputAudioConfig.speech_contexts]
     * instead. If you specify both
     * [phrase_hints][google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints] and
     * [speech_contexts][google.cloud.dialogflow.v2.InputAudioConfig.speech_contexts],
     * Dialogflow treats the phrase_hints as a single additional
     * [SpeechContext][google.cloud.dialogflow.v2.SpeechContext].
     * </pre>
     *
     * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
     *
     * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
     *     google/cloud/dialogflow/v2/audio_config.proto;l=223
     * @return This builder for chaining.
     */
    @java.lang.Deprecated
    public Builder clearPhraseHints() {
      phraseHints_ = com.google.protobuf.LazyStringArrayList.EMPTY;
      bitField0_ = (bitField0_ & ~0x00000010);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * This field is deprecated. Please use
     * [speech_contexts][google.cloud.dialogflow.v2.InputAudioConfig.speech_contexts]
     * instead. If you specify both
     * [phrase_hints][google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints] and
     * [speech_contexts][google.cloud.dialogflow.v2.InputAudioConfig.speech_contexts],
     * Dialogflow treats the phrase_hints as a single additional
     * [SpeechContext][google.cloud.dialogflow.v2.SpeechContext].
     * </pre>
     *
     * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
     *
     * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
     *     google/cloud/dialogflow/v2/audio_config.proto;l=223
     * @param value The bytes of the phraseHints to add.
     * @return This builder for chaining.
     */
    @java.lang.Deprecated
    public Builder addPhraseHintsBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      ensurePhraseHintsIsMutable();
      phraseHints_.add(value);
      onChanged();
      return this;
    }
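
    // Migration sketch for the deprecated phrase_hints field (comment only).
    // Assumes SpeechContext's generated addPhrases(String) accessor; this
    // mirrors how Dialogflow folds phrase_hints into a single additional
    // SpeechContext when both fields are set.
    //
    //   InputAudioConfig.Builder builder = InputAudioConfig.newBuilder();
    //   // Deprecated:
    //   //   builder.addPhraseHints("account balance").addPhraseHints("routing number");
    //   // Preferred:
    //   builder.addSpeechContexts(
    //       SpeechContext.newBuilder()
    //           .addPhrases("account balance")
    //           .addPhrases("routing number")
    //           .build());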

    private java.util.List<com.google.cloud.dialogflow.v2.SpeechContext> speechContexts_ =
        java.util.Collections.emptyList();

    private void ensureSpeechContextsIsMutable() {
      if (!((bitField0_ & 0x00000020) != 0)) {
        speechContexts_ =
            new java.util.ArrayList<com.google.cloud.dialogflow.v2.SpeechContext>(speechContexts_);
        bitField0_ |= 0x00000020;
      }
    }

    private com.google.protobuf.RepeatedFieldBuilderV3<
            com.google.cloud.dialogflow.v2.SpeechContext,
            com.google.cloud.dialogflow.v2.SpeechContext.Builder,
            com.google.cloud.dialogflow.v2.SpeechContextOrBuilder>
        speechContextsBuilder_;

    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public java.util.List<com.google.cloud.dialogflow.v2.SpeechContext> getSpeechContextsList() {
      if (speechContextsBuilder_ == null) {
        return java.util.Collections.unmodifiableList(speechContexts_);
      } else {
        return speechContextsBuilder_.getMessageList();
      }
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public int getSpeechContextsCount() {
      if (speechContextsBuilder_ == null) {
        return speechContexts_.size();
      } else {
        return speechContextsBuilder_.getCount();
      }
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public com.google.cloud.dialogflow.v2.SpeechContext getSpeechContexts(int index) {
      if (speechContextsBuilder_ == null) {
        return speechContexts_.get(index);
      } else {
        return speechContextsBuilder_.getMessage(index);
      }
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public Builder setSpeechContexts(
        int index, com.google.cloud.dialogflow.v2.SpeechContext value) {
      if (speechContextsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureSpeechContextsIsMutable();
        speechContexts_.set(index, value);
        onChanged();
      } else {
        speechContextsBuilder_.setMessage(index, value);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public Builder setSpeechContexts(
        int index, com.google.cloud.dialogflow.v2.SpeechContext.Builder builderForValue) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        speechContexts_.set(index, builderForValue.build());
        onChanged();
      } else {
        speechContextsBuilder_.setMessage(index, builderForValue.build());
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public Builder addSpeechContexts(com.google.cloud.dialogflow.v2.SpeechContext value) {
      if (speechContextsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureSpeechContextsIsMutable();
        speechContexts_.add(value);
        onChanged();
      } else {
        speechContextsBuilder_.addMessage(value);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public Builder addSpeechContexts(
        int index, com.google.cloud.dialogflow.v2.SpeechContext value) {
      if (speechContextsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureSpeechContextsIsMutable();
        speechContexts_.add(index, value);
        onChanged();
      } else {
        speechContextsBuilder_.addMessage(index, value);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public Builder addSpeechContexts(
        com.google.cloud.dialogflow.v2.SpeechContext.Builder builderForValue) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        speechContexts_.add(builderForValue.build());
        onChanged();
      } else {
        speechContextsBuilder_.addMessage(builderForValue.build());
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public Builder addSpeechContexts(
        int index, com.google.cloud.dialogflow.v2.SpeechContext.Builder builderForValue) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        speechContexts_.add(index, builderForValue.build());
        onChanged();
      } else {
        speechContextsBuilder_.addMessage(index, builderForValue.build());
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public Builder addAllSpeechContexts(
        java.lang.Iterable<? extends com.google.cloud.dialogflow.v2.SpeechContext> values) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        com.google.protobuf.AbstractMessageLite.Builder.addAll(values, speechContexts_);
        onChanged();
      } else {
        speechContextsBuilder_.addAllMessages(values);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public Builder clearSpeechContexts() {
      if (speechContextsBuilder_ == null) {
        speechContexts_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000020);
        onChanged();
      } else {
        speechContextsBuilder_.clear();
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public Builder removeSpeechContexts(int index) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        speechContexts_.remove(index);
        onChanged();
      } else {
        speechContextsBuilder_.remove(index);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public com.google.cloud.dialogflow.v2.SpeechContext.Builder getSpeechContextsBuilder(
        int index) {
      return getSpeechContextsFieldBuilder().getBuilder(index);
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public com.google.cloud.dialogflow.v2.SpeechContextOrBuilder getSpeechContextsOrBuilder(
        int index) {
      if (speechContextsBuilder_ == null) {
        return speechContexts_.get(index);
      } else {
        return speechContextsBuilder_.getMessageOrBuilder(index);
      }
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public java.util.List<? extends com.google.cloud.dialogflow.v2.SpeechContextOrBuilder>
        getSpeechContextsOrBuilderList() {
      if (speechContextsBuilder_ != null) {
        return speechContextsBuilder_.getMessageOrBuilderList();
      } else {
        return java.util.Collections.unmodifiableList(speechContexts_);
      }
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public com.google.cloud.dialogflow.v2.SpeechContext.Builder addSpeechContextsBuilder() {
      return getSpeechContextsFieldBuilder()
          .addBuilder(com.google.cloud.dialogflow.v2.SpeechContext.getDefaultInstance());
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public com.google.cloud.dialogflow.v2.SpeechContext.Builder addSpeechContextsBuilder(
        int index) {
      return getSpeechContextsFieldBuilder()
          .addBuilder(index, com.google.cloud.dialogflow.v2.SpeechContext.getDefaultInstance());
    }
    /**
     *
     *
     * <pre>
     * Context information to assist speech recognition.
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * </pre>
     *
     * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
     */
    public java.util.List<com.google.cloud.dialogflow.v2.SpeechContext.Builder>
        getSpeechContextsBuilderList() {
      return getSpeechContextsFieldBuilder().getBuilderList();
    }

    private com.google.protobuf.RepeatedFieldBuilderV3<
            com.google.cloud.dialogflow.v2.SpeechContext,
            com.google.cloud.dialogflow.v2.SpeechContext.Builder,
            com.google.cloud.dialogflow.v2.SpeechContextOrBuilder>
        getSpeechContextsFieldBuilder() {
      if (speechContextsBuilder_ == null) {
        speechContextsBuilder_ =
            new com.google.protobuf.RepeatedFieldBuilderV3<
                com.google.cloud.dialogflow.v2.SpeechContext,
                com.google.cloud.dialogflow.v2.SpeechContext.Builder,
                com.google.cloud.dialogflow.v2.SpeechContextOrBuilder>(
                speechContexts_,
                ((bitField0_ & 0x00000020) != 0),
                getParentForChildren(),
                isClean());
        speechContexts_ = null;
      }
      return speechContextsBuilder_;
    }
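
    // Usage sketch (comment only): editing a nested SpeechContext in place
    // through the repeated-field builder instead of building the message
    // separately. Assumes SpeechContext's generated addPhrases(String) and
    // setBoost(float) accessors; nested builders are flushed into the message
    // when build() is called on the parent.
    //
    //   InputAudioConfig.Builder config = InputAudioConfig.newBuilder();
    //   config.addSpeechContextsBuilder()
    //       .addPhrases("transfer funds")
    //       .setBoost(10.0f);
    //   InputAudioConfig built = config.build();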

    private java.lang.Object model_ = "";
    /**
     *
     *
     * <pre>
     * Which Speech model to select for the given request. Select the
     * model best suited to your domain to get the best results. If a model is
     * not explicitly specified, a model is auto-selected based on the
     * parameters in the InputAudioConfig.
     * If an enhanced speech model is enabled for the agent but no enhanced
     * version of the specified model exists for the language, speech is
     * recognized using the standard version of the specified model.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
     * for more details.
     * If you specify a model, the following models typically have the best
     * performance:
     * - phone_call (best for Agent Assist and telephony)
     * - latest_short (best for Dialogflow non-telephony)
     * - command_and_search (best for very short utterances and commands)
     * </pre>
     *
     * <code>string model = 7;</code>
     *
     * @return The model.
     */
    public java.lang.String getModel() {
      java.lang.Object ref = model_;
      if (!(ref instanceof java.lang.String)) {
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        model_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Which Speech model to select for the given request. Select the
     * model best suited to your domain to get the best results. If a model is
     * not explicitly specified, a model is auto-selected based on the
     * parameters in the InputAudioConfig.
     * If an enhanced speech model is enabled for the agent but no enhanced
     * version of the specified model exists for the language, speech is
     * recognized using the standard version of the specified model.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
     * for more details.
     * If you specify a model, the following models typically have the best
     * performance:
     * - phone_call (best for Agent Assist and telephony)
     * - latest_short (best for Dialogflow non-telephony)
     * - command_and_search (best for very short utterances and commands)
     * </pre>
     *
     * <code>string model = 7;</code>
     *
     * @return The bytes for model.
     */
    public com.google.protobuf.ByteString getModelBytes() {
      java.lang.Object ref = model_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        model_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Which Speech model to select for the given request. Select the
     * model best suited to your domain to get the best results. If a model is
     * not explicitly specified, a model is auto-selected based on the
     * parameters in the InputAudioConfig.
     * If an enhanced speech model is enabled for the agent but no enhanced
     * version of the specified model exists for the language, speech is
     * recognized using the standard version of the specified model.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
     * for more details.
     * If you specify a model, the following models typically have the best
     * performance:
     * - phone_call (best for Agent Assist and telephony)
     * - latest_short (best for Dialogflow non-telephony)
     * - command_and_search (best for very short utterances and commands)
     * </pre>
     *
     * <code>string model = 7;</code>
     *
     * @param value The model to set.
     * @return This builder for chaining.
     */
    public Builder setModel(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      model_ = value;
      bitField0_ |= 0x00000040;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Which Speech model to select for the given request. Select the
     * model best suited to your domain to get the best results. If a model is
     * not explicitly specified, a model is auto-selected based on the
     * parameters in the InputAudioConfig.
     * If an enhanced speech model is enabled for the agent but no enhanced
     * version of the specified model exists for the language, speech is
     * recognized using the standard version of the specified model.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
     * for more details.
     * If you specify a model, the following models typically have the best
     * performance:
     * - phone_call (best for Agent Assist and telephony)
     * - latest_short (best for Dialogflow non-telephony)
     * - command_and_search (best for very short utterances and commands)
     * </pre>
     *
     * <code>string model = 7;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearModel() {
      model_ = getDefaultInstance().getModel();
      bitField0_ = (bitField0_ & ~0x00000040);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Which Speech model to select for the given request. Select the
     * model best suited to your domain to get the best results. If a model is
     * not explicitly specified, a model is auto-selected based on the
     * parameters in the InputAudioConfig.
     * If an enhanced speech model is enabled for the agent but no enhanced
     * version of the specified model exists for the language, speech is
     * recognized using the standard version of the specified model.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
     * for more details.
     * If you specify a model, the following models typically have the best
     * performance:
     * - phone_call (best for Agent Assist and telephony)
     * - latest_short (best for Dialogflow non-telephony)
     * - command_and_search (best for very short utterances and commands)
     * </pre>
     *
     * <code>string model = 7;</code>
     *
     * @param value The bytes for model to set.
     * @return This builder for chaining.
     */
    public Builder setModelBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      model_ = value;
      bitField0_ |= 0x00000040;
      onChanged();
      return this;
    }
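
    // Usage sketch (comment only): pinning the Speech model rather than
    // relying on auto-selection, using the phone_call model recommended above
    // for telephony traffic.
    //
    //   InputAudioConfig config =
    //       InputAudioConfig.newBuilder()
    //           .setModel("phone_call")
    //           .build();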

    private int modelVariant_ = 0;
    /**
     *
     *
     * <pre>
     * Which variant of the [Speech
     * model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;</code>
     *
     * @return The enum numeric value on the wire for modelVariant.
     */
    @java.lang.Override
    public int getModelVariantValue() {
      return modelVariant_;
    }
    /**
     *
     *
     * <pre>
     * Which variant of the [Speech
     * model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;</code>
     *
     * @param value The enum numeric value on the wire for modelVariant to set.
     * @return This builder for chaining.
     */
    public Builder setModelVariantValue(int value) {
      modelVariant_ = value;
      bitField0_ |= 0x00000080;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Which variant of the [Speech
     * model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;</code>
     *
     * @return The modelVariant.
     */
    @java.lang.Override
    public com.google.cloud.dialogflow.v2.SpeechModelVariant getModelVariant() {
      com.google.cloud.dialogflow.v2.SpeechModelVariant result =
          com.google.cloud.dialogflow.v2.SpeechModelVariant.forNumber(modelVariant_);
      return result == null
          ? com.google.cloud.dialogflow.v2.SpeechModelVariant.UNRECOGNIZED
          : result;
    }
    /**
     *
     *
     * <pre>
     * Which variant of the [Speech
     * model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;</code>
     *
     * @param value The modelVariant to set.
     * @return This builder for chaining.
     */
    public Builder setModelVariant(com.google.cloud.dialogflow.v2.SpeechModelVariant value) {
      if (value == null) {
        throw new NullPointerException();
      }
      bitField0_ |= 0x00000080;
      modelVariant_ = value.getNumber();
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Which variant of the [Speech
     * model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearModelVariant() {
      bitField0_ = (bitField0_ & ~0x00000080);
      modelVariant_ = 0;
      onChanged();
      return this;
    }
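
    // Usage sketch (comment only): requesting the enhanced variant of the
    // selected model. Assumes the SpeechModelVariant.USE_ENHANCED enum value;
    // per the field docs above, recognition falls back to the standard variant
    // when no enhanced version exists for the language.
    //
    //   InputAudioConfig config =
    //       InputAudioConfig.newBuilder()
    //           .setModel("phone_call")
    //           .setModelVariant(SpeechModelVariant.USE_ENHANCED)
    //           .build();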

    private boolean singleUtterance_;
    /**
     *
     *
     * <pre>
     * If `false` (default), recognition does not cease until the
     * client closes the stream.
     * If `true`, the recognizer detects a single spoken utterance in the input
     * audio. Recognition ceases when the recognizer detects that the speaker's
     * voice has stopped or paused. In this case, once a detected intent is
     * received, the client should close the stream and start a new request with
     * a new stream as needed.
     * Note: This setting is relevant only for streaming methods.
     * Note: When specified, InputAudioConfig.single_utterance takes precedence
     * over StreamingDetectIntentRequest.single_utterance.
     * </pre>
     *
     * <code>bool single_utterance = 8;</code>
     *
     * @return The singleUtterance.
     */
    @java.lang.Override
    public boolean getSingleUtterance() {
      return singleUtterance_;
    }
    /**
     *
     *
     * <pre>
     * If `false` (default), recognition does not cease until the
     * client closes the stream.
     * If `true`, the recognizer detects a single spoken utterance in the input
     * audio. Recognition ceases when the recognizer detects that the speaker's
     * voice has stopped or paused. In this case, once a detected intent is
     * received, the client should close the stream and start a new request with
     * a new stream as needed.
     * Note: This setting is relevant only for streaming methods.
     * Note: When specified, InputAudioConfig.single_utterance takes precedence
     * over StreamingDetectIntentRequest.single_utterance.
     * </pre>
     *
     * <code>bool single_utterance = 8;</code>
     *
     * @param value The singleUtterance to set.
     * @return This builder for chaining.
     */
    public Builder setSingleUtterance(boolean value) {
      singleUtterance_ = value;
      bitField0_ |= 0x00000100;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * If `false` (default), recognition does not cease until the
     * client closes the stream.
     * If `true`, the recognizer detects a single spoken utterance in the input
     * audio. Recognition ceases when the recognizer detects that the speaker's
     * voice has stopped or paused. In this case, once a detected intent is
     * received, the client should close the stream and start a new request with
     * a new stream as needed.
     * Note: This setting is relevant only for streaming methods.
     * Note: When specified, InputAudioConfig.single_utterance takes precedence
     * over StreamingDetectIntentRequest.single_utterance.
     * </pre>
     *
     * <code>bool single_utterance = 8;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearSingleUtterance() {
      bitField0_ = (bitField0_ & ~0x00000100);
      singleUtterance_ = false;
      onChanged();
      return this;
    }
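
    // Usage sketch (comment only): a streaming-oriented config in which
    // recognition stops after the first detected utterance, so the client is
    // expected to close the stream and open a new one for each user turn.
    //
    //   InputAudioConfig config =
    //       InputAudioConfig.newBuilder()
    //           .setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16)
    //           .setSampleRateHertz(16000)
    //           .setLanguageCode("en-US")
    //           .setSingleUtterance(true)
    //           .build();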

    private boolean disableNoSpeechRecognizedEvent_;
    /**
     *
     *
     * <pre>
     * Only used in
     * [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
     * and
     * [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent].
     * If `false` and recognition doesn't return any result, a
     * `NO_SPEECH_RECOGNIZED` event is triggered and sent to the Dialogflow agent.
     * </pre>
     *
     * <code>bool disable_no_speech_recognized_event = 14;</code>
     *
     * @return The disableNoSpeechRecognizedEvent.
     */
    @java.lang.Override
    public boolean getDisableNoSpeechRecognizedEvent() {
      return disableNoSpeechRecognizedEvent_;
    }
    /**
     *
     *
     * <pre>
     * Only used in
     * [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
     * and
     * [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent].
     * If `false` and recognition doesn't return any result, a
     * `NO_SPEECH_RECOGNIZED` event is triggered and sent to the Dialogflow agent.
     * </pre>
     *
     * <code>bool disable_no_speech_recognized_event = 14;</code>
     *
     * @param value The disableNoSpeechRecognizedEvent to set.
     * @return This builder for chaining.
     */
    public Builder setDisableNoSpeechRecognizedEvent(boolean value) {
      disableNoSpeechRecognizedEvent_ = value;
      bitField0_ |= 0x00000200;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Only used in
     * [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
     * and
     * [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent].
     * If `false` and recognition doesn't return any result, a
     * `NO_SPEECH_RECOGNIZED` event is triggered and sent to the Dialogflow agent.
     * </pre>
     *
     * <code>bool disable_no_speech_recognized_event = 14;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearDisableNoSpeechRecognizedEvent() {
      bitField0_ = (bitField0_ & ~0x00000200);
      disableNoSpeechRecognizedEvent_ = false;
      onChanged();
      return this;
    }
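
    // Usage sketch (comment only): suppressing the NO_SPEECH_RECOGNIZED event
    // for Participants.AnalyzeContent / StreamingAnalyzeContent calls where
    // silent audio is expected and should not be surfaced to the agent.
    //
    //   InputAudioConfig config =
    //       InputAudioConfig.newBuilder()
    //           .setDisableNoSpeechRecognizedEvent(true)
    //           .build();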

    private boolean enableAutomaticPunctuation_;
    /**
     *
     *
     * <pre>
     * Enables the automatic punctuation option at the speech backend.
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 17;</code>
     *
     * @return The enableAutomaticPunctuation.
     */
    @java.lang.Override
    public boolean getEnableAutomaticPunctuation() {
      return enableAutomaticPunctuation_;
    }
    /**
     *
     *
     * <pre>
     * Enables the automatic punctuation option at the speech backend.
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 17;</code>
     *
     * @param value The enableAutomaticPunctuation to set.
     * @return This builder for chaining.
     */
    public Builder setEnableAutomaticPunctuation(boolean value) {
      enableAutomaticPunctuation_ = value;
      bitField0_ |= 0x00000400;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Enables the automatic punctuation option at the speech backend.
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 17;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableAutomaticPunctuation() {
      bitField0_ = (bitField0_ & ~0x00000400);
      enableAutomaticPunctuation_ = false;
      onChanged();
      return this;
    }
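
    // Usage sketch (comment only): asking the speech backend to insert
    // punctuation into recognized transcripts.
    //
    //   InputAudioConfig config =
    //       InputAudioConfig.newBuilder()
    //           .setEnableAutomaticPunctuation(true)
    //           .build();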

    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.dialogflow.v2.InputAudioConfig)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2.InputAudioConfig)
  private static final com.google.cloud.dialogflow.v2.InputAudioConfig DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.dialogflow.v2.InputAudioConfig();
  }

  public static com.google.cloud.dialogflow.v2.InputAudioConfig getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<InputAudioConfig> PARSER =
      new com.google.protobuf.AbstractParser<InputAudioConfig>() {
        @java.lang.Override
        public InputAudioConfig parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (com.google.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new com.google.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

  public static com.google.protobuf.Parser<InputAudioConfig> parser() {
    return PARSER;
  }
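
  // Usage sketch (comment only): round-tripping a config through the
  // registered parser. Assumes the standard toByteArray() from the generated
  // message base class; Parser.parseFrom(byte[]) throws
  // InvalidProtocolBufferException on malformed input.
  //
  //   byte[] bytes = someConfig.toByteArray();
  //   InputAudioConfig parsed = InputAudioConfig.parser().parseFrom(bytes);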

  @java.lang.Override
  public com.google.protobuf.Parser<InputAudioConfig> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.cloud.dialogflow.v2.InputAudioConfig getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
