/*
 * Copyright 2019 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/speech/v1beta1/cloud_speech.proto

package com.google.cloud.speech.v1beta1;

/**
 *
 *
 * <pre>
 * Provides information to the recognizer that specifies how to process the
 * request.
 * </pre>
 *
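 * <p>A minimal usage sketch (illustrative only; it relies solely on the builder and
 * parse methods generated in this file, and the field values are example choices):
 *
 * <pre>{@code
 * RecognitionConfig config =
 *     RecognitionConfig.newBuilder()
 *         .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
 *         .setSampleRate(16000)
 *         .setLanguageCode("en-US")
 *         .setMaxAlternatives(1)
 *         .build();
 *
 * // Round-trip through the wire format.
 * byte[] bytes = config.toByteArray();
 * // parseFrom declares InvalidProtocolBufferException (a checked exception).
 * RecognitionConfig parsed = RecognitionConfig.parseFrom(bytes);
 * }</pre>
 *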
 * Protobuf type {@code google.cloud.speech.v1beta1.RecognitionConfig}
 */
public final class RecognitionConfig extends com.google.protobuf.GeneratedMessageV3
    implements
    // @@protoc_insertion_point(message_implements:google.cloud.speech.v1beta1.RecognitionConfig)
    RecognitionConfigOrBuilder {
  private static final long serialVersionUID = 0L;
  // Use RecognitionConfig.newBuilder() to construct.
  private RecognitionConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }

  private RecognitionConfig() {
    encoding_ = 0;
    sampleRate_ = 0;
    languageCode_ = "";
    maxAlternatives_ = 0;
    profanityFilter_ = false;
  }

  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
    return this.unknownFields;
  }

  private RecognitionConfig(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    this();
    if (extensionRegistry == null) {
      throw new java.lang.NullPointerException();
    }
    int mutable_bitField0_ = 0;
    com.google.protobuf.UnknownFieldSet.Builder unknownFields =
        com.google.protobuf.UnknownFieldSet.newBuilder();
    try {
      boolean done = false;
      while (!done) {
        int tag = input.readTag();
        switch (tag) {
          case 0:
            done = true;
            break;
          default:
            {
              if (!parseUnknownFieldProto3(input, unknownFields, extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          case 8:
            {
              int rawValue = input.readEnum();

              encoding_ = rawValue;
              break;
            }
          case 16:
            {
              sampleRate_ = input.readInt32();
              break;
            }
          case 26:
            {
              java.lang.String s = input.readStringRequireUtf8();

              languageCode_ = s;
              break;
            }
          case 32:
            {
              maxAlternatives_ = input.readInt32();
              break;
            }
          case 40:
            {
              profanityFilter_ = input.readBool();
              break;
            }
          case 50:
            {
              com.google.cloud.speech.v1beta1.SpeechContext.Builder subBuilder = null;
              if (speechContext_ != null) {
                subBuilder = speechContext_.toBuilder();
              }
              speechContext_ =
                  input.readMessage(
                      com.google.cloud.speech.v1beta1.SpeechContext.parser(), extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(speechContext_);
                speechContext_ = subBuilder.buildPartial();
              }

              break;
            }
        }
      }
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(this);
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
    } finally {
      this.unknownFields = unknownFields.build();
      makeExtensionsImmutable();
    }
  }

  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return com.google.cloud.speech.v1beta1.SpeechProto
        .internal_static_google_cloud_speech_v1beta1_RecognitionConfig_descriptor;
  }

  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.google.cloud.speech.v1beta1.SpeechProto
        .internal_static_google_cloud_speech_v1beta1_RecognitionConfig_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.speech.v1beta1.RecognitionConfig.class,
            com.google.cloud.speech.v1beta1.RecognitionConfig.Builder.class);
  }

  /**
   *
   *
   * <pre>
   * Audio encoding of the data sent in the audio message. All encodings support
   * only 1 channel (mono) audio. Only `FLAC` includes a header that describes
   * the bytes of audio that follow the header. The other encodings are raw
   * audio bytes with no header.
   * For best results, the audio source should be captured and transmitted using
   * a lossless encoding (`FLAC` or `LINEAR16`). Recognition accuracy may be
   * reduced if lossy codecs (such as AMR, AMR_WB and MULAW) are used to capture
   * or transmit the audio, particularly if background noise is present.
   * </pre>
   *
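   * <p>A short sketch (illustrative only) of the numeric accessors defined below:
   * {@code forNumber} returns {@code null} for numbers this enum does not define, and
   * {@code getNumber()} throws for {@code UNRECOGNIZED}:
   *
   * <pre>{@code
   * AudioEncoding flac = AudioEncoding.forNumber(2);      // FLAC
   * int wireValue = AudioEncoding.LINEAR16.getNumber();   // 1
   * AudioEncoding unknown = AudioEncoding.forNumber(99);  // null (undefined number)
   * }</pre>
   *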
   * Protobuf enum {@code google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding}
   */
  public enum AudioEncoding implements com.google.protobuf.ProtocolMessageEnum {
    /**
     *
     *
     * <pre>
     * Not specified. The request will fail with [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
     * </pre>
     *
     * <code>ENCODING_UNSPECIFIED = 0;</code>
     */
    ENCODING_UNSPECIFIED(0),
    /**
     *
     *
     * <pre>
     * Uncompressed 16-bit signed little-endian samples (Linear PCM).
     * This is the only encoding that may be used by `AsyncRecognize`.
     * </pre>
     *
     * <code>LINEAR16 = 1;</code>
     */
    LINEAR16(1),
    /**
     *
     *
     * <pre>
     * This is the recommended encoding for `SyncRecognize` and
     * `StreamingRecognize` because it uses lossless compression; therefore
     * recognition accuracy is not compromised by a lossy codec.
     * The stream FLAC (Free Lossless Audio Codec) encoding is specified at:
     * http://flac.sourceforge.net/documentation.html.
     * 16-bit and 24-bit samples are supported.
     * Not all fields in STREAMINFO are supported.
     * </pre>
     *
     * <code>FLAC = 2;</code>
     */
    FLAC(2),
    /**
     *
     *
     * <pre>
     * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
     * </pre>
     *
     * <code>MULAW = 3;</code>
     */
    MULAW(3),
    /**
     *
     *
     * <pre>
     * Adaptive Multi-Rate Narrowband codec. `sample_rate` must be 8000 Hz.
     * </pre>
     *
     * <code>AMR = 4;</code>
     */
    AMR(4),
    /**
     *
     *
     * <pre>
     * Adaptive Multi-Rate Wideband codec. `sample_rate` must be 16000 Hz.
     * </pre>
     *
     * <code>AMR_WB = 5;</code>
     */
    AMR_WB(5),
    UNRECOGNIZED(-1),
    ;

    /**
     *
     *
     * <pre>
     * Not specified. The request will fail with [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
     * </pre>
     *
     * <code>ENCODING_UNSPECIFIED = 0;</code>
     */
    public static final int ENCODING_UNSPECIFIED_VALUE = 0;
    /**
     *
     *
     * <pre>
     * Uncompressed 16-bit signed little-endian samples (Linear PCM).
     * This is the only encoding that may be used by `AsyncRecognize`.
     * </pre>
     *
     * <code>LINEAR16 = 1;</code>
     */
    public static final int LINEAR16_VALUE = 1;
    /**
     *
     *
     * <pre>
     * This is the recommended encoding for `SyncRecognize` and
     * `StreamingRecognize` because it uses lossless compression; therefore
     * recognition accuracy is not compromised by a lossy codec.
     * The stream FLAC (Free Lossless Audio Codec) encoding is specified at:
     * http://flac.sourceforge.net/documentation.html.
     * 16-bit and 24-bit samples are supported.
     * Not all fields in STREAMINFO are supported.
     * </pre>
     *
     * <code>FLAC = 2;</code>
     */
    public static final int FLAC_VALUE = 2;
    /**
     *
     *
     * <pre>
     * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
     * </pre>
     *
     * <code>MULAW = 3;</code>
     */
    public static final int MULAW_VALUE = 3;
    /**
     *
     *
     * <pre>
     * Adaptive Multi-Rate Narrowband codec. `sample_rate` must be 8000 Hz.
     * </pre>
     *
     * <code>AMR = 4;</code>
     */
    public static final int AMR_VALUE = 4;
    /**
     *
     *
     * <pre>
     * Adaptive Multi-Rate Wideband codec. `sample_rate` must be 16000 Hz.
     * </pre>
     *
     * <code>AMR_WB = 5;</code>
     */
    public static final int AMR_WB_VALUE = 5;

    public final int getNumber() {
      if (this == UNRECOGNIZED) {
        throw new java.lang.IllegalArgumentException(
            "Can't get the number of an unknown enum value.");
      }
      return value;
    }

    /** @deprecated Use {@link #forNumber(int)} instead. */
    @java.lang.Deprecated
    public static AudioEncoding valueOf(int value) {
      return forNumber(value);
    }

    public static AudioEncoding forNumber(int value) {
      switch (value) {
        case 0:
          return ENCODING_UNSPECIFIED;
        case 1:
          return LINEAR16;
        case 2:
          return FLAC;
        case 3:
          return MULAW;
        case 4:
          return AMR;
        case 5:
          return AMR_WB;
        default:
          return null;
      }
    }

    public static com.google.protobuf.Internal.EnumLiteMap<AudioEncoding> internalGetValueMap() {
      return internalValueMap;
    }

    private static final com.google.protobuf.Internal.EnumLiteMap<AudioEncoding> internalValueMap =
        new com.google.protobuf.Internal.EnumLiteMap<AudioEncoding>() {
          public AudioEncoding findValueByNumber(int number) {
            return AudioEncoding.forNumber(number);
          }
        };

    public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }

    public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() {
      return getDescriptor();
    }

    public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() {
      return com.google.cloud.speech.v1beta1.RecognitionConfig.getDescriptor()
          .getEnumTypes()
          .get(0);
    }

    private static final AudioEncoding[] VALUES = values();

    public static AudioEncoding valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type.");
      }
      if (desc.getIndex() == -1) {
        return UNRECOGNIZED;
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private AudioEncoding(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding)
  }

  public static final int ENCODING_FIELD_NUMBER = 1;
  private int encoding_;
  /**
   *
   *
   * <pre>
   * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
   * </pre>
   *
   * <code>.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding encoding = 1;</code>
   */
  public int getEncodingValue() {
    return encoding_;
  }
  /**
   *
   *
   * <pre>
   * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
   * </pre>
   *
   * <code>.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding encoding = 1;</code>
   */
  public com.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding getEncoding() {
    com.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding result =
        com.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding.valueOf(encoding_);
    return result == null
        ? com.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding.UNRECOGNIZED
        : result;
  }

  public static final int SAMPLE_RATE_FIELD_NUMBER = 2;
  private int sampleRate_;
  /**
   *
   *
   * <pre>
   * *Required* Sample rate in Hertz of the audio data sent in all
   * `RecognitionAudio` messages. Valid values are: 8000-48000.
   * 16000 is optimal. For best results, set the sampling rate of the audio
   * source to 16000 Hz. If that's not possible, use the native sample rate of
   * the audio source (instead of re-sampling).
   * </pre>
   *
   * <code>int32 sample_rate = 2;</code>
   */
  public int getSampleRate() {
    return sampleRate_;
  }

  public static final int LANGUAGE_CODE_FIELD_NUMBER = 3;
  private volatile java.lang.Object languageCode_;
  /**
   *
   *
   * <pre>
   * *Optional* The language of the supplied audio as a BCP-47 language tag.
   * Example: "en-GB"  https://www.rfc-editor.org/rfc/bcp/bcp47.txt
   * If omitted, defaults to "en-US". See
   * [Language Support](https://cloud.google.com/speech/docs/languages)
   * for a list of the currently supported language codes.
   * </pre>
   *
   * <code>string language_code = 3;</code>
   */
  public java.lang.String getLanguageCode() {
    java.lang.Object ref = languageCode_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      languageCode_ = s;
      return s;
    }
  }
  /**
   *
   *
   * <pre>
   * *Optional* The language of the supplied audio as a BCP-47 language tag.
   * Example: "en-GB"  https://www.rfc-editor.org/rfc/bcp/bcp47.txt
   * If omitted, defaults to "en-US". See
   * [Language Support](https://cloud.google.com/speech/docs/languages)
   * for a list of the currently supported language codes.
   * </pre>
   *
   * <code>string language_code = 3;</code>
   */
  public com.google.protobuf.ByteString getLanguageCodeBytes() {
    java.lang.Object ref = languageCode_;
    if (ref instanceof java.lang.String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
      languageCode_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }

  public static final int MAX_ALTERNATIVES_FIELD_NUMBER = 4;
  private int maxAlternatives_;
  /**
   *
   *
   * <pre>
   * *Optional* Maximum number of recognition hypotheses to be returned.
   * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
   * within each `SpeechRecognitionResult`.
   * The server may return fewer than `max_alternatives`.
   * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
   * one. If omitted, the server returns a maximum of one.
   * </pre>
   *
   * <code>int32 max_alternatives = 4;</code>
   */
  public int getMaxAlternatives() {
    return maxAlternatives_;
  }

  public static final int PROFANITY_FILTER_FIELD_NUMBER = 5;
  private boolean profanityFilter_;
  /**
   *
   *
   * <pre>
   * *Optional* If set to `true`, the server will attempt to filter out
   * profanities, replacing all but the initial character in each filtered word
   * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
   * won't be filtered out.
   * </pre>
   *
   * <code>bool profanity_filter = 5;</code>
   */
  public boolean getProfanityFilter() {
    return profanityFilter_;
  }

  public static final int SPEECH_CONTEXT_FIELD_NUMBER = 6;
  private com.google.cloud.speech.v1beta1.SpeechContext speechContext_;
  /**
   *
   *
   * <pre>
   * *Optional* A means to provide context to assist the speech recognition.
   * </pre>
   *
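   * <p>A brief sketch (illustrative only, for a message instance named {@code config})
   * of reading this optional submessage; when it is unset, {@code getSpeechContext()}
   * returns the {@code SpeechContext} default instance rather than {@code null}:
   *
   * <pre>{@code
   * if (config.hasSpeechContext()) {
   *   SpeechContext context = config.getSpeechContext();
   *   // use context ...
   * }
   * }</pre>
   *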
   * <code>.google.cloud.speech.v1beta1.SpeechContext speech_context = 6;</code>
   */
  public boolean hasSpeechContext() {
    return speechContext_ != null;
  }
  /**
   *
   *
   * <pre>
   * *Optional* A means to provide context to assist the speech recognition.
   * </pre>
   *
   * <code>.google.cloud.speech.v1beta1.SpeechContext speech_context = 6;</code>
   */
  public com.google.cloud.speech.v1beta1.SpeechContext getSpeechContext() {
    return speechContext_ == null
        ? com.google.cloud.speech.v1beta1.SpeechContext.getDefaultInstance()
        : speechContext_;
  }
  /**
   *
   *
   * <pre>
   * *Optional* A means to provide context to assist the speech recognition.
   * </pre>
   *
   * <code>.google.cloud.speech.v1beta1.SpeechContext speech_context = 6;</code>
   */
  public com.google.cloud.speech.v1beta1.SpeechContextOrBuilder getSpeechContextOrBuilder() {
    return getSpeechContext();
  }

  private byte memoizedIsInitialized = -1;

  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
    if (encoding_
        != com.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED
            .getNumber()) {
      output.writeEnum(1, encoding_);
    }
    if (sampleRate_ != 0) {
      output.writeInt32(2, sampleRate_);
    }
    if (!getLanguageCodeBytes().isEmpty()) {
      com.google.protobuf.GeneratedMessageV3.writeString(output, 3, languageCode_);
    }
    if (maxAlternatives_ != 0) {
      output.writeInt32(4, maxAlternatives_);
    }
    if (profanityFilter_ != false) {
      output.writeBool(5, profanityFilter_);
    }
    if (speechContext_ != null) {
      output.writeMessage(6, getSpeechContext());
    }
    unknownFields.writeTo(output);
  }

  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    if (encoding_
        != com.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED
            .getNumber()) {
      size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, encoding_);
    }
    if (sampleRate_ != 0) {
      size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, sampleRate_);
    }
    if (!getLanguageCodeBytes().isEmpty()) {
      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, languageCode_);
    }
    if (maxAlternatives_ != 0) {
      size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, maxAlternatives_);
    }
    if (profanityFilter_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(5, profanityFilter_);
    }
    if (speechContext_ != null) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getSpeechContext());
    }
    size += unknownFields.getSerializedSize();
    memoizedSize = size;
    return size;
  }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof com.google.cloud.speech.v1beta1.RecognitionConfig)) {
      return super.equals(obj);
    }
    com.google.cloud.speech.v1beta1.RecognitionConfig other =
        (com.google.cloud.speech.v1beta1.RecognitionConfig) obj;

    boolean result = true;
    result = result && encoding_ == other.encoding_;
    result = result && (getSampleRate() == other.getSampleRate());
    result = result && getLanguageCode().equals(other.getLanguageCode());
    result = result && (getMaxAlternatives() == other.getMaxAlternatives());
    result = result && (getProfanityFilter() == other.getProfanityFilter());
    result = result && (hasSpeechContext() == other.hasSpeechContext());
    if (hasSpeechContext()) {
      result = result && getSpeechContext().equals(other.getSpeechContext());
    }
    result = result && unknownFields.equals(other.unknownFields);
    return result;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    hash = (37 * hash) + ENCODING_FIELD_NUMBER;
    hash = (53 * hash) + encoding_;
    hash = (37 * hash) + SAMPLE_RATE_FIELD_NUMBER;
    hash = (53 * hash) + getSampleRate();
    hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER;
    hash = (53 * hash) + getLanguageCode().hashCode();
    hash = (37 * hash) + MAX_ALTERNATIVES_FIELD_NUMBER;
    hash = (53 * hash) + getMaxAlternatives();
    hash = (37 * hash) + PROFANITY_FILTER_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getProfanityFilter());
    if (hasSpeechContext()) {
      hash = (37 * hash) + SPEECH_CONTEXT_FIELD_NUMBER;
      hash = (53 * hash) + getSpeechContext().hashCode();
    }
    hash = (29 * hash) + unknownFields.hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static com.google.cloud.speech.v1beta1.RecognitionConfig parseFrom(
      java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v1beta1.RecognitionConfig parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.speech.v1beta1.RecognitionConfig parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v1beta1.RecognitionConfig parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.speech.v1beta1.RecognitionConfig parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v1beta1.RecognitionConfig parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.speech.v1beta1.RecognitionConfig parseFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v1beta1.RecognitionConfig parseFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.speech.v1beta1.RecognitionConfig parseDelimitedFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v1beta1.RecognitionConfig parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.speech.v1beta1.RecognitionConfig parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v1beta1.RecognitionConfig parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  public Builder newBuilderForType() {
    return newBuilder();
  }

  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }

  public static Builder newBuilder(com.google.cloud.speech.v1beta1.RecognitionConfig prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }

  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   *
   *
   * <pre>
   * Provides information to the recognizer that specifies how to process the
   * request.
   * </pre>
   *
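   * <p>A small sketch (illustrative only) of deriving a modified copy from an existing
   * message via {@code toBuilder()}; {@code existingConfig} and the field values are
   * arbitrary examples:
   *
   * <pre>{@code
   * RecognitionConfig updated =
   *     existingConfig.toBuilder()   // existingConfig is any previously built instance
   *         .setEncoding(RecognitionConfig.AudioEncoding.AMR)
   *         .setSampleRate(8000)     // AMR requires an 8000 Hz sample rate
   *         .build();
   * }</pre>
   *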
   * Protobuf type {@code google.cloud.speech.v1beta1.RecognitionConfig}
   */
  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
      implements
      // @@protoc_insertion_point(builder_implements:google.cloud.speech.v1beta1.RecognitionConfig)
      com.google.cloud.speech.v1beta1.RecognitionConfigOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return com.google.cloud.speech.v1beta1.SpeechProto
          .internal_static_google_cloud_speech_v1beta1_RecognitionConfig_descriptor;
    }

    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.cloud.speech.v1beta1.SpeechProto
          .internal_static_google_cloud_speech_v1beta1_RecognitionConfig_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.cloud.speech.v1beta1.RecognitionConfig.class,
              com.google.cloud.speech.v1beta1.RecognitionConfig.Builder.class);
    }

    // Construct using com.google.cloud.speech.v1beta1.RecognitionConfig.newBuilder()
    private Builder() {
      maybeForceBuilderInitialization();
    }

    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }

    private void maybeForceBuilderInitialization() {
      if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
    }

    public Builder clear() {
      super.clear();
      encoding_ = 0;

      sampleRate_ = 0;

      languageCode_ = "";

      maxAlternatives_ = 0;

      profanityFilter_ = false;

      if (speechContextBuilder_ == null) {
        speechContext_ = null;
      } else {
        speechContext_ = null;
        speechContextBuilder_ = null;
      }
      return this;
    }

    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.cloud.speech.v1beta1.SpeechProto
          .internal_static_google_cloud_speech_v1beta1_RecognitionConfig_descriptor;
    }

    public com.google.cloud.speech.v1beta1.RecognitionConfig getDefaultInstanceForType() {
      return com.google.cloud.speech.v1beta1.RecognitionConfig.getDefaultInstance();
    }

    public com.google.cloud.speech.v1beta1.RecognitionConfig build() {
      com.google.cloud.speech.v1beta1.RecognitionConfig result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    public com.google.cloud.speech.v1beta1.RecognitionConfig buildPartial() {
      com.google.cloud.speech.v1beta1.RecognitionConfig result =
          new com.google.cloud.speech.v1beta1.RecognitionConfig(this);
      result.encoding_ = encoding_;
      result.sampleRate_ = sampleRate_;
      result.languageCode_ = languageCode_;
      result.maxAlternatives_ = maxAlternatives_;
      result.profanityFilter_ = profanityFilter_;
      if (speechContextBuilder_ == null) {
        result.speechContext_ = speechContext_;
      } else {
        result.speechContext_ = speechContextBuilder_.build();
      }
      onBuilt();
      return result;
    }

    public Builder clone() {
      return (Builder) super.clone();
    }

    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return (Builder) super.setField(field, value);
    }

    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
      return (Builder) super.clearField(field);
    }

    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return (Builder) super.clearOneof(oneof);
    }

    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
      return (Builder) super.setRepeatedField(field, index, value);
    }

    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return (Builder) super.addRepeatedField(field, value);
    }

    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.cloud.speech.v1beta1.RecognitionConfig) {
        return mergeFrom((com.google.cloud.speech.v1beta1.RecognitionConfig) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(com.google.cloud.speech.v1beta1.RecognitionConfig other) {
      if (other == com.google.cloud.speech.v1beta1.RecognitionConfig.getDefaultInstance())
        return this;
      if (other.encoding_ != 0) {
        setEncodingValue(other.getEncodingValue());
      }
      if (other.getSampleRate() != 0) {
        setSampleRate(other.getSampleRate());
      }
      if (!other.getLanguageCode().isEmpty()) {
        languageCode_ = other.languageCode_;
        onChanged();
      }
      if (other.getMaxAlternatives() != 0) {
        setMaxAlternatives(other.getMaxAlternatives());
      }
      if (other.getProfanityFilter() != false) {
        setProfanityFilter(other.getProfanityFilter());
      }
      if (other.hasSpeechContext()) {
        mergeSpeechContext(other.getSpeechContext());
      }
      this.mergeUnknownFields(other.unknownFields);
      onChanged();
      return this;
    }

    public final boolean isInitialized() {
      return true;
    }

    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      com.google.cloud.speech.v1beta1.RecognitionConfig parsedMessage = null;
      try {
        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        parsedMessage =
            (com.google.cloud.speech.v1beta1.RecognitionConfig) e.getUnfinishedMessage();
        throw e.unwrapIOException();
      } finally {
        if (parsedMessage != null) {
          mergeFrom(parsedMessage);
        }
      }
      return this;
    }

    private int encoding_ = 0;
    /**
     *
     *
     * <pre>
     * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
     * </pre>
     *
     * <code>.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding encoding = 1;</code>
     */
    public int getEncodingValue() {
      return encoding_;
    }
    /**
     *
     *
     * <pre>
     * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
     * </pre>
     *
     * <code>.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding encoding = 1;</code>
     */
    public Builder setEncodingValue(int value) {
      encoding_ = value;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
     * </pre>
     *
     * <code>.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding encoding = 1;</code>
     */
    public com.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding getEncoding() {
      com.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding result =
          com.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding.valueOf(encoding_);
      return result == null
          ? com.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding.UNRECOGNIZED
          : result;
    }
    /**
     *
     *
     * <pre>
     * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
     * </pre>
     *
     * <code>.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding encoding = 1;</code>
     */
    public Builder setEncoding(
        com.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding value) {
      if (value == null) {
        throw new NullPointerException();
      }

      encoding_ = value.getNumber();
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
     * </pre>
     *
     * <code>.google.cloud.speech.v1beta1.RecognitionConfig.AudioEncoding encoding = 1;</code>
     */
    public Builder clearEncoding() {

      encoding_ = 0;
      onChanged();
      return this;
    }

    private int sampleRate_;
    /**
     *
     *
     * <pre>
     * *Required* Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * </pre>
     *
     * <code>int32 sample_rate = 2;</code>
     */
    public int getSampleRate() {
      return sampleRate_;
    }
    /**
     *
     *
     * <pre>
     * *Required* Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * </pre>
     *
     * <code>int32 sample_rate = 2;</code>
     */
    public Builder setSampleRate(int value) {

      sampleRate_ = value;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * *Required* Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * </pre>
     *
     * <code>int32 sample_rate = 2;</code>
     */
    public Builder clearSampleRate() {

      sampleRate_ = 0;
      onChanged();
      return this;
    }

    private java.lang.Object languageCode_ = "";
    /**
     *
     *
     * <pre>
     * *Optional* The language of the supplied audio as a BCP-47 language tag.
     * Example: "en-GB"  https://www.rfc-editor.org/rfc/bcp/bcp47.txt
     * If omitted, defaults to "en-US". See
     * [Language Support](https://cloud.google.com/speech/docs/languages)
     * for a list of the currently supported language codes.
     * </pre>
     *
     * <code>string language_code = 3;</code>
     */
    public java.lang.String getLanguageCode() {
      java.lang.Object ref = languageCode_;
      if (!(ref instanceof java.lang.String)) {
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        languageCode_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * *Optional* The language of the supplied audio as a BCP-47 language tag.
     * Example: "en-GB"  https://www.rfc-editor.org/rfc/bcp/bcp47.txt
     * If omitted, defaults to "en-US". See
     * [Language Support](https://cloud.google.com/speech/docs/languages)
     * for a list of the currently supported language codes.
     * </pre>
     *
     * <code>string language_code = 3;</code>
     */
    public com.google.protobuf.ByteString getLanguageCodeBytes() {
      java.lang.Object ref = languageCode_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        languageCode_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * *Optional* The language of the supplied audio as a BCP-47 language tag.
     * Example: "en-GB"  https://www.rfc-editor.org/rfc/bcp/bcp47.txt
     * If omitted, defaults to "en-US". See
     * [Language Support](https://cloud.google.com/speech/docs/languages)
     * for a list of the currently supported language codes.
     * </pre>
     *
     * <code>string language_code = 3;</code>
     */
    public Builder setLanguageCode(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }

      languageCode_ = value;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * *Optional* The language of the supplied audio as a BCP-47 language tag.
     * Example: "en-GB"  https://www.rfc-editor.org/rfc/bcp/bcp47.txt
     * If omitted, defaults to "en-US". See
     * [Language Support](https://cloud.google.com/speech/docs/languages)
     * for a list of the currently supported language codes.
     * </pre>
     *
     * <code>string language_code = 3;</code>
     */
    public Builder clearLanguageCode() {

      languageCode_ = getDefaultInstance().getLanguageCode();
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * *Optional* The language of the supplied audio as a BCP-47 language tag.
     * Example: "en-GB"  https://www.rfc-editor.org/rfc/bcp/bcp47.txt
     * If omitted, defaults to "en-US". See
     * [Language Support](https://cloud.google.com/speech/docs/languages)
     * for a list of the currently supported language codes.
     * </pre>
     *
     * <code>string language_code = 3;</code>
     */
    public Builder setLanguageCodeBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);

      languageCode_ = value;
      onChanged();
      return this;
    }

    private int maxAlternatives_;
    /**
     *
     *
     * <pre>
     * *Optional* Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, the server returns a maximum of one.
     * </pre>
     *
     * <code>int32 max_alternatives = 4;</code>
     */
    public int getMaxAlternatives() {
      return maxAlternatives_;
    }
    /**
     *
     *
     * <pre>
     * *Optional* Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, the server returns a maximum of one.
     * </pre>
     *
     * <code>int32 max_alternatives = 4;</code>
     */
    public Builder setMaxAlternatives(int value) {

      maxAlternatives_ = value;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * *Optional* Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, the server returns a maximum of one.
     * </pre>
     *
     * <code>int32 max_alternatives = 4;</code>
     */
    public Builder clearMaxAlternatives() {

      maxAlternatives_ = 0;
      onChanged();
      return this;
    }

    private boolean profanityFilter_;
    /**
     *
     *
     * <pre>
     * *Optional* If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * </pre>
     *
     * <code>bool profanity_filter = 5;</code>
     */
    public boolean getProfanityFilter() {
      return profanityFilter_;
    }
    /**
     *
     *
     * <pre>
     * *Optional* If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * </pre>
     *
     * <code>bool profanity_filter = 5;</code>
     */
    public Builder setProfanityFilter(boolean value) {

      profanityFilter_ = value;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * *Optional* If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * </pre>
     *
     * <code>bool profanity_filter = 5;</code>
     */
    public Builder clearProfanityFilter() {

      profanityFilter_ = false;
      onChanged();
      return this;
    }

    private com.google.cloud.speech.v1beta1.SpeechContext speechContext_ = null;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1beta1.SpeechContext,
            com.google.cloud.speech.v1beta1.SpeechContext.Builder,
            com.google.cloud.speech.v1beta1.SpeechContextOrBuilder>
        speechContextBuilder_;
    /**
     *
     *
     * <pre>
     * *Optional* A means to provide context to assist the speech recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v1beta1.SpeechContext speech_context = 6;</code>
     */
    public boolean hasSpeechContext() {
      return speechContextBuilder_ != null || speechContext_ != null;
    }
    /**
     *
     *
     * <pre>
     * *Optional* A means to provide context to assist the speech recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v1beta1.SpeechContext speech_context = 6;</code>
     */
    public com.google.cloud.speech.v1beta1.SpeechContext getSpeechContext() {
      if (speechContextBuilder_ == null) {
        return speechContext_ == null
            ? com.google.cloud.speech.v1beta1.SpeechContext.getDefaultInstance()
            : speechContext_;
      } else {
        return speechContextBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * *Optional* A means to provide context to assist the speech recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v1beta1.SpeechContext speech_context = 6;</code>
     */
    public Builder setSpeechContext(com.google.cloud.speech.v1beta1.SpeechContext value) {
      if (speechContextBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        speechContext_ = value;
        onChanged();
      } else {
        speechContextBuilder_.setMessage(value);
      }

      return this;
    }
    /**
     *
     *
     * <pre>
     * *Optional* A means to provide context to assist the speech recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v1beta1.SpeechContext speech_context = 6;</code>
     */
    public Builder setSpeechContext(
        com.google.cloud.speech.v1beta1.SpeechContext.Builder builderForValue) {
      if (speechContextBuilder_ == null) {
        speechContext_ = builderForValue.build();
        onChanged();
      } else {
        speechContextBuilder_.setMessage(builderForValue.build());
      }

      return this;
    }
    /**
     *
     *
     * <pre>
     * *Optional* A means to provide context to assist the speech recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v1beta1.SpeechContext speech_context = 6;</code>
     */
    public Builder mergeSpeechContext(com.google.cloud.speech.v1beta1.SpeechContext value) {
      if (speechContextBuilder_ == null) {
        if (speechContext_ != null) {
          speechContext_ =
              com.google.cloud.speech.v1beta1.SpeechContext.newBuilder(speechContext_)
                  .mergeFrom(value)
                  .buildPartial();
        } else {
          speechContext_ = value;
        }
        onChanged();
      } else {
        speechContextBuilder_.mergeFrom(value);
      }

      return this;
    }
    /**
     *
     *
     * <pre>
     * *Optional* A means to provide context to assist the speech recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v1beta1.SpeechContext speech_context = 6;</code>
     */
    public Builder clearSpeechContext() {
      if (speechContextBuilder_ == null) {
        speechContext_ = null;
        onChanged();
      } else {
        speechContext_ = null;
        speechContextBuilder_ = null;
      }

      return this;
    }
    /**
     *
     *
     * <pre>
     * *Optional* A means to provide context to assist the speech recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v1beta1.SpeechContext speech_context = 6;</code>
     */
    public com.google.cloud.speech.v1beta1.SpeechContext.Builder getSpeechContextBuilder() {

      onChanged();
      return getSpeechContextFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * *Optional* A means to provide context to assist the speech recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v1beta1.SpeechContext speech_context = 6;</code>
     */
    public com.google.cloud.speech.v1beta1.SpeechContextOrBuilder getSpeechContextOrBuilder() {
      if (speechContextBuilder_ != null) {
        return speechContextBuilder_.getMessageOrBuilder();
      } else {
        return speechContext_ == null
            ? com.google.cloud.speech.v1beta1.SpeechContext.getDefaultInstance()
            : speechContext_;
      }
    }
    /**
     *
     *
     * <pre>
     * *Optional* A means to provide context to assist the speech recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v1beta1.SpeechContext speech_context = 6;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1beta1.SpeechContext,
            com.google.cloud.speech.v1beta1.SpeechContext.Builder,
            com.google.cloud.speech.v1beta1.SpeechContextOrBuilder>
        getSpeechContextFieldBuilder() {
      if (speechContextBuilder_ == null) {
        speechContextBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.speech.v1beta1.SpeechContext,
                com.google.cloud.speech.v1beta1.SpeechContext.Builder,
                com.google.cloud.speech.v1beta1.SpeechContextOrBuilder>(
                getSpeechContext(), getParentForChildren(), isClean());
        speechContext_ = null;
      }
      return speechContextBuilder_;
    }

    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFieldsProto3(unknownFields);
    }

    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.speech.v1beta1.RecognitionConfig)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.speech.v1beta1.RecognitionConfig)
  private static final com.google.cloud.speech.v1beta1.RecognitionConfig DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.speech.v1beta1.RecognitionConfig();
  }

  public static com.google.cloud.speech.v1beta1.RecognitionConfig getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<RecognitionConfig> PARSER =
      new com.google.protobuf.AbstractParser<RecognitionConfig>() {
        public RecognitionConfig parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          return new RecognitionConfig(input, extensionRegistry);
        }
      };

  public static com.google.protobuf.Parser<RecognitionConfig> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<RecognitionConfig> getParserForType() {
    return PARSER;
  }

  public com.google.cloud.speech.v1beta1.RecognitionConfig getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
