/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/texttospeech/v1beta1/cloud_tts.proto

package com.google.cloud.texttospeech.v1beta1;

/**
 *
 *
 * <pre>
 * Description of audio data to be synthesized.
 * </pre>
 *
 * Protobuf type {@code google.cloud.texttospeech.v1beta1.AudioConfig}
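 *
 * <p>A minimal usage sketch; the encoding, rate, and effects profile values below are
 * illustrative only, not a complete or recommended configuration:
 *
 * <pre>{@code
 * AudioConfig config =
 *     AudioConfig.newBuilder()
 *         .setAudioEncoding(AudioEncoding.MP3)
 *         .setSpeakingRate(1.25)
 *         .setSampleRateHertz(24000)
 *         .addEffectsProfileId("handset-class-device") // example profile id
 *         .build();
 * }</pre>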
 */
public final class AudioConfig extends com.google.protobuf.GeneratedMessageV3
    implements
    // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1beta1.AudioConfig)
    AudioConfigOrBuilder {
  private static final long serialVersionUID = 0L;
  // Use AudioConfig.newBuilder() to construct.
  private AudioConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }

  private AudioConfig() {
    audioEncoding_ = 0;
    effectsProfileId_ = com.google.protobuf.LazyStringArrayList.EMPTY;
  }

  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
    return new AudioConfig();
  }

  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
    return this.unknownFields;
  }

  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto
        .internal_static_google_cloud_texttospeech_v1beta1_AudioConfig_descriptor;
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto
        .internal_static_google_cloud_texttospeech_v1beta1_AudioConfig_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.texttospeech.v1beta1.AudioConfig.class,
            com.google.cloud.texttospeech.v1beta1.AudioConfig.Builder.class);
  }

  public static final int AUDIO_ENCODING_FIELD_NUMBER = 1;
  private int audioEncoding_ = 0;
  /**
   *
   *
   * <pre>
   * Required. The format of the audio byte stream.
   * </pre>
   *
   * <code>
   * .google.cloud.texttospeech.v1beta1.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED];
   * </code>
   *
   * @return The enum numeric value on the wire for audioEncoding.
   */
  @java.lang.Override
  public int getAudioEncodingValue() {
    return audioEncoding_;
  }
  /**
   *
   *
   * <pre>
   * Required. The format of the audio byte stream.
   * </pre>
   *
   * <code>
   * .google.cloud.texttospeech.v1beta1.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED];
   * </code>
   *
   * @return The audioEncoding.
   */
  @java.lang.Override
  public com.google.cloud.texttospeech.v1beta1.AudioEncoding getAudioEncoding() {
    com.google.cloud.texttospeech.v1beta1.AudioEncoding result =
        com.google.cloud.texttospeech.v1beta1.AudioEncoding.forNumber(audioEncoding_);
    return result == null
        ? com.google.cloud.texttospeech.v1beta1.AudioEncoding.UNRECOGNIZED
        : result;
  }

  public static final int SPEAKING_RATE_FIELD_NUMBER = 2;
  private double speakingRate_ = 0D;
  /**
   *
   *
   * <pre>
   * Optional. Input only. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is
   * the normal native speed supported by the specific voice. 2.0 is twice as
   * fast, and 0.5 is half as fast. If unset (0.0), defaults to the native 1.0
   * speed. Any other value &lt; 0.25 or &gt; 4.0 will return an error.
   * </pre>
   *
   * <code>
   * double speaking_rate = 2 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
   * </code>
   *
   * @return The speakingRate.
   */
  @java.lang.Override
  public double getSpeakingRate() {
    return speakingRate_;
  }

  public static final int PITCH_FIELD_NUMBER = 3;
  private double pitch_ = 0D;
  /**
   *
   *
   * <pre>
   * Optional. Input only. Speaking pitch, in the range [-20.0, 20.0]. 20 means
   * an increase of 20 semitones from the original pitch; -20 means a decrease
   * of 20 semitones from the original pitch.
   * </pre>
   *
   * <code>
   * double pitch = 3 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
   * </code>
   *
   * @return The pitch.
   */
  @java.lang.Override
  public double getPitch() {
    return pitch_;
  }

  public static final int VOLUME_GAIN_DB_FIELD_NUMBER = 4;
  private double volumeGainDb_ = 0D;
  /**
   *
   *
   * <pre>
   * Optional. Input only. Volume gain (in dB) of the normal native volume
   * supported by the specific voice, in the range [-96.0, 16.0]. If unset, or
   * set to a value of 0.0 (dB), the audio will play at the normal native
   * signal amplitude. A value of -6.0 (dB) will play at approximately half the
   * amplitude of the normal native signal amplitude. A value of +6.0 (dB) will
   * play at approximately twice the amplitude of the normal native signal
   * amplitude. We strongly recommend not exceeding +10 (dB), as there is
   * usually no effective increase in loudness for any value greater than that.
   * </pre>
   *
   * <code>
   * double volume_gain_db = 4 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
   * </code>
   *
   * @return The volumeGainDb.
   */
  @java.lang.Override
  public double getVolumeGainDb() {
    return volumeGainDb_;
  }
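
  // Worked example for the volume_gain_db documentation above: a gain of g dB scales the
  // signal amplitude by roughly 10^(g / 20), so -6.0 dB is about half and +6.0 dB about
  // twice the native amplitude.
  //
  //   double gainDb = -6.0;
  //   double amplitudeScale = Math.pow(10.0, gainDb / 20.0); // ~0.501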

  public static final int SAMPLE_RATE_HERTZ_FIELD_NUMBER = 5;
  private int sampleRateHertz_ = 0;
  /**
   *
   *
   * <pre>
   * Optional. The synthesis sample rate (in hertz) for this audio. When this is
   * specified in SynthesizeSpeechRequest, if this is different from the voice's
   * natural sample rate, then the synthesizer will honor this request by
   * converting to the desired sample rate (which might result in worse audio
   * quality), unless the specified sample rate is not supported for the
   * encoding chosen, in which case it will fail the request and return
   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
   * </pre>
   *
   * <code>int32 sample_rate_hertz = 5 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The sampleRateHertz.
   */
  @java.lang.Override
  public int getSampleRateHertz() {
    return sampleRateHertz_;
  }

  public static final int EFFECTS_PROFILE_ID_FIELD_NUMBER = 6;

  @SuppressWarnings("serial")
  private com.google.protobuf.LazyStringList effectsProfileId_;
  /**
   *
   *
   * <pre>
   * Optional. Input only. An identifier which selects 'audio effects' profiles
   * that are applied to the post-synthesized text-to-speech audio. Effects are
   * applied on top of each other in the order they are given. See
   * [audio
   * profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
   * currently supported profile ids.
   * </pre>
   *
   * <code>
   * repeated string effects_profile_id = 6 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
   * </code>
   *
   * @return A list containing the effectsProfileId.
   */
  public com.google.protobuf.ProtocolStringList getEffectsProfileIdList() {
    return effectsProfileId_;
  }
  /**
   *
   *
   * <pre>
   * Optional. Input only. An identifier which selects 'audio effects' profiles
   * that are applied to the post-synthesized text-to-speech audio. Effects are
   * applied on top of each other in the order they are given. See
   * [audio
   * profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
   * currently supported profile ids.
   * </pre>
   *
   * <code>
   * repeated string effects_profile_id = 6 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
   * </code>
   *
   * @return The count of effectsProfileId.
   */
  public int getEffectsProfileIdCount() {
    return effectsProfileId_.size();
  }
  /**
   *
   *
   * <pre>
   * Optional. Input only. An identifier which selects 'audio effects' profiles
   * that are applied to the post-synthesized text-to-speech audio. Effects are
   * applied on top of each other in the order they are given. See
   * [audio
   * profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
   * currently supported profile ids.
   * </pre>
   *
   * <code>
   * repeated string effects_profile_id = 6 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
   * </code>
   *
   * @param index The index of the element to return.
   * @return The effectsProfileId at the given index.
   */
  public java.lang.String getEffectsProfileId(int index) {
    return effectsProfileId_.get(index);
  }
  /**
   *
   *
   * <pre>
   * Optional. Input only. An identifier which selects 'audio effects' profiles
   * that are applied to the post-synthesized text-to-speech audio. Effects are
   * applied on top of each other in the order they are given. See
   * [audio
   * profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
   * currently supported profile ids.
   * </pre>
   *
   * <code>
   * repeated string effects_profile_id = 6 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
   * </code>
   *
   * @param index The index of the value to return.
   * @return The bytes of the effectsProfileId at the given index.
   */
  public com.google.protobuf.ByteString getEffectsProfileIdBytes(int index) {
    return effectsProfileId_.getByteString(index);
  }

  private byte memoizedIsInitialized = -1;

  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
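    // Proto3 semantics: singular fields still at their default value (0, 0.0, or the
    // zero-numbered enum value) are skipped and never written to the wire.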
    if (audioEncoding_
        != com.google.cloud.texttospeech.v1beta1.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED
            .getNumber()) {
      output.writeEnum(1, audioEncoding_);
    }
    if (java.lang.Double.doubleToRawLongBits(speakingRate_) != 0) {
      output.writeDouble(2, speakingRate_);
    }
    if (java.lang.Double.doubleToRawLongBits(pitch_) != 0) {
      output.writeDouble(3, pitch_);
    }
    if (java.lang.Double.doubleToRawLongBits(volumeGainDb_) != 0) {
      output.writeDouble(4, volumeGainDb_);
    }
    if (sampleRateHertz_ != 0) {
      output.writeInt32(5, sampleRateHertz_);
    }
    for (int i = 0; i < effectsProfileId_.size(); i++) {
      com.google.protobuf.GeneratedMessageV3.writeString(output, 6, effectsProfileId_.getRaw(i));
    }
    getUnknownFields().writeTo(output);
  }

  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    if (audioEncoding_
        != com.google.cloud.texttospeech.v1beta1.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED
            .getNumber()) {
      size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, audioEncoding_);
    }
    if (java.lang.Double.doubleToRawLongBits(speakingRate_) != 0) {
      size += com.google.protobuf.CodedOutputStream.computeDoubleSize(2, speakingRate_);
    }
    if (java.lang.Double.doubleToRawLongBits(pitch_) != 0) {
      size += com.google.protobuf.CodedOutputStream.computeDoubleSize(3, pitch_);
    }
    if (java.lang.Double.doubleToRawLongBits(volumeGainDb_) != 0) {
      size += com.google.protobuf.CodedOutputStream.computeDoubleSize(4, volumeGainDb_);
    }
    if (sampleRateHertz_ != 0) {
      size += com.google.protobuf.CodedOutputStream.computeInt32Size(5, sampleRateHertz_);
    }
    {
      int dataSize = 0;
      for (int i = 0; i < effectsProfileId_.size(); i++) {
        dataSize += computeStringSizeNoTag(effectsProfileId_.getRaw(i));
      }
      size += dataSize;
      size += 1 * getEffectsProfileIdList().size();
    }
    size += getUnknownFields().getSerializedSize();
    memoizedSize = size;
    return size;
  }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof com.google.cloud.texttospeech.v1beta1.AudioConfig)) {
      return super.equals(obj);
    }
    com.google.cloud.texttospeech.v1beta1.AudioConfig other =
        (com.google.cloud.texttospeech.v1beta1.AudioConfig) obj;

    if (audioEncoding_ != other.audioEncoding_) return false;
    if (java.lang.Double.doubleToLongBits(getSpeakingRate())
        != java.lang.Double.doubleToLongBits(other.getSpeakingRate())) return false;
    if (java.lang.Double.doubleToLongBits(getPitch())
        != java.lang.Double.doubleToLongBits(other.getPitch())) return false;
    if (java.lang.Double.doubleToLongBits(getVolumeGainDb())
        != java.lang.Double.doubleToLongBits(other.getVolumeGainDb())) return false;
    if (getSampleRateHertz() != other.getSampleRateHertz()) return false;
    if (!getEffectsProfileIdList().equals(other.getEffectsProfileIdList())) return false;
    if (!getUnknownFields().equals(other.getUnknownFields())) return false;
    return true;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    hash = (37 * hash) + AUDIO_ENCODING_FIELD_NUMBER;
    hash = (53 * hash) + audioEncoding_;
    hash = (37 * hash) + SPEAKING_RATE_FIELD_NUMBER;
    hash =
        (53 * hash)
            + com.google.protobuf.Internal.hashLong(
                java.lang.Double.doubleToLongBits(getSpeakingRate()));
    hash = (37 * hash) + PITCH_FIELD_NUMBER;
    hash =
        (53 * hash)
            + com.google.protobuf.Internal.hashLong(java.lang.Double.doubleToLongBits(getPitch()));
    hash = (37 * hash) + VOLUME_GAIN_DB_FIELD_NUMBER;
    hash =
        (53 * hash)
            + com.google.protobuf.Internal.hashLong(
                java.lang.Double.doubleToLongBits(getVolumeGainDb()));
    hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER;
    hash = (53 * hash) + getSampleRateHertz();
    if (getEffectsProfileIdCount() > 0) {
      hash = (37 * hash) + EFFECTS_PROFILE_ID_FIELD_NUMBER;
      hash = (53 * hash) + getEffectsProfileIdList().hashCode();
    }
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static com.google.cloud.texttospeech.v1beta1.AudioConfig parseFrom(
      java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.texttospeech.v1beta1.AudioConfig parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.texttospeech.v1beta1.AudioConfig parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.texttospeech.v1beta1.AudioConfig parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.texttospeech.v1beta1.AudioConfig parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.texttospeech.v1beta1.AudioConfig parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.texttospeech.v1beta1.AudioConfig parseFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.texttospeech.v1beta1.AudioConfig parseFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.texttospeech.v1beta1.AudioConfig parseDelimitedFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
  }

  public static com.google.cloud.texttospeech.v1beta1.AudioConfig parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.texttospeech.v1beta1.AudioConfig parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.texttospeech.v1beta1.AudioConfig parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }
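
  // A minimal round-trip sketch using the parse methods above; the sample rate value is
  // illustrative only:
  //
  //   AudioConfig config = AudioConfig.newBuilder().setSampleRateHertz(16000).build();
  //   byte[] bytes = config.toByteArray();
  //   AudioConfig parsed = AudioConfig.parseFrom(bytes);
  //   // parsed.getSampleRateHertz() == 16000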

  @java.lang.Override
  public Builder newBuilderForType() {
    return newBuilder();
  }

  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }

  public static Builder newBuilder(com.google.cloud.texttospeech.v1beta1.AudioConfig prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }

  @java.lang.Override
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   *
   *
   * <pre>
   * Description of audio data to be synthesized.
   * </pre>
   *
   * Protobuf type {@code google.cloud.texttospeech.v1beta1.AudioConfig}
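   *
   * <p>A minimal builder-reuse sketch with illustrative values:
   *
   * <pre>{@code
   * AudioConfig.Builder builder = AudioConfig.newBuilder().setSpeakingRate(0.9);
   * AudioConfig slower = builder.build();
   * // clear() resets every field to its proto3 default, so the builder can be reused.
   * AudioConfig defaults = builder.clear().build();
   * }</pre>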
   */
  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
      implements
      // @@protoc_insertion_point(builder_implements:google.cloud.texttospeech.v1beta1.AudioConfig)
      com.google.cloud.texttospeech.v1beta1.AudioConfigOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto
          .internal_static_google_cloud_texttospeech_v1beta1_AudioConfig_descriptor;
    }

    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto
          .internal_static_google_cloud_texttospeech_v1beta1_AudioConfig_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.cloud.texttospeech.v1beta1.AudioConfig.class,
              com.google.cloud.texttospeech.v1beta1.AudioConfig.Builder.class);
    }

    // Construct using com.google.cloud.texttospeech.v1beta1.AudioConfig.newBuilder()
    private Builder() {}

    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
    }

    @java.lang.Override
    public Builder clear() {
      super.clear();
      bitField0_ = 0;
      audioEncoding_ = 0;
      speakingRate_ = 0D;
      pitch_ = 0D;
      volumeGainDb_ = 0D;
      sampleRateHertz_ = 0;
      effectsProfileId_ = com.google.protobuf.LazyStringArrayList.EMPTY;
      bitField0_ = (bitField0_ & ~0x00000020);
      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto
          .internal_static_google_cloud_texttospeech_v1beta1_AudioConfig_descriptor;
    }

    @java.lang.Override
    public com.google.cloud.texttospeech.v1beta1.AudioConfig getDefaultInstanceForType() {
      return com.google.cloud.texttospeech.v1beta1.AudioConfig.getDefaultInstance();
    }

    @java.lang.Override
    public com.google.cloud.texttospeech.v1beta1.AudioConfig build() {
      com.google.cloud.texttospeech.v1beta1.AudioConfig result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public com.google.cloud.texttospeech.v1beta1.AudioConfig buildPartial() {
      com.google.cloud.texttospeech.v1beta1.AudioConfig result =
          new com.google.cloud.texttospeech.v1beta1.AudioConfig(this);
      buildPartialRepeatedFields(result);
      if (bitField0_ != 0) {
        buildPartial0(result);
      }
      onBuilt();
      return result;
    }

    private void buildPartialRepeatedFields(
        com.google.cloud.texttospeech.v1beta1.AudioConfig result) {
      if (((bitField0_ & 0x00000020) != 0)) {
        effectsProfileId_ = effectsProfileId_.getUnmodifiableView();
        bitField0_ = (bitField0_ & ~0x00000020);
      }
      result.effectsProfileId_ = effectsProfileId_;
    }

    private void buildPartial0(com.google.cloud.texttospeech.v1beta1.AudioConfig result) {
      int from_bitField0_ = bitField0_;
      if (((from_bitField0_ & 0x00000001) != 0)) {
        result.audioEncoding_ = audioEncoding_;
      }
      if (((from_bitField0_ & 0x00000002) != 0)) {
        result.speakingRate_ = speakingRate_;
      }
      if (((from_bitField0_ & 0x00000004) != 0)) {
        result.pitch_ = pitch_;
      }
      if (((from_bitField0_ & 0x00000008) != 0)) {
        result.volumeGainDb_ = volumeGainDb_;
      }
      if (((from_bitField0_ & 0x00000010) != 0)) {
        result.sampleRateHertz_ = sampleRateHertz_;
      }
    }

    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }

    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.setField(field, value);
    }

    @java.lang.Override
    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }

    @java.lang.Override
    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }

    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }

    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }

    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.cloud.texttospeech.v1beta1.AudioConfig) {
        return mergeFrom((com.google.cloud.texttospeech.v1beta1.AudioConfig) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(com.google.cloud.texttospeech.v1beta1.AudioConfig other) {
      if (other == com.google.cloud.texttospeech.v1beta1.AudioConfig.getDefaultInstance())
        return this;
      if (other.audioEncoding_ != 0) {
        setAudioEncodingValue(other.getAudioEncodingValue());
      }
      if (other.getSpeakingRate() != 0D) {
        setSpeakingRate(other.getSpeakingRate());
      }
      if (other.getPitch() != 0D) {
        setPitch(other.getPitch());
      }
      if (other.getVolumeGainDb() != 0D) {
        setVolumeGainDb(other.getVolumeGainDb());
      }
      if (other.getSampleRateHertz() != 0) {
        setSampleRateHertz(other.getSampleRateHertz());
      }
      if (!other.effectsProfileId_.isEmpty()) {
        if (effectsProfileId_.isEmpty()) {
          effectsProfileId_ = other.effectsProfileId_;
          bitField0_ = (bitField0_ & ~0x00000020);
        } else {
          ensureEffectsProfileIdIsMutable();
          effectsProfileId_.addAll(other.effectsProfileId_);
        }
        onChanged();
      }
      this.mergeUnknownFields(other.getUnknownFields());
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
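            // Each case label below is a protobuf tag value, (field_number << 3) | wire_type.
            // For example, speaking_rate is field 2 with 64-bit wire type 1, giving
            // (2 << 3) | 1 = 17; effects_profile_id is field 6 with length-delimited wire
            // type 2, giving (6 << 3) | 2 = 50.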
            case 0:
              done = true;
              break;
            case 8:
              {
                audioEncoding_ = input.readEnum();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
            case 17:
              {
                speakingRate_ = input.readDouble();
                bitField0_ |= 0x00000002;
                break;
              } // case 17
            case 25:
              {
                pitch_ = input.readDouble();
                bitField0_ |= 0x00000004;
                break;
              } // case 25
            case 33:
              {
                volumeGainDb_ = input.readDouble();
                bitField0_ |= 0x00000008;
                break;
              } // case 33
            case 40:
              {
                sampleRateHertz_ = input.readInt32();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
            case 50:
              {
                java.lang.String s = input.readStringRequireUtf8();
                ensureEffectsProfileIdIsMutable();
                effectsProfileId_.add(s);
                break;
              } // case 50
            default:
              {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
          } // switch (tag)
        } // while (!done)
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.unwrapIOException();
      } finally {
        onChanged();
      } // finally
      return this;
    }

    private int bitField0_;

    private int audioEncoding_ = 0;
    /**
     *
     *
     * <pre>
     * Required. The format of the audio byte stream.
     * </pre>
     *
     * <code>
     * .google.cloud.texttospeech.v1beta1.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @return The enum numeric value on the wire for audioEncoding.
     */
    @java.lang.Override
    public int getAudioEncodingValue() {
      return audioEncoding_;
    }
    /**
     *
     *
     * <pre>
     * Required. The format of the audio byte stream.
     * </pre>
     *
     * <code>
     * .google.cloud.texttospeech.v1beta1.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @param value The enum numeric value on the wire for audioEncoding to set.
     * @return This builder for chaining.
     */
    public Builder setAudioEncodingValue(int value) {
      audioEncoding_ = value;
      bitField0_ |= 0x00000001;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. The format of the audio byte stream.
     * </pre>
     *
     * <code>
     * .google.cloud.texttospeech.v1beta1.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @return The audioEncoding.
     */
    @java.lang.Override
    public com.google.cloud.texttospeech.v1beta1.AudioEncoding getAudioEncoding() {
      com.google.cloud.texttospeech.v1beta1.AudioEncoding result =
          com.google.cloud.texttospeech.v1beta1.AudioEncoding.forNumber(audioEncoding_);
      return result == null
          ? com.google.cloud.texttospeech.v1beta1.AudioEncoding.UNRECOGNIZED
          : result;
    }
    /**
     *
     *
     * <pre>
     * Required. The format of the audio byte stream.
     * </pre>
     *
     * <code>
     * .google.cloud.texttospeech.v1beta1.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @param value The audioEncoding to set.
     * @return This builder for chaining.
     */
    public Builder setAudioEncoding(com.google.cloud.texttospeech.v1beta1.AudioEncoding value) {
      if (value == null) {
        throw new NullPointerException();
      }
      bitField0_ |= 0x00000001;
      audioEncoding_ = value.getNumber();
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. The format of the audio byte stream.
     * </pre>
     *
     * <code>
     * .google.cloud.texttospeech.v1beta1.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @return This builder for chaining.
     */
    public Builder clearAudioEncoding() {
      bitField0_ = (bitField0_ & ~0x00000001);
      audioEncoding_ = 0;
      onChanged();
      return this;
    }

    private double speakingRate_;
    /**
     *
     *
     * <pre>
     * Optional. Input only. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is
     * the normal native speed supported by the specific voice. 2.0 is twice as
     * fast, and 0.5 is half as fast. If unset (0.0), defaults to the native 1.0
     * speed. Any other value &lt; 0.25 or &gt; 4.0 will return an error.
     * </pre>
     *
     * <code>
     * double speaking_rate = 2 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return The speakingRate.
     */
    @java.lang.Override
    public double getSpeakingRate() {
      return speakingRate_;
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is
     * the normal native speed supported by the specific voice. 2.0 is twice as
     * fast, and 0.5 is half as fast. If unset (0.0), defaults to the native 1.0
     * speed. Any other value &lt; 0.25 or &gt; 4.0 will return an error.
     * </pre>
     *
     * <code>
     * double speaking_rate = 2 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @param value The speakingRate to set.
     * @return This builder for chaining.
     */
    public Builder setSpeakingRate(double value) {

      speakingRate_ = value;
      bitField0_ |= 0x00000002;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is
     * the normal native speed supported by the specific voice. 2.0 is twice as
     * fast, and 0.5 is half as fast. If unset (0.0), defaults to the native 1.0
     * speed. Any other value &lt; 0.25 or &gt; 4.0 will return an error.
     * </pre>
     *
     * <code>
     * double speaking_rate = 2 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return This builder for chaining.
     */
    public Builder clearSpeakingRate() {
      bitField0_ = (bitField0_ & ~0x00000002);
      speakingRate_ = 0D;
      onChanged();
      return this;
    }

    private double pitch_;
    /**
     *
     *
     * <pre>
     * Optional. Input only. Speaking pitch, in the range [-20.0, 20.0]. 20 means
     * an increase of 20 semitones from the original pitch; -20 means a decrease
     * of 20 semitones from the original pitch.
     * </pre>
     *
     * <code>
     * double pitch = 3 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return The pitch.
     */
    @java.lang.Override
    public double getPitch() {
      return pitch_;
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. Speaking pitch, in the range [-20.0, 20.0]. 20 means
     * an increase of 20 semitones from the original pitch; -20 means a decrease
     * of 20 semitones from the original pitch.
     * </pre>
     *
     * <code>
     * double pitch = 3 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @param value The pitch to set.
     * @return This builder for chaining.
     */
    public Builder setPitch(double value) {

      pitch_ = value;
      bitField0_ |= 0x00000004;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. Speaking pitch, in the range [-20.0, 20.0]. 20 means
     * an increase of 20 semitones from the original pitch; -20 means a decrease
     * of 20 semitones from the original pitch.
     * </pre>
     *
     * <code>
     * double pitch = 3 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return This builder for chaining.
     */
    public Builder clearPitch() {
      bitField0_ = (bitField0_ & ~0x00000004);
      pitch_ = 0D;
      onChanged();
      return this;
    }

    private double volumeGainDb_;
    /**
     *
     *
     * <pre>
     * Optional. Input only. Volume gain (in dB) of the normal native volume
     * supported by the specific voice, in the range [-96.0, 16.0]. If unset, or
     * set to a value of 0.0 (dB), the audio will play at the normal native
     * signal amplitude. A value of -6.0 (dB) will play at approximately half the
     * amplitude of the normal native signal amplitude. A value of +6.0 (dB) will
     * play at approximately twice the amplitude of the normal native signal
     * amplitude. We strongly recommend not exceeding +10 (dB), as there is
     * usually no effective increase in loudness for any value greater than that.
     * </pre>
     *
     * <code>
     * double volume_gain_db = 4 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return The volumeGainDb.
     */
    @java.lang.Override
    public double getVolumeGainDb() {
      return volumeGainDb_;
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. Volume gain (in dB) of the normal native volume
     * supported by the specific voice, in the range [-96.0, 16.0]. If unset, or
     * set to a value of 0.0 (dB), the audio will play at the normal native
     * signal amplitude. A value of -6.0 (dB) will play at approximately half the
     * amplitude of the normal native signal amplitude. A value of +6.0 (dB) will
     * play at approximately twice the amplitude of the normal native signal
     * amplitude. We strongly recommend not exceeding +10 (dB), as there is
     * usually no effective increase in loudness for any value greater than that.
     * </pre>
     *
     * <code>
     * double volume_gain_db = 4 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @param value The volumeGainDb to set.
     * @return This builder for chaining.
     */
    public Builder setVolumeGainDb(double value) {

      volumeGainDb_ = value;
      bitField0_ |= 0x00000008;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. Volume gain (in dB) of the normal native volume
     * supported by the specific voice, in the range [-96.0, 16.0]. If unset, or
     * set to a value of 0.0 (dB), the audio will play at the normal native
     * signal amplitude. A value of -6.0 (dB) will play at approximately half the
     * amplitude of the normal native signal amplitude. A value of +6.0 (dB) will
     * play at approximately twice the amplitude of the normal native signal
     * amplitude. We strongly recommend not exceeding +10 (dB), as there is
     * usually no effective increase in loudness for any value greater than that.
     * </pre>
     *
     * <code>
     * double volume_gain_db = 4 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return This builder for chaining.
     */
    public Builder clearVolumeGainDb() {
      bitField0_ = (bitField0_ & ~0x00000008);
      volumeGainDb_ = 0D;
      onChanged();
      return this;
    }

    private int sampleRateHertz_;
    /**
     *
     *
     * <pre>
     * Optional. The synthesis sample rate (in hertz) for this audio. When this is
     * specified in SynthesizeSpeechRequest, if this is different from the voice's
     * natural sample rate, then the synthesizer will honor this request by
     * converting to the desired sample rate (which might result in worse audio
     * quality), unless the specified sample rate is not supported for the
     * encoding chosen, in which case it will fail the request and return
     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 5 [(.google.api.field_behavior) = OPTIONAL];</code>
     *
     * @return The sampleRateHertz.
     */
    @java.lang.Override
    public int getSampleRateHertz() {
      return sampleRateHertz_;
    }
    /**
     *
     *
     * <pre>
     * Optional. The synthesis sample rate (in hertz) for this audio. When this is
     * specified in SynthesizeSpeechRequest, if this is different from the voice's
     * natural sample rate, then the synthesizer will honor this request by
     * converting to the desired sample rate (which might result in worse audio
     * quality), unless the specified sample rate is not supported for the
     * encoding chosen, in which case it will fail the request and return
     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 5 [(.google.api.field_behavior) = OPTIONAL];</code>
     *
     * @param value The sampleRateHertz to set.
     * @return This builder for chaining.
     */
    public Builder setSampleRateHertz(int value) {

      sampleRateHertz_ = value;
      bitField0_ |= 0x00000010;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Optional. The synthesis sample rate (in hertz) for this audio. When this is
     * specified in SynthesizeSpeechRequest, if this is different from the voice's
     * natural sample rate, then the synthesizer will honor this request by
     * converting to the desired sample rate (which might result in worse audio
     * quality), unless the specified sample rate is not supported for the
     * encoding chosen, in which case it will fail the request and return
     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 5 [(.google.api.field_behavior) = OPTIONAL];</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearSampleRateHertz() {
      bitField0_ = (bitField0_ & ~0x00000010);
      sampleRateHertz_ = 0;
      onChanged();
      return this;
    }

    private com.google.protobuf.LazyStringList effectsProfileId_ =
        com.google.protobuf.LazyStringArrayList.EMPTY;

    private void ensureEffectsProfileIdIsMutable() {
      if (!((bitField0_ & 0x00000020) != 0)) {
        effectsProfileId_ = new com.google.protobuf.LazyStringArrayList(effectsProfileId_);
        bitField0_ |= 0x00000020;
      }
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. An identifier which selects 'audio effects' profiles
     * that are applied to the post-synthesized text-to-speech audio. Effects are
     * applied on top of each other in the order they are given. See
     * [audio
     * profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
     * currently supported profile ids.
     * </pre>
     *
     * <code>
     * repeated string effects_profile_id = 6 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return A list containing the effectsProfileId.
     */
    public com.google.protobuf.ProtocolStringList getEffectsProfileIdList() {
      return effectsProfileId_.getUnmodifiableView();
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. An identifier which selects 'audio effects' profiles
     * that are applied to the post-synthesized text-to-speech audio. Effects are
     * applied on top of each other in the order they are given. See
     * [audio
     * profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
     * currently supported profile ids.
     * </pre>
     *
     * <code>
     * repeated string effects_profile_id = 6 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return The count of effectsProfileId.
     */
    public int getEffectsProfileIdCount() {
      return effectsProfileId_.size();
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. An identifier which selects 'audio effects' profiles
     * that are applied to the post-synthesized text-to-speech audio. Effects are
     * applied on top of each other in the order they are given. See
     * [audio
     * profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
     * currently supported profile ids.
     * </pre>
     *
     * <code>
     * repeated string effects_profile_id = 6 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @param index The index of the element to return.
     * @return The effectsProfileId at the given index.
     */
    public java.lang.String getEffectsProfileId(int index) {
      return effectsProfileId_.get(index);
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. An identifier which selects 'audio effects' profiles
     * that are applied to the post-synthesized text-to-speech audio. Effects are
     * applied on top of each other in the order they are given. See
     * [audio
     * profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
     * currently supported profile ids.
     * </pre>
     *
     * <code>
     * repeated string effects_profile_id = 6 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @param index The index of the value to return.
     * @return The bytes of the effectsProfileId at the given index.
     */
    public com.google.protobuf.ByteString getEffectsProfileIdBytes(int index) {
      return effectsProfileId_.getByteString(index);
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. An identifier which selects 'audio effects' profiles
     * that are applied to the post-synthesized text-to-speech audio. Effects are
     * applied on top of each other in the order they are given. See
     * [audio
     * profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
     * currently supported profile ids.
     * </pre>
     *
     * <code>
     * repeated string effects_profile_id = 6 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @param index The index to set the value at.
     * @param value The effectsProfileId to set.
     * @return This builder for chaining.
     */
    public Builder setEffectsProfileId(int index, java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      ensureEffectsProfileIdIsMutable();
      effectsProfileId_.set(index, value);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. An identifier which selects 'audio effects' profiles
     * that are applied to the post-synthesized text-to-speech audio. Effects are
     * applied on top of each other in the order they are given. See
     * [audio
     * profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
     * currently supported profile ids.
     * </pre>
     *
     * <code>
     * repeated string effects_profile_id = 6 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @param value The effectsProfileId to add.
     * @return This builder for chaining.
     */
    public Builder addEffectsProfileId(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      ensureEffectsProfileIdIsMutable();
      effectsProfileId_.add(value);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. An identifier which selects 'audio effects' profiles
     * that are applied to the post-synthesized text-to-speech audio. Effects are
     * applied on top of each other in the order they are given. See
     * [audio
     * profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
     * currently supported profile ids.
     * </pre>
     *
     * <code>
     * repeated string effects_profile_id = 6 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @param values The effectsProfileId to add.
     * @return This builder for chaining.
     */
    public Builder addAllEffectsProfileId(java.lang.Iterable<java.lang.String> values) {
      ensureEffectsProfileIdIsMutable();
      com.google.protobuf.AbstractMessageLite.Builder.addAll(values, effectsProfileId_);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. An identifier which selects 'audio effects' profiles
     * that are applied to the post-synthesized text-to-speech audio. Effects are
     * applied on top of each other in the order they are given. See
     * [audio
     * profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
     * currently supported profile ids.
     * </pre>
     *
     * <code>
     * repeated string effects_profile_id = 6 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEffectsProfileId() {
      effectsProfileId_ = com.google.protobuf.LazyStringArrayList.EMPTY;
      bitField0_ = (bitField0_ & ~0x00000020);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Optional. Input only. An identifier which selects 'audio effects' profiles
     * that are applied to the post-synthesized text-to-speech audio. Effects are
     * applied on top of each other in the order they are given. See
     * [audio
     * profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
     * currently supported profile ids.
     * </pre>
     *
     * <code>
     * repeated string effects_profile_id = 6 [(.google.api.field_behavior) = INPUT_ONLY, (.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @param value The bytes of the effectsProfileId to add.
     * @return This builder for chaining.
     */
    public Builder addEffectsProfileIdBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      ensureEffectsProfileIdIsMutable();
      effectsProfileId_.add(value);
      onChanged();
      return this;
    }

    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1beta1.AudioConfig)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1beta1.AudioConfig)
  private static final com.google.cloud.texttospeech.v1beta1.AudioConfig DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1beta1.AudioConfig();
  }

  public static com.google.cloud.texttospeech.v1beta1.AudioConfig getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<AudioConfig> PARSER =
      new com.google.protobuf.AbstractParser<AudioConfig>() {
        @java.lang.Override
        public AudioConfig parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (com.google.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new com.google.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

  public static com.google.protobuf.Parser<AudioConfig> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<AudioConfig> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.cloud.texttospeech.v1beta1.AudioConfig getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
