/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/dataproc/v1/autoscaling_policies.proto

package com.google.cloud.dataproc.v1;

/**
 *
 *
 * <pre>
 * Basic autoscaling configurations for YARN.
 * </pre>
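 *
 * <p>A minimal construction sketch; the values below are illustrative only, not recommended
 * settings:
 *
 * <pre>{@code
 * BasicYarnAutoscalingConfig config =
 *     BasicYarnAutoscalingConfig.newBuilder()
 *         .setGracefulDecommissionTimeout(
 *             com.google.protobuf.Duration.newBuilder().setSeconds(3600).build())
 *         .setScaleUpFactor(0.5)
 *         .setScaleDownFactor(0.5)
 *         .build();
 * }</pre>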
 *
 * Protobuf type {@code google.cloud.dataproc.v1.BasicYarnAutoscalingConfig}
 */
public final class BasicYarnAutoscalingConfig extends com.google.protobuf.GeneratedMessageV3
    implements
    // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.BasicYarnAutoscalingConfig)
    BasicYarnAutoscalingConfigOrBuilder {
  private static final long serialVersionUID = 0L;
  // Use BasicYarnAutoscalingConfig.newBuilder() to construct.
  private BasicYarnAutoscalingConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }

  private BasicYarnAutoscalingConfig() {}

  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
    return new BasicYarnAutoscalingConfig();
  }

  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
    return this.unknownFields;
  }

  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return com.google.cloud.dataproc.v1.AutoscalingPoliciesProto
        .internal_static_google_cloud_dataproc_v1_BasicYarnAutoscalingConfig_descriptor;
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.google.cloud.dataproc.v1.AutoscalingPoliciesProto
        .internal_static_google_cloud_dataproc_v1_BasicYarnAutoscalingConfig_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig.class,
            com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig.Builder.class);
  }

  public static final int GRACEFUL_DECOMMISSION_TIMEOUT_FIELD_NUMBER = 5;
  private com.google.protobuf.Duration gracefulDecommissionTimeout_;
  /**
   *
   *
   * <pre>
   * Required. Timeout for YARN graceful decommissioning of Node Managers.
   * Specifies the duration to wait for jobs to complete before forcefully
   * removing workers (and potentially interrupting jobs). Only applicable to
   * downscaling operations.
   * Bounds: [0s, 1d].
   * </pre>
   *
   * <code>
   * .google.protobuf.Duration graceful_decommission_timeout = 5 [(.google.api.field_behavior) = REQUIRED];
   * </code>
   *
   * @return Whether the gracefulDecommissionTimeout field is set.
   */
  @java.lang.Override
  public boolean hasGracefulDecommissionTimeout() {
    return gracefulDecommissionTimeout_ != null;
  }
  /**
   *
   *
   * <pre>
   * Required. Timeout for YARN graceful decommissioning of Node Managers.
   * Specifies the duration to wait for jobs to complete before forcefully
   * removing workers (and potentially interrupting jobs). Only applicable to
   * downscaling operations.
   * Bounds: [0s, 1d].
   * </pre>
   *
   * <code>
   * .google.protobuf.Duration graceful_decommission_timeout = 5 [(.google.api.field_behavior) = REQUIRED];
   * </code>
   *
   * @return The gracefulDecommissionTimeout.
   */
  @java.lang.Override
  public com.google.protobuf.Duration getGracefulDecommissionTimeout() {
    return gracefulDecommissionTimeout_ == null
        ? com.google.protobuf.Duration.getDefaultInstance()
        : gracefulDecommissionTimeout_;
  }
  /**
   *
   *
   * <pre>
   * Required. Timeout for YARN graceful decommissioning of Node Managers.
   * Specifies the duration to wait for jobs to complete before forcefully
   * removing workers (and potentially interrupting jobs). Only applicable to
   * downscaling operations.
   * Bounds: [0s, 1d].
   * </pre>
   *
   * <code>
   * .google.protobuf.Duration graceful_decommission_timeout = 5 [(.google.api.field_behavior) = REQUIRED];
   * </code>
   */
  @java.lang.Override
  public com.google.protobuf.DurationOrBuilder getGracefulDecommissionTimeoutOrBuilder() {
    return gracefulDecommissionTimeout_ == null
        ? com.google.protobuf.Duration.getDefaultInstance()
        : gracefulDecommissionTimeout_;
  }

  public static final int SCALE_UP_FACTOR_FIELD_NUMBER = 1;
  private double scaleUpFactor_ = 0D;
  /**
   *
   *
   * <pre>
   * Required. Fraction of average YARN pending memory in the last cooldown
   * period for which to add workers. A scale-up factor of 1.0 will result in
   * scaling up so that there is no pending memory remaining after the update
   * (more aggressive scaling). A scale-up factor closer to 0 will result in a
   * smaller magnitude of scaling up (less aggressive scaling). See [How
   * autoscaling
   * works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
   * for more information.
   * Bounds: [0.0, 1.0].
   * </pre>
   *
   * <code>double scale_up_factor = 1 [(.google.api.field_behavior) = REQUIRED];</code>
   *
   * @return The scaleUpFactor.
   */
  @java.lang.Override
  public double getScaleUpFactor() {
    return scaleUpFactor_;
  }

  public static final int SCALE_DOWN_FACTOR_FIELD_NUMBER = 2;
  private double scaleDownFactor_ = 0D;
  /**
   *
   *
   * <pre>
   * Required. Fraction of average YARN pending memory in the last cooldown
   * period for which to remove workers. A scale-down factor of 1 will result in
   * scaling down so that there is no available memory remaining after the
   * update (more aggressive scaling). A scale-down factor of 0 disables
   * removing workers, which can be beneficial for autoscaling a single job.
   * See [How autoscaling
   * works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
   * for more information.
   * Bounds: [0.0, 1.0].
   * </pre>
   *
   * <code>double scale_down_factor = 2 [(.google.api.field_behavior) = REQUIRED];</code>
   *
   * @return The scaleDownFactor.
   */
  @java.lang.Override
  public double getScaleDownFactor() {
    return scaleDownFactor_;
  }

  public static final int SCALE_UP_MIN_WORKER_FRACTION_FIELD_NUMBER = 3;
  private double scaleUpMinWorkerFraction_ = 0D;
  /**
   *
   *
   * <pre>
   * Optional. Minimum scale-up threshold as a fraction of total cluster size
   * before scaling occurs. For example, in a 20-worker cluster, a threshold of
   * 0.1 means the autoscaler must recommend at least a 2-worker scale-up for
   * the cluster to scale. A threshold of 0 means the autoscaler will scale up
   * on any recommended change.
   * Bounds: [0.0, 1.0]. Default: 0.0.
   * </pre>
   *
   * <code>double scale_up_min_worker_fraction = 3 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The scaleUpMinWorkerFraction.
   */
  @java.lang.Override
  public double getScaleUpMinWorkerFraction() {
    return scaleUpMinWorkerFraction_;
  }

  public static final int SCALE_DOWN_MIN_WORKER_FRACTION_FIELD_NUMBER = 4;
  private double scaleDownMinWorkerFraction_ = 0D;
  /**
   *
   *
   * <pre>
   * Optional. Minimum scale-down threshold as a fraction of total cluster size
   * before scaling occurs. For example, in a 20-worker cluster, a threshold of
   * 0.1 means the autoscaler must recommend at least a 2-worker scale-down for
   * the cluster to scale. A threshold of 0 means the autoscaler will scale down
   * on any recommended change.
   * Bounds: [0.0, 1.0]. Default: 0.0.
   * </pre>
   *
   * <code>double scale_down_min_worker_fraction = 4 [(.google.api.field_behavior) = OPTIONAL];
   * </code>
   *
   * @return The scaleDownMinWorkerFraction.
   */
  @java.lang.Override
  public double getScaleDownMinWorkerFraction() {
    return scaleDownMinWorkerFraction_;
  }

  private byte memoizedIsInitialized = -1;

  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
    if (java.lang.Double.doubleToRawLongBits(scaleUpFactor_) != 0) {
      output.writeDouble(1, scaleUpFactor_);
    }
    if (java.lang.Double.doubleToRawLongBits(scaleDownFactor_) != 0) {
      output.writeDouble(2, scaleDownFactor_);
    }
    if (java.lang.Double.doubleToRawLongBits(scaleUpMinWorkerFraction_) != 0) {
      output.writeDouble(3, scaleUpMinWorkerFraction_);
    }
    if (java.lang.Double.doubleToRawLongBits(scaleDownMinWorkerFraction_) != 0) {
      output.writeDouble(4, scaleDownMinWorkerFraction_);
    }
    if (gracefulDecommissionTimeout_ != null) {
      output.writeMessage(5, getGracefulDecommissionTimeout());
    }
    getUnknownFields().writeTo(output);
  }

  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    if (java.lang.Double.doubleToRawLongBits(scaleUpFactor_) != 0) {
      size += com.google.protobuf.CodedOutputStream.computeDoubleSize(1, scaleUpFactor_);
    }
    if (java.lang.Double.doubleToRawLongBits(scaleDownFactor_) != 0) {
      size += com.google.protobuf.CodedOutputStream.computeDoubleSize(2, scaleDownFactor_);
    }
    if (java.lang.Double.doubleToRawLongBits(scaleUpMinWorkerFraction_) != 0) {
      size += com.google.protobuf.CodedOutputStream.computeDoubleSize(3, scaleUpMinWorkerFraction_);
    }
    if (java.lang.Double.doubleToRawLongBits(scaleDownMinWorkerFraction_) != 0) {
      size +=
          com.google.protobuf.CodedOutputStream.computeDoubleSize(4, scaleDownMinWorkerFraction_);
    }
    if (gracefulDecommissionTimeout_ != null) {
      size +=
          com.google.protobuf.CodedOutputStream.computeMessageSize(
              5, getGracefulDecommissionTimeout());
    }
    size += getUnknownFields().getSerializedSize();
    memoizedSize = size;
    return size;
  }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig)) {
      return super.equals(obj);
    }
    com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig other =
        (com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig) obj;

    if (hasGracefulDecommissionTimeout() != other.hasGracefulDecommissionTimeout()) return false;
    if (hasGracefulDecommissionTimeout()) {
      if (!getGracefulDecommissionTimeout().equals(other.getGracefulDecommissionTimeout()))
        return false;
    }
    if (java.lang.Double.doubleToLongBits(getScaleUpFactor())
        != java.lang.Double.doubleToLongBits(other.getScaleUpFactor())) return false;
    if (java.lang.Double.doubleToLongBits(getScaleDownFactor())
        != java.lang.Double.doubleToLongBits(other.getScaleDownFactor())) return false;
    if (java.lang.Double.doubleToLongBits(getScaleUpMinWorkerFraction())
        != java.lang.Double.doubleToLongBits(other.getScaleUpMinWorkerFraction())) return false;
    if (java.lang.Double.doubleToLongBits(getScaleDownMinWorkerFraction())
        != java.lang.Double.doubleToLongBits(other.getScaleDownMinWorkerFraction())) return false;
    if (!getUnknownFields().equals(other.getUnknownFields())) return false;
    return true;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    if (hasGracefulDecommissionTimeout()) {
      hash = (37 * hash) + GRACEFUL_DECOMMISSION_TIMEOUT_FIELD_NUMBER;
      hash = (53 * hash) + getGracefulDecommissionTimeout().hashCode();
    }
    hash = (37 * hash) + SCALE_UP_FACTOR_FIELD_NUMBER;
    hash =
        (53 * hash)
            + com.google.protobuf.Internal.hashLong(
                java.lang.Double.doubleToLongBits(getScaleUpFactor()));
    hash = (37 * hash) + SCALE_DOWN_FACTOR_FIELD_NUMBER;
    hash =
        (53 * hash)
            + com.google.protobuf.Internal.hashLong(
                java.lang.Double.doubleToLongBits(getScaleDownFactor()));
    hash = (37 * hash) + SCALE_UP_MIN_WORKER_FRACTION_FIELD_NUMBER;
    hash =
        (53 * hash)
            + com.google.protobuf.Internal.hashLong(
                java.lang.Double.doubleToLongBits(getScaleUpMinWorkerFraction()));
    hash = (37 * hash) + SCALE_DOWN_MIN_WORKER_FRACTION_FIELD_NUMBER;
    hash =
        (53 * hash)
            + com.google.protobuf.Internal.hashLong(
                java.lang.Double.doubleToLongBits(getScaleDownMinWorkerFraction()));
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig parseFrom(
      java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig parseFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig parseFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig parseDelimitedFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
  }

  public static com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  @java.lang.Override
  public Builder newBuilderForType() {
    return newBuilder();
  }

  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }

  public static Builder newBuilder(
      com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }

  @java.lang.Override
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   *
   *
   * <pre>
   * Basic autoscaling configurations for YARN.
   * </pre>
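   *
   * <p>A brief sketch of adjusting an existing config through its builder; {@code existingConfig}
   * and the 0.25 value are illustrative placeholders:
   *
   * <pre>{@code
   * BasicYarnAutoscalingConfig updated =
   *     existingConfig.toBuilder().setScaleDownFactor(0.25).build();
   * }</pre>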
   *
   * Protobuf type {@code google.cloud.dataproc.v1.BasicYarnAutoscalingConfig}
   */
  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
      implements
      // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.BasicYarnAutoscalingConfig)
      com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfigOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return com.google.cloud.dataproc.v1.AutoscalingPoliciesProto
          .internal_static_google_cloud_dataproc_v1_BasicYarnAutoscalingConfig_descriptor;
    }

    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.cloud.dataproc.v1.AutoscalingPoliciesProto
          .internal_static_google_cloud_dataproc_v1_BasicYarnAutoscalingConfig_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig.class,
              com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig.Builder.class);
    }

    // Construct using com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig.newBuilder()
    private Builder() {}

    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
    }

    @java.lang.Override
    public Builder clear() {
      super.clear();
      bitField0_ = 0;
      gracefulDecommissionTimeout_ = null;
      if (gracefulDecommissionTimeoutBuilder_ != null) {
        gracefulDecommissionTimeoutBuilder_.dispose();
        gracefulDecommissionTimeoutBuilder_ = null;
      }
      scaleUpFactor_ = 0D;
      scaleDownFactor_ = 0D;
      scaleUpMinWorkerFraction_ = 0D;
      scaleDownMinWorkerFraction_ = 0D;
      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.cloud.dataproc.v1.AutoscalingPoliciesProto
          .internal_static_google_cloud_dataproc_v1_BasicYarnAutoscalingConfig_descriptor;
    }

    @java.lang.Override
    public com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig getDefaultInstanceForType() {
      return com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig.getDefaultInstance();
    }

    @java.lang.Override
    public com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig build() {
      com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig buildPartial() {
      com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig result =
          new com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig(this);
      if (bitField0_ != 0) {
        buildPartial0(result);
      }
      onBuilt();
      return result;
    }

    private void buildPartial0(com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig result) {
      int from_bitField0_ = bitField0_;
      if (((from_bitField0_ & 0x00000001) != 0)) {
        result.gracefulDecommissionTimeout_ =
            gracefulDecommissionTimeoutBuilder_ == null
                ? gracefulDecommissionTimeout_
                : gracefulDecommissionTimeoutBuilder_.build();
      }
      if (((from_bitField0_ & 0x00000002) != 0)) {
        result.scaleUpFactor_ = scaleUpFactor_;
      }
      if (((from_bitField0_ & 0x00000004) != 0)) {
        result.scaleDownFactor_ = scaleDownFactor_;
      }
      if (((from_bitField0_ & 0x00000008) != 0)) {
        result.scaleUpMinWorkerFraction_ = scaleUpMinWorkerFraction_;
      }
      if (((from_bitField0_ & 0x00000010) != 0)) {
        result.scaleDownMinWorkerFraction_ = scaleDownMinWorkerFraction_;
      }
    }

    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }

    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.setField(field, value);
    }

    @java.lang.Override
    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }

    @java.lang.Override
    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }

    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }

    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }

    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig) {
        return mergeFrom((com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig other) {
      if (other == com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig.getDefaultInstance())
        return this;
      if (other.hasGracefulDecommissionTimeout()) {
        mergeGracefulDecommissionTimeout(other.getGracefulDecommissionTimeout());
      }
      if (other.getScaleUpFactor() != 0D) {
        setScaleUpFactor(other.getScaleUpFactor());
      }
      if (other.getScaleDownFactor() != 0D) {
        setScaleDownFactor(other.getScaleDownFactor());
      }
      if (other.getScaleUpMinWorkerFraction() != 0D) {
        setScaleUpMinWorkerFraction(other.getScaleUpMinWorkerFraction());
      }
      if (other.getScaleDownMinWorkerFraction() != 0D) {
        setScaleDownMinWorkerFraction(other.getScaleDownMinWorkerFraction());
      }
      this.mergeUnknownFields(other.getUnknownFields());
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 9:
              {
                scaleUpFactor_ = input.readDouble();
                bitField0_ |= 0x00000002;
                break;
              } // case 9
            case 17:
              {
                scaleDownFactor_ = input.readDouble();
                bitField0_ |= 0x00000004;
                break;
              } // case 17
            case 25:
              {
                scaleUpMinWorkerFraction_ = input.readDouble();
                bitField0_ |= 0x00000008;
                break;
              } // case 25
            case 33:
              {
                scaleDownMinWorkerFraction_ = input.readDouble();
                bitField0_ |= 0x00000010;
                break;
              } // case 33
            case 42:
              {
                input.readMessage(
                    getGracefulDecommissionTimeoutFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 42
            default:
              {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
          } // switch (tag)
        } // while (!done)
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.unwrapIOException();
      } finally {
        onChanged();
      } // finally
      return this;
    }

    private int bitField0_;

    private com.google.protobuf.Duration gracefulDecommissionTimeout_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.Duration,
            com.google.protobuf.Duration.Builder,
            com.google.protobuf.DurationOrBuilder>
        gracefulDecommissionTimeoutBuilder_;
    /**
     *
     *
     * <pre>
     * Required. Timeout for YARN graceful decommissioning of Node Managers.
     * Specifies the duration to wait for jobs to complete before forcefully
     * removing workers (and potentially interrupting jobs). Only applicable to
     * downscaling operations.
     * Bounds: [0s, 1d].
     * </pre>
     *
     * <code>
     * .google.protobuf.Duration graceful_decommission_timeout = 5 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @return Whether the gracefulDecommissionTimeout field is set.
     */
    public boolean hasGracefulDecommissionTimeout() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     *
     *
     * <pre>
     * Required. Timeout for YARN graceful decommissioning of Node Managers.
     * Specifies the duration to wait for jobs to complete before forcefully
     * removing workers (and potentially interrupting jobs). Only applicable to
     * downscaling operations.
     * Bounds: [0s, 1d].
     * </pre>
     *
     * <code>
     * .google.protobuf.Duration graceful_decommission_timeout = 5 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @return The gracefulDecommissionTimeout.
     */
    public com.google.protobuf.Duration getGracefulDecommissionTimeout() {
      if (gracefulDecommissionTimeoutBuilder_ == null) {
        return gracefulDecommissionTimeout_ == null
            ? com.google.protobuf.Duration.getDefaultInstance()
            : gracefulDecommissionTimeout_;
      } else {
        return gracefulDecommissionTimeoutBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * Required. Timeout for YARN graceful decommissioning of Node Managers.
     * Specifies the duration to wait for jobs to complete before forcefully
     * removing workers (and potentially interrupting jobs). Only applicable to
     * downscaling operations.
     * Bounds: [0s, 1d].
     * </pre>
     *
     * <code>
     * .google.protobuf.Duration graceful_decommission_timeout = 5 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     */
    public Builder setGracefulDecommissionTimeout(com.google.protobuf.Duration value) {
      if (gracefulDecommissionTimeoutBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        gracefulDecommissionTimeout_ = value;
      } else {
        gracefulDecommissionTimeoutBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00000001;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. Timeout for YARN graceful decommissioning of Node Managers.
     * Specifies the duration to wait for jobs to complete before forcefully
     * removing workers (and potentially interrupting jobs). Only applicable to
     * downscaling operations.
     * Bounds: [0s, 1d].
     * </pre>
     *
     * <code>
     * .google.protobuf.Duration graceful_decommission_timeout = 5 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     */
    public Builder setGracefulDecommissionTimeout(
        com.google.protobuf.Duration.Builder builderForValue) {
      if (gracefulDecommissionTimeoutBuilder_ == null) {
        gracefulDecommissionTimeout_ = builderForValue.build();
      } else {
        gracefulDecommissionTimeoutBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00000001;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. Timeout for YARN graceful decommissioning of Node Managers.
     * Specifies the duration to wait for jobs to complete before forcefully
     * removing workers (and potentially interrupting jobs). Only applicable to
     * downscaling operations.
     * Bounds: [0s, 1d].
     * </pre>
     *
     * <code>
     * .google.protobuf.Duration graceful_decommission_timeout = 5 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     */
    public Builder mergeGracefulDecommissionTimeout(com.google.protobuf.Duration value) {
      if (gracefulDecommissionTimeoutBuilder_ == null) {
        if (((bitField0_ & 0x00000001) != 0)
            && gracefulDecommissionTimeout_ != null
            && gracefulDecommissionTimeout_ != com.google.protobuf.Duration.getDefaultInstance()) {
          getGracefulDecommissionTimeoutBuilder().mergeFrom(value);
        } else {
          gracefulDecommissionTimeout_ = value;
        }
      } else {
        gracefulDecommissionTimeoutBuilder_.mergeFrom(value);
      }
      bitField0_ |= 0x00000001;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. Timeout for YARN graceful decommissioning of Node Managers.
     * Specifies the duration to wait for jobs to complete before forcefully
     * removing workers (and potentially interrupting jobs). Only applicable to
     * downscaling operations.
     * Bounds: [0s, 1d].
     * </pre>
     *
     * <code>
     * .google.protobuf.Duration graceful_decommission_timeout = 5 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     */
    public Builder clearGracefulDecommissionTimeout() {
      bitField0_ = (bitField0_ & ~0x00000001);
      gracefulDecommissionTimeout_ = null;
      if (gracefulDecommissionTimeoutBuilder_ != null) {
        gracefulDecommissionTimeoutBuilder_.dispose();
        gracefulDecommissionTimeoutBuilder_ = null;
      }
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. Timeout for YARN graceful decommissioning of Node Managers.
     * Specifies the duration to wait for jobs to complete before forcefully
     * removing workers (and potentially interrupting jobs). Only applicable to
     * downscaling operations.
     * Bounds: [0s, 1d].
     * </pre>
     *
     * <code>
     * .google.protobuf.Duration graceful_decommission_timeout = 5 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     */
    public com.google.protobuf.Duration.Builder getGracefulDecommissionTimeoutBuilder() {
      bitField0_ |= 0x00000001;
      onChanged();
      return getGracefulDecommissionTimeoutFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * Required. Timeout for YARN graceful decommissioning of Node Managers.
     * Specifies the duration to wait for jobs to complete before forcefully
     * removing workers (and potentially interrupting jobs). Only applicable to
     * downscaling operations.
     * Bounds: [0s, 1d].
     * </pre>
     *
     * <code>
     * .google.protobuf.Duration graceful_decommission_timeout = 5 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     */
    public com.google.protobuf.DurationOrBuilder getGracefulDecommissionTimeoutOrBuilder() {
      if (gracefulDecommissionTimeoutBuilder_ != null) {
        return gracefulDecommissionTimeoutBuilder_.getMessageOrBuilder();
      } else {
        return gracefulDecommissionTimeout_ == null
            ? com.google.protobuf.Duration.getDefaultInstance()
            : gracefulDecommissionTimeout_;
      }
    }
    /**
     *
     *
     * <pre>
     * Required. Timeout for YARN graceful decommissioning of Node Managers.
     * Specifies the duration to wait for jobs to complete before forcefully
     * removing workers (and potentially interrupting jobs). Only applicable to
     * downscaling operations.
     * Bounds: [0s, 1d].
     * </pre>
     *
     * <code>
     * .google.protobuf.Duration graceful_decommission_timeout = 5 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.Duration,
            com.google.protobuf.Duration.Builder,
            com.google.protobuf.DurationOrBuilder>
        getGracefulDecommissionTimeoutFieldBuilder() {
      if (gracefulDecommissionTimeoutBuilder_ == null) {
        gracefulDecommissionTimeoutBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.protobuf.Duration,
                com.google.protobuf.Duration.Builder,
                com.google.protobuf.DurationOrBuilder>(
                getGracefulDecommissionTimeout(), getParentForChildren(), isClean());
        gracefulDecommissionTimeout_ = null;
      }
      return gracefulDecommissionTimeoutBuilder_;
    }

    private double scaleUpFactor_;
    /**
     *
     *
     * <pre>
     * Required. Fraction of average YARN pending memory in the last cooldown
     * period for which to add workers. A scale-up factor of 1.0 will result in
     * scaling up so that there is no pending memory remaining after the update
     * (more aggressive scaling). A scale-up factor closer to 0 will result in a
     * smaller magnitude of scaling up (less aggressive scaling). See [How
     * autoscaling
     * works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
     * for more information.
     * Bounds: [0.0, 1.0].
     * </pre>
     *
     * <code>double scale_up_factor = 1 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return The scaleUpFactor.
     */
    @java.lang.Override
    public double getScaleUpFactor() {
      return scaleUpFactor_;
    }
    /**
     *
     *
     * <pre>
     * Required. Fraction of average YARN pending memory in the last cooldown
     * period for which to add workers. A scale-up factor of 1.0 will result in
     * scaling up so that there is no pending memory remaining after the update
     * (more aggressive scaling). A scale-up factor closer to 0 will result in a
     * smaller magnitude of scaling up (less aggressive scaling). See [How
     * autoscaling
     * works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
     * for more information.
     * Bounds: [0.0, 1.0].
     * </pre>
     *
     * <code>double scale_up_factor = 1 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @param value The scaleUpFactor to set.
     * @return This builder for chaining.
     */
    public Builder setScaleUpFactor(double value) {

      scaleUpFactor_ = value;
      bitField0_ |= 0x00000002;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. Fraction of average YARN pending memory in the last cooldown
     * period for which to add workers. A scale-up factor of 1.0 will result in
     * scaling up so that there is no pending memory remaining after the update
     * (more aggressive scaling). A scale-up factor closer to 0 will result in a
     * smaller magnitude of scaling up (less aggressive scaling). See [How
     * autoscaling
     * works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
     * for more information.
     * Bounds: [0.0, 1.0].
     * </pre>
     *
     * <code>double scale_up_factor = 1 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearScaleUpFactor() {
      bitField0_ = (bitField0_ & ~0x00000002);
      scaleUpFactor_ = 0D;
      onChanged();
      return this;
    }

    private double scaleDownFactor_;
    /**
     *
     *
     * <pre>
     * Required. Fraction of average YARN pending memory in the last cooldown
     * period for which to remove workers. A scale-down factor of 1 will result in
     * scaling down so that there is no available memory remaining after the
     * update (more aggressive scaling). A scale-down factor of 0 disables
     * removing workers, which can be beneficial for autoscaling a single job.
     * See [How autoscaling
     * works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
     * for more information.
     * Bounds: [0.0, 1.0].
     * </pre>
     *
     * <code>double scale_down_factor = 2 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return The scaleDownFactor.
     */
    @java.lang.Override
    public double getScaleDownFactor() {
      return scaleDownFactor_;
    }
    /**
     *
     *
     * <pre>
     * Required. Fraction of average YARN pending memory in the last cooldown
     * period for which to remove workers. A scale-down factor of 1 will result in
     * scaling down so that there is no available memory remaining after the
     * update (more aggressive scaling). A scale-down factor of 0 disables
     * removing workers, which can be beneficial for autoscaling a single job.
     * See [How autoscaling
     * works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
     * for more information.
     * Bounds: [0.0, 1.0].
     * </pre>
     *
     * <code>double scale_down_factor = 2 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @param value The scaleDownFactor to set.
     * @return This builder for chaining.
     */
    public Builder setScaleDownFactor(double value) {

      scaleDownFactor_ = value;
      bitField0_ |= 0x00000004;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. Fraction of average YARN pending memory in the last cooldown
     * period for which to remove workers. A scale-down factor of 1 will result in
     * scaling down so that there is no available memory remaining after the
     * update (more aggressive scaling). A scale-down factor of 0 disables
     * removing workers, which can be beneficial for autoscaling a single job.
     * See [How autoscaling
     * works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
     * for more information.
     * Bounds: [0.0, 1.0].
     * </pre>
     *
     * <code>double scale_down_factor = 2 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearScaleDownFactor() {
      bitField0_ = (bitField0_ & ~0x00000004);
      scaleDownFactor_ = 0D;
      onChanged();
      return this;
    }

    private double scaleUpMinWorkerFraction_;
    /**
     *
     *
     * <pre>
     * Optional. Minimum scale-up threshold as a fraction of total cluster size
     * before scaling occurs. For example, in a 20-worker cluster, a threshold of
     * 0.1 means the autoscaler must recommend at least a 2-worker scale-up for
     * the cluster to scale. A threshold of 0 means the autoscaler will scale up
     * on any recommended change.
     * Bounds: [0.0, 1.0]. Default: 0.0.
     * </pre>
     *
     * <code>double scale_up_min_worker_fraction = 3 [(.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return The scaleUpMinWorkerFraction.
     */
    @java.lang.Override
    public double getScaleUpMinWorkerFraction() {
      return scaleUpMinWorkerFraction_;
    }
    /**
     *
     *
     * <pre>
     * Optional. Minimum scale-up threshold as a fraction of total cluster size
     * before scaling occurs. For example, in a 20-worker cluster, a threshold of
     * 0.1 means the autoscaler must recommend at least a 2-worker scale-up for
     * the cluster to scale. A threshold of 0 means the autoscaler will scale up
     * on any recommended change.
     * Bounds: [0.0, 1.0]. Default: 0.0.
     * </pre>
     *
     * <code>double scale_up_min_worker_fraction = 3 [(.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @param value The scaleUpMinWorkerFraction to set.
     * @return This builder for chaining.
     */
    public Builder setScaleUpMinWorkerFraction(double value) {

      scaleUpMinWorkerFraction_ = value;
      bitField0_ |= 0x00000008;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Optional. Minimum scale-up threshold as a fraction of total cluster size
     * before scaling occurs. For example, in a 20-worker cluster, a threshold of
     * 0.1 means the autoscaler must recommend at least a 2-worker scale-up for
     * the cluster to scale. A threshold of 0 means the autoscaler will scale up
     * on any recommended change.
     * Bounds: [0.0, 1.0]. Default: 0.0.
     * </pre>
     *
     * <code>double scale_up_min_worker_fraction = 3 [(.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return This builder for chaining.
     */
    public Builder clearScaleUpMinWorkerFraction() {
      bitField0_ = (bitField0_ & ~0x00000008);
      scaleUpMinWorkerFraction_ = 0D;
      onChanged();
      return this;
    }

    private double scaleDownMinWorkerFraction_;
    /**
     *
     *
     * <pre>
     * Optional. Minimum scale-down threshold as a fraction of total cluster size
     * before scaling occurs. For example, in a 20-worker cluster, a threshold of
     * 0.1 means the autoscaler must recommend at least a 2-worker scale-down for
     * the cluster to scale. A threshold of 0 means the autoscaler will scale down
     * on any recommended change.
     * Bounds: [0.0, 1.0]. Default: 0.0.
     * </pre>
     *
     * <code>double scale_down_min_worker_fraction = 4 [(.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return The scaleDownMinWorkerFraction.
     */
    @java.lang.Override
    public double getScaleDownMinWorkerFraction() {
      return scaleDownMinWorkerFraction_;
    }
    /**
     *
     *
     * <pre>
     * Optional. Minimum scale-down threshold as a fraction of total cluster size
     * before scaling occurs. For example, in a 20-worker cluster, a threshold of
     * 0.1 means the autoscaler must recommend at least a 2-worker scale-down for
     * the cluster to scale. A threshold of 0 means the autoscaler will scale down
     * on any recommended change.
     * Bounds: [0.0, 1.0]. Default: 0.0.
     * </pre>
     *
     * <code>double scale_down_min_worker_fraction = 4 [(.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @param value The scaleDownMinWorkerFraction to set.
     * @return This builder for chaining.
     */
    public Builder setScaleDownMinWorkerFraction(double value) {

      scaleDownMinWorkerFraction_ = value;
      bitField0_ |= 0x00000010;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Optional. Minimum scale-down threshold as a fraction of total cluster size
     * before scaling occurs. For example, in a 20-worker cluster, a threshold of
     * 0.1 means the autoscaler must recommend at least a 2-worker scale-down for
     * the cluster to scale. A threshold of 0 means the autoscaler will scale down
     * on any recommended change.
     * Bounds: [0.0, 1.0]. Default: 0.0.
     * </pre>
     *
     * <code>double scale_down_min_worker_fraction = 4 [(.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return This builder for chaining.
     */
    public Builder clearScaleDownMinWorkerFraction() {
      bitField0_ = (bitField0_ & ~0x00000010);
      scaleDownMinWorkerFraction_ = 0D;
      onChanged();
      return this;
    }

    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.BasicYarnAutoscalingConfig)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.BasicYarnAutoscalingConfig)
  private static final com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig();
  }

  public static com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<BasicYarnAutoscalingConfig> PARSER =
      new com.google.protobuf.AbstractParser<BasicYarnAutoscalingConfig>() {
        @java.lang.Override
        public BasicYarnAutoscalingConfig parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (com.google.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new com.google.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

  public static com.google.protobuf.Parser<BasicYarnAutoscalingConfig> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<BasicYarnAutoscalingConfig> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
