/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/speech/v2/cloud_speech.proto

package com.google.cloud.speech.v2;

/**
 *
 *
 * <pre>
 * `StreamingRecognizeResponse` is the only message returned to the client by
 * `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
 * messages is streamed back to the client. If there is no recognizable
 * audio, no messages are streamed back to the client.
 * Here are some examples of `StreamingRecognizeResponse`s that might
 * be returned while processing audio:
 * 1. results { alternatives { transcript: "tube" } stability: 0.01 }
 * 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
 * 3. results { alternatives { transcript: "to be" } stability: 0.9 }
 *    results { alternatives { transcript: " or not to be" } stability: 0.01 }
 * 4. results { alternatives { transcript: "to be or not to be"
 *                             confidence: 0.92 }
 *              alternatives { transcript: "to bee or not to bee" }
 *              is_final: true }
 * 5. results { alternatives { transcript: " that's" } stability: 0.01 }
 * 6. results { alternatives { transcript: " that is" } stability: 0.9 }
 *    results { alternatives { transcript: " the question" } stability: 0.01 }
 * 7. results { alternatives { transcript: " that is the question"
 *                             confidence: 0.98 }
 *              alternatives { transcript: " that was the question" }
 *              is_final: true }
 * Notes:
 * - Only two of the above responses, #4 and #7, contain final results; they
 *   are indicated by `is_final: true`. Concatenating these generates the
 *   full transcript: "to be or not to be that is the question".
 * - The others contain interim `results`. #3 and #6 contain two interim
 *   `results`: the first portion has high stability and is less likely to
 *   change; the second portion has low stability and is very likely to
 *   change. A UI designer might choose to show only high-stability `results`.
 * - The specific `stability` and `confidence` values shown above are only for
 *   illustrative purposes. Actual values may vary.
 * - In each response, only one of these fields will be set:
 *     `error`,
 *     `speech_event_type`, or
 *     one or more (repeated) `results`.
 * </pre>
 *
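 * <p>A hedged usage sketch of consuming the stream (the {@code responses}
 * iterable is an assumption standing in for the gRPC response stream):
 *
 * <pre>{@code
 * StringBuilder transcript = new StringBuilder();
 * for (StreamingRecognizeResponse response : responses) {
 *   for (StreamingRecognitionResult result : response.getResultsList()) {
 *     // Append only finalized portions, mirroring notes on #4 and #7 above.
 *     if (result.getIsFinal() && result.getAlternativesCount() > 0) {
 *       transcript.append(result.getAlternatives(0).getTranscript());
 *     }
 *   }
 * }
 * }</pre>
 *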
 * Protobuf type {@code google.cloud.speech.v2.StreamingRecognizeResponse}
 */
public final class StreamingRecognizeResponse extends com.google.protobuf.GeneratedMessageV3
    implements
    // @@protoc_insertion_point(message_implements:google.cloud.speech.v2.StreamingRecognizeResponse)
    StreamingRecognizeResponseOrBuilder {
  private static final long serialVersionUID = 0L;
  // Use StreamingRecognizeResponse.newBuilder() to construct.
  private StreamingRecognizeResponse(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }

  private StreamingRecognizeResponse() {
    results_ = java.util.Collections.emptyList();
    speechEventType_ = 0;
  }

  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
    return new StreamingRecognizeResponse();
  }

  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
    return this.unknownFields;
  }

  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return com.google.cloud.speech.v2.CloudSpeechProto
        .internal_static_google_cloud_speech_v2_StreamingRecognizeResponse_descriptor;
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.google.cloud.speech.v2.CloudSpeechProto
        .internal_static_google_cloud_speech_v2_StreamingRecognizeResponse_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.speech.v2.StreamingRecognizeResponse.class,
            com.google.cloud.speech.v2.StreamingRecognizeResponse.Builder.class);
  }

  /**
   *
   *
   * <pre>
   * Indicates the type of speech event.
   * </pre>
   *
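   * <p>A minimal dispatch sketch; the handler methods are hypothetical:
   *
   * <pre>{@code
   * switch (response.getSpeechEventType()) {
   *   case SPEECH_ACTIVITY_BEGIN:
   *     onSpeechStart(); // hypothetical callback
   *     break;
   *   case SPEECH_ACTIVITY_END:
   *     onSpeechStop(); // hypothetical callback
   *     break;
   *   default:
   *     break;
   * }
   * }</pre>
   *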
   * Protobuf enum {@code google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType}
   */
  public enum SpeechEventType implements com.google.protobuf.ProtocolMessageEnum {
    /**
     *
     *
     * <pre>
     * No speech event specified.
     * </pre>
     *
     * <code>SPEECH_EVENT_TYPE_UNSPECIFIED = 0;</code>
     */
    SPEECH_EVENT_TYPE_UNSPECIFIED(0),
    /**
     *
     *
     * <pre>
     * This event indicates that the server has detected the end of the user's
     * speech utterance and expects no additional speech. Therefore, the server
     * will not process additional audio and will close the gRPC bidirectional
 * stream. This event is only sent if there was a forced cutoff due to
     * silence being detected early. This event is only available through the
     * `latest_short` [model][google.cloud.speech.v2.Recognizer.model].
     * </pre>
     *
     * <code>END_OF_SINGLE_UTTERANCE = 1;</code>
     */
    END_OF_SINGLE_UTTERANCE(1),
    /**
     *
     *
     * <pre>
     * This event indicates that the server has detected the beginning of human
     * voice activity in the stream. This event can be returned multiple times
     * if speech starts and stops repeatedly throughout the stream. This event
     * is only sent if `voice_activity_events` is set to true.
     * </pre>
     *
     * <code>SPEECH_ACTIVITY_BEGIN = 2;</code>
     */
    SPEECH_ACTIVITY_BEGIN(2),
    /**
     *
     *
     * <pre>
     * This event indicates that the server has detected the end of human voice
     * activity in the stream. This event can be returned multiple times if
     * speech starts and stops repeatedly throughout the stream. This event is
     * only sent if `voice_activity_events` is set to true.
     * </pre>
     *
     * <code>SPEECH_ACTIVITY_END = 3;</code>
     */
    SPEECH_ACTIVITY_END(3),
    UNRECOGNIZED(-1),
    ;

    /**
     *
     *
     * <pre>
     * No speech event specified.
     * </pre>
     *
     * <code>SPEECH_EVENT_TYPE_UNSPECIFIED = 0;</code>
     */
    public static final int SPEECH_EVENT_TYPE_UNSPECIFIED_VALUE = 0;
    /**
     *
     *
     * <pre>
     * This event indicates that the server has detected the end of the user's
     * speech utterance and expects no additional speech. Therefore, the server
     * will not process additional audio and will close the gRPC bidirectional
     * stream. This event is only sent if there was a forced cutoff due to
     * silence being detected early. This event is only available through the
     * `latest_short` [model][google.cloud.speech.v2.Recognizer.model].
     * </pre>
     *
     * <code>END_OF_SINGLE_UTTERANCE = 1;</code>
     */
    public static final int END_OF_SINGLE_UTTERANCE_VALUE = 1;
    /**
     *
     *
     * <pre>
     * This event indicates that the server has detected the beginning of human
     * voice activity in the stream. This event can be returned multiple times
     * if speech starts and stops repeatedly throughout the stream. This event
     * is only sent if `voice_activity_events` is set to true.
     * </pre>
     *
     * <code>SPEECH_ACTIVITY_BEGIN = 2;</code>
     */
    public static final int SPEECH_ACTIVITY_BEGIN_VALUE = 2;
    /**
     *
     *
     * <pre>
     * This event indicates that the server has detected the end of human voice
     * activity in the stream. This event can be returned multiple times if
     * speech starts and stops repeatedly throughout the stream. This event is
     * only sent if `voice_activity_events` is set to true.
     * </pre>
     *
     * <code>SPEECH_ACTIVITY_END = 3;</code>
     */
    public static final int SPEECH_ACTIVITY_END_VALUE = 3;

    public final int getNumber() {
      if (this == UNRECOGNIZED) {
        throw new java.lang.IllegalArgumentException(
            "Can't get the number of an unknown enum value.");
      }
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static SpeechEventType valueOf(int value) {
      return forNumber(value);
    }

    /**
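     * Returns {@code null} (not {@code UNRECOGNIZED}) when the number has no
     * matching entry, so callers should handle that case, e.g.:
     *
     * <pre>{@code
     * SpeechEventType type = SpeechEventType.forNumber(raw);
     * if (type == null) {
     *   type = SpeechEventType.UNRECOGNIZED; // e.g. a value from a newer server
     * }
     * }</pre>
     *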
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static SpeechEventType forNumber(int value) {
      switch (value) {
        case 0:
          return SPEECH_EVENT_TYPE_UNSPECIFIED;
        case 1:
          return END_OF_SINGLE_UTTERANCE;
        case 2:
          return SPEECH_ACTIVITY_BEGIN;
        case 3:
          return SPEECH_ACTIVITY_END;
        default:
          return null;
      }
    }

    public static com.google.protobuf.Internal.EnumLiteMap<SpeechEventType> internalGetValueMap() {
      return internalValueMap;
    }

    private static final com.google.protobuf.Internal.EnumLiteMap<SpeechEventType>
        internalValueMap =
            new com.google.protobuf.Internal.EnumLiteMap<SpeechEventType>() {
              public SpeechEventType findValueByNumber(int number) {
                return SpeechEventType.forNumber(number);
              }
            };

    public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() {
      if (this == UNRECOGNIZED) {
        throw new java.lang.IllegalStateException(
            "Can't get the descriptor of an unrecognized enum value.");
      }
      return getDescriptor().getValues().get(ordinal());
    }

    public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() {
      return getDescriptor();
    }

    public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() {
      return com.google.cloud.speech.v2.StreamingRecognizeResponse.getDescriptor()
          .getEnumTypes()
          .get(0);
    }

    private static final SpeechEventType[] VALUES = values();

    public static SpeechEventType valueOf(
        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type.");
      }
      if (desc.getIndex() == -1) {
        return UNRECOGNIZED;
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private SpeechEventType(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType)
  }

  public static final int RESULTS_FIELD_NUMBER = 6;

  @SuppressWarnings("serial")
  private java.util.List<com.google.cloud.speech.v2.StreamingRecognitionResult> results_;
  /**
   *
   *
   * <pre>
   * This repeated list contains zero or more results that
   * correspond to consecutive portions of the audio currently being processed.
   * It contains zero or one
   * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
   * result (the newly settled portion), followed by zero or more
   * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
   * results (the interim results).
   * </pre>
   *
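   * <p>A hedged sketch of the interim/final split described above (the sinks
   * {@code commitFinal} and {@code showInterim} are hypothetical):
   *
   * <pre>{@code
   * for (StreamingRecognitionResult result : response.getResultsList()) {
   *   if (result.getIsFinal()) {
   *     commitFinal(result); // settled text
   *   } else if (result.getStability() > 0.8f) {
   *     showInterim(result); // high-stability interim text
   *   }
   * }
   * }</pre>
   *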
   * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
   */
  @java.lang.Override
  public java.util.List<com.google.cloud.speech.v2.StreamingRecognitionResult> getResultsList() {
    return results_;
  }
  /**
   *
   *
   * <pre>
   * This repeated list contains zero or more results that
   * correspond to consecutive portions of the audio currently being processed.
   * It contains zero or one
   * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
   * result (the newly settled portion), followed by zero or more
   * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
   * results (the interim results).
   * </pre>
   *
   * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
   */
  @java.lang.Override
  public java.util.List<? extends com.google.cloud.speech.v2.StreamingRecognitionResultOrBuilder>
      getResultsOrBuilderList() {
    return results_;
  }
  /**
   *
   *
   * <pre>
   * This repeated list contains zero or more results that
   * correspond to consecutive portions of the audio currently being processed.
   * It contains zero or one
   * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
   * result (the newly settled portion), followed by zero or more
   * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
   * results (the interim results).
   * </pre>
   *
   * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
   */
  @java.lang.Override
  public int getResultsCount() {
    return results_.size();
  }
  /**
   *
   *
   * <pre>
   * This repeated list contains zero or more results that
   * correspond to consecutive portions of the audio currently being processed.
   * It contains zero or one
   * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
   * result (the newly settled portion), followed by zero or more
   * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
   * results (the interim results).
   * </pre>
   *
   * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
   */
  @java.lang.Override
  public com.google.cloud.speech.v2.StreamingRecognitionResult getResults(int index) {
    return results_.get(index);
  }
  /**
   *
   *
   * <pre>
   * This repeated list contains zero or more results that
   * correspond to consecutive portions of the audio currently being processed.
   * It contains zero or one
   * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
   * result (the newly settled portion), followed by zero or more
   * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
   * results (the interim results).
   * </pre>
   *
   * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
   */
  @java.lang.Override
  public com.google.cloud.speech.v2.StreamingRecognitionResultOrBuilder getResultsOrBuilder(
      int index) {
    return results_.get(index);
  }

  public static final int SPEECH_EVENT_TYPE_FIELD_NUMBER = 3;
  private int speechEventType_ = 0;
  /**
   *
   *
   * <pre>
   * Indicates the type of speech event.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType speech_event_type = 3;
   * </code>
   *
   * @return The enum numeric value on the wire for speechEventType.
   */
  @java.lang.Override
  public int getSpeechEventTypeValue() {
    return speechEventType_;
  }
  /**
   *
   *
   * <pre>
   * Indicates the type of speech event.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType speech_event_type = 3;
   * </code>
   *
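   * <p>Unknown wire values surface as {@code UNRECOGNIZED}; a sketch of
   * forward-compatible handling:
   *
   * <pre>{@code
   * SpeechEventType eventType = response.getSpeechEventType();
   * if (eventType == SpeechEventType.UNRECOGNIZED) {
   *   // Sent by a newer server; fall back to the raw number.
   *   int raw = response.getSpeechEventTypeValue();
   * }
   * }</pre>
   *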
   * @return The speechEventType.
   */
  @java.lang.Override
  public com.google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType
      getSpeechEventType() {
    com.google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType result =
        com.google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType.forNumber(
            speechEventType_);
    return result == null
        ? com.google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType.UNRECOGNIZED
        : result;
  }

  public static final int SPEECH_EVENT_OFFSET_FIELD_NUMBER = 7;
  private com.google.protobuf.Duration speechEventOffset_;
  /**
   *
   *
   * <pre>
   * Time offset between the beginning of the audio and event emission.
   * </pre>
   *
   * <code>.google.protobuf.Duration speech_event_offset = 7;</code>
   *
   * @return Whether the speechEventOffset field is set.
   */
  @java.lang.Override
  public boolean hasSpeechEventOffset() {
    return speechEventOffset_ != null;
  }
  /**
   *
   *
   * <pre>
   * Time offset between the beginning of the audio and event emission.
   * </pre>
   *
   * <code>.google.protobuf.Duration speech_event_offset = 7;</code>
   *
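   * <p>A sketch of converting the offset for display (assumes the optional
   * {@code protobuf-java-util} artifact for {@code Durations}):
   *
   * <pre>{@code
   * if (response.hasSpeechEventOffset()) {
   *   long offsetMillis =
   *       com.google.protobuf.util.Durations.toMillis(response.getSpeechEventOffset());
   * }
   * }</pre>
   *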
   * @return The speechEventOffset.
   */
  @java.lang.Override
  public com.google.protobuf.Duration getSpeechEventOffset() {
    return speechEventOffset_ == null
        ? com.google.protobuf.Duration.getDefaultInstance()
        : speechEventOffset_;
  }
  /**
   *
   *
   * <pre>
   * Time offset between the beginning of the audio and event emission.
   * </pre>
   *
   * <code>.google.protobuf.Duration speech_event_offset = 7;</code>
   */
  @java.lang.Override
  public com.google.protobuf.DurationOrBuilder getSpeechEventOffsetOrBuilder() {
    return speechEventOffset_ == null
        ? com.google.protobuf.Duration.getDefaultInstance()
        : speechEventOffset_;
  }

  public static final int METADATA_FIELD_NUMBER = 5;
  private com.google.cloud.speech.v2.RecognitionResponseMetadata metadata_;
  /**
   *
   *
   * <pre>
   * Metadata about the recognition.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.RecognitionResponseMetadata metadata = 5;</code>
   *
   * @return Whether the metadata field is set.
   */
  @java.lang.Override
  public boolean hasMetadata() {
    return metadata_ != null;
  }
  /**
   *
   *
   * <pre>
   * Metadata about the recognition.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.RecognitionResponseMetadata metadata = 5;</code>
   *
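   * <p>A minimal sketch of reading the metadata only when it is present:
   *
   * <pre>{@code
   * if (response.hasMetadata()) {
   *   RecognitionResponseMetadata metadata = response.getMetadata();
   *   // Inspect recognition usage details carried alongside the results.
   * }
   * }</pre>
   *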
   * @return The metadata.
   */
  @java.lang.Override
  public com.google.cloud.speech.v2.RecognitionResponseMetadata getMetadata() {
    return metadata_ == null
        ? com.google.cloud.speech.v2.RecognitionResponseMetadata.getDefaultInstance()
        : metadata_;
  }
  /**
   *
   *
   * <pre>
   * Metadata about the recognition.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.RecognitionResponseMetadata metadata = 5;</code>
   */
  @java.lang.Override
  public com.google.cloud.speech.v2.RecognitionResponseMetadataOrBuilder getMetadataOrBuilder() {
    return metadata_ == null
        ? com.google.cloud.speech.v2.RecognitionResponseMetadata.getDefaultInstance()
        : metadata_;
  }

  private byte memoizedIsInitialized = -1;

  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
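    // Fields are emitted in ascending field-number order (3, 5, 6, 7): message
    // fields only when set, and the enum only when it differs from the default
    // SPEECH_EVENT_TYPE_UNSPECIFIED (0).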
    if (speechEventType_
        != com.google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType
            .SPEECH_EVENT_TYPE_UNSPECIFIED
            .getNumber()) {
      output.writeEnum(3, speechEventType_);
    }
    if (metadata_ != null) {
      output.writeMessage(5, getMetadata());
    }
    for (int i = 0; i < results_.size(); i++) {
      output.writeMessage(6, results_.get(i));
    }
    if (speechEventOffset_ != null) {
      output.writeMessage(7, getSpeechEventOffset());
    }
    getUnknownFields().writeTo(output);
  }

  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    if (speechEventType_
        != com.google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType
            .SPEECH_EVENT_TYPE_UNSPECIFIED
            .getNumber()) {
      size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, speechEventType_);
    }
    if (metadata_ != null) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getMetadata());
    }
    for (int i = 0; i < results_.size(); i++) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, results_.get(i));
    }
    if (speechEventOffset_ != null) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getSpeechEventOffset());
    }
    size += getUnknownFields().getSerializedSize();
    memoizedSize = size;
    return size;
  }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof com.google.cloud.speech.v2.StreamingRecognizeResponse)) {
      return super.equals(obj);
    }
    com.google.cloud.speech.v2.StreamingRecognizeResponse other =
        (com.google.cloud.speech.v2.StreamingRecognizeResponse) obj;

    if (!getResultsList().equals(other.getResultsList())) return false;
    if (speechEventType_ != other.speechEventType_) return false;
    if (hasSpeechEventOffset() != other.hasSpeechEventOffset()) return false;
    if (hasSpeechEventOffset()) {
      if (!getSpeechEventOffset().equals(other.getSpeechEventOffset())) return false;
    }
    if (hasMetadata() != other.hasMetadata()) return false;
    if (hasMetadata()) {
      if (!getMetadata().equals(other.getMetadata())) return false;
    }
    if (!getUnknownFields().equals(other.getUnknownFields())) return false;
    return true;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    if (getResultsCount() > 0) {
      hash = (37 * hash) + RESULTS_FIELD_NUMBER;
      hash = (53 * hash) + getResultsList().hashCode();
    }
    hash = (37 * hash) + SPEECH_EVENT_TYPE_FIELD_NUMBER;
    hash = (53 * hash) + speechEventType_;
    if (hasSpeechEventOffset()) {
      hash = (37 * hash) + SPEECH_EVENT_OFFSET_FIELD_NUMBER;
      hash = (53 * hash) + getSpeechEventOffset().hashCode();
    }
    if (hasMetadata()) {
      hash = (37 * hash) + METADATA_FIELD_NUMBER;
      hash = (53 * hash) + getMetadata().hashCode();
    }
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static com.google.cloud.speech.v2.StreamingRecognizeResponse parseFrom(
      java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v2.StreamingRecognizeResponse parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.speech.v2.StreamingRecognizeResponse parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v2.StreamingRecognizeResponse parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.speech.v2.StreamingRecognizeResponse parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v2.StreamingRecognizeResponse parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
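
  // A hedged usage sketch: round-tripping a response through its wire form
  // (illustrative only; in practice the bytes come off the gRPC stream):
  //
  //   byte[] wire = response.toByteArray();
  //   StreamingRecognizeResponse parsed = StreamingRecognizeResponse.parseFrom(wire);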

  public static com.google.cloud.speech.v2.StreamingRecognizeResponse parseFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v2.StreamingRecognizeResponse parseFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.speech.v2.StreamingRecognizeResponse parseDelimitedFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v2.StreamingRecognizeResponse parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.speech.v2.StreamingRecognizeResponse parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v2.StreamingRecognizeResponse parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  @java.lang.Override
  public Builder newBuilderForType() {
    return newBuilder();
  }

  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }

  public static Builder newBuilder(
      com.google.cloud.speech.v2.StreamingRecognizeResponse prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }

  @java.lang.Override
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   *
   *
   * <pre>
   * `StreamingRecognizeResponse` is the only message returned to the client by
   * `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
   * messages is streamed back to the client. If there is no recognizable
   * audio, no messages are streamed back to the client.
   * Here are some examples of `StreamingRecognizeResponse`s that might
   * be returned while processing audio:
   * 1. results { alternatives { transcript: "tube" } stability: 0.01 }
   * 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
   * 3. results { alternatives { transcript: "to be" } stability: 0.9 }
   *    results { alternatives { transcript: " or not to be" } stability: 0.01 }
   * 4. results { alternatives { transcript: "to be or not to be"
   *                             confidence: 0.92 }
   *              alternatives { transcript: "to bee or not to bee" }
   *              is_final: true }
   * 5. results { alternatives { transcript: " that's" } stability: 0.01 }
   * 6. results { alternatives { transcript: " that is" } stability: 0.9 }
   *    results { alternatives { transcript: " the question" } stability: 0.01 }
   * 7. results { alternatives { transcript: " that is the question"
   *                             confidence: 0.98 }
   *              alternatives { transcript: " that was the question" }
   *              is_final: true }
   * Notes:
   * - Only two of the above responses, #4 and #7, contain final results; they
   *   are indicated by `is_final: true`. Concatenating these generates the
   *   full transcript: "to be or not to be that is the question".
   * - The others contain interim `results`. #3 and #6 contain two interim
   *   `results`: the first portion has high stability and is less likely to
   *   change; the second portion has low stability and is very likely to
   *   change. A UI designer might choose to show only high-stability `results`.
   * - The specific `stability` and `confidence` values shown above are only for
   *   illustrative purposes. Actual values may vary.
   * - In each response, only one of these fields will be set:
   *     `error`,
   *     `speech_event_type`, or
   *     one or more (repeated) `results`.
   * </pre>
   *
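   * <p>A sketch of assembling a response by hand, typically only in tests or
   * fakes (real responses come from the service):
   *
   * <pre>{@code
   * StreamingRecognizeResponse response =
   *     StreamingRecognizeResponse.newBuilder()
   *         .setSpeechEventType(SpeechEventType.SPEECH_ACTIVITY_BEGIN)
   *         .build();
   * }</pre>
   *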
   * Protobuf type {@code google.cloud.speech.v2.StreamingRecognizeResponse}
   */
  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
      implements
      // @@protoc_insertion_point(builder_implements:google.cloud.speech.v2.StreamingRecognizeResponse)
      com.google.cloud.speech.v2.StreamingRecognizeResponseOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return com.google.cloud.speech.v2.CloudSpeechProto
          .internal_static_google_cloud_speech_v2_StreamingRecognizeResponse_descriptor;
    }

    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.cloud.speech.v2.CloudSpeechProto
          .internal_static_google_cloud_speech_v2_StreamingRecognizeResponse_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.cloud.speech.v2.StreamingRecognizeResponse.class,
              com.google.cloud.speech.v2.StreamingRecognizeResponse.Builder.class);
    }

    // Construct using com.google.cloud.speech.v2.StreamingRecognizeResponse.newBuilder()
    private Builder() {}

    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
    }

    @java.lang.Override
    public Builder clear() {
      super.clear();
      bitField0_ = 0;
      if (resultsBuilder_ == null) {
        results_ = java.util.Collections.emptyList();
      } else {
        results_ = null;
        resultsBuilder_.clear();
      }
      bitField0_ = (bitField0_ & ~0x00000001);
      speechEventType_ = 0;
      speechEventOffset_ = null;
      if (speechEventOffsetBuilder_ != null) {
        speechEventOffsetBuilder_.dispose();
        speechEventOffsetBuilder_ = null;
      }
      metadata_ = null;
      if (metadataBuilder_ != null) {
        metadataBuilder_.dispose();
        metadataBuilder_ = null;
      }
      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.cloud.speech.v2.CloudSpeechProto
          .internal_static_google_cloud_speech_v2_StreamingRecognizeResponse_descriptor;
    }

    @java.lang.Override
    public com.google.cloud.speech.v2.StreamingRecognizeResponse getDefaultInstanceForType() {
      return com.google.cloud.speech.v2.StreamingRecognizeResponse.getDefaultInstance();
    }

    @java.lang.Override
    public com.google.cloud.speech.v2.StreamingRecognizeResponse build() {
      com.google.cloud.speech.v2.StreamingRecognizeResponse result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public com.google.cloud.speech.v2.StreamingRecognizeResponse buildPartial() {
      com.google.cloud.speech.v2.StreamingRecognizeResponse result =
          new com.google.cloud.speech.v2.StreamingRecognizeResponse(this);
      buildPartialRepeatedFields(result);
      if (bitField0_ != 0) {
        buildPartial0(result);
      }
      onBuilt();
      return result;
    }

    private void buildPartialRepeatedFields(
        com.google.cloud.speech.v2.StreamingRecognizeResponse result) {
      if (resultsBuilder_ == null) {
        if (((bitField0_ & 0x00000001) != 0)) {
          results_ = java.util.Collections.unmodifiableList(results_);
          bitField0_ = (bitField0_ & ~0x00000001);
        }
        result.results_ = results_;
      } else {
        result.results_ = resultsBuilder_.build();
      }
    }

    private void buildPartial0(com.google.cloud.speech.v2.StreamingRecognizeResponse result) {
      int from_bitField0_ = bitField0_;
      if (((from_bitField0_ & 0x00000002) != 0)) {
        result.speechEventType_ = speechEventType_;
      }
      if (((from_bitField0_ & 0x00000004) != 0)) {
        result.speechEventOffset_ =
            speechEventOffsetBuilder_ == null
                ? speechEventOffset_
                : speechEventOffsetBuilder_.build();
      }
      if (((from_bitField0_ & 0x00000008) != 0)) {
        result.metadata_ = metadataBuilder_ == null ? metadata_ : metadataBuilder_.build();
      }
    }

    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }

    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.setField(field, value);
    }

    @java.lang.Override
    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }

    @java.lang.Override
    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }

    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }

    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }

    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.cloud.speech.v2.StreamingRecognizeResponse) {
        return mergeFrom((com.google.cloud.speech.v2.StreamingRecognizeResponse) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(com.google.cloud.speech.v2.StreamingRecognizeResponse other) {
      if (other == com.google.cloud.speech.v2.StreamingRecognizeResponse.getDefaultInstance())
        return this;
      if (resultsBuilder_ == null) {
        if (!other.results_.isEmpty()) {
          if (results_.isEmpty()) {
            results_ = other.results_;
            bitField0_ = (bitField0_ & ~0x00000001);
          } else {
            ensureResultsIsMutable();
            results_.addAll(other.results_);
          }
          onChanged();
        }
      } else {
        if (!other.results_.isEmpty()) {
          if (resultsBuilder_.isEmpty()) {
            resultsBuilder_.dispose();
            resultsBuilder_ = null;
            results_ = other.results_;
            bitField0_ = (bitField0_ & ~0x00000001);
            resultsBuilder_ =
                com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
                    ? getResultsFieldBuilder()
                    : null;
          } else {
            resultsBuilder_.addAllMessages(other.results_);
          }
        }
      }
      if (other.speechEventType_ != 0) {
        setSpeechEventTypeValue(other.getSpeechEventTypeValue());
      }
      if (other.hasSpeechEventOffset()) {
        mergeSpeechEventOffset(other.getSpeechEventOffset());
      }
      if (other.hasMetadata()) {
        mergeMetadata(other.getMetadata());
      }
      this.mergeUnknownFields(other.getUnknownFields());
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
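      // Each case below matches a wire tag: (field_number << 3) | wire_type.
      // 24 = field 3 (speech_event_type, varint); 42, 50, and 58 are the
      // length-delimited fields 5 (metadata), 6 (results), and 7
      // (speech_event_offset).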
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 24:
              {
                speechEventType_ = input.readEnum();
                bitField0_ |= 0x00000002;
                break;
              } // case 24
            case 42:
              {
                input.readMessage(getMetadataFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00000008;
                break;
              } // case 42
            case 50:
              {
                com.google.cloud.speech.v2.StreamingRecognitionResult m =
                    input.readMessage(
                        com.google.cloud.speech.v2.StreamingRecognitionResult.parser(),
                        extensionRegistry);
                if (resultsBuilder_ == null) {
                  ensureResultsIsMutable();
                  results_.add(m);
                } else {
                  resultsBuilder_.addMessage(m);
                }
                break;
              } // case 50
            case 58:
              {
                input.readMessage(
                    getSpeechEventOffsetFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00000004;
                break;
              } // case 58
            default:
              {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
          } // switch (tag)
        } // while (!done)
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.unwrapIOException();
      } finally {
        onChanged();
      } // finally
      return this;
    }

    private int bitField0_;

    private java.util.List<com.google.cloud.speech.v2.StreamingRecognitionResult> results_ =
        java.util.Collections.emptyList();

    private void ensureResultsIsMutable() {
      if (!((bitField0_ & 0x00000001) != 0)) {
        results_ =
            new java.util.ArrayList<com.google.cloud.speech.v2.StreamingRecognitionResult>(
                results_);
        bitField0_ |= 0x00000001;
      }
    }

    private com.google.protobuf.RepeatedFieldBuilderV3<
            com.google.cloud.speech.v2.StreamingRecognitionResult,
            com.google.cloud.speech.v2.StreamingRecognitionResult.Builder,
            com.google.cloud.speech.v2.StreamingRecognitionResultOrBuilder>
        resultsBuilder_;

    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public java.util.List<com.google.cloud.speech.v2.StreamingRecognitionResult> getResultsList() {
      if (resultsBuilder_ == null) {
        return java.util.Collections.unmodifiableList(results_);
      } else {
        return resultsBuilder_.getMessageList();
      }
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public int getResultsCount() {
      if (resultsBuilder_ == null) {
        return results_.size();
      } else {
        return resultsBuilder_.getCount();
      }
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public com.google.cloud.speech.v2.StreamingRecognitionResult getResults(int index) {
      if (resultsBuilder_ == null) {
        return results_.get(index);
      } else {
        return resultsBuilder_.getMessage(index);
      }
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public Builder setResults(
        int index, com.google.cloud.speech.v2.StreamingRecognitionResult value) {
      if (resultsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureResultsIsMutable();
        results_.set(index, value);
        onChanged();
      } else {
        resultsBuilder_.setMessage(index, value);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public Builder setResults(
        int index, com.google.cloud.speech.v2.StreamingRecognitionResult.Builder builderForValue) {
      if (resultsBuilder_ == null) {
        ensureResultsIsMutable();
        results_.set(index, builderForValue.build());
        onChanged();
      } else {
        resultsBuilder_.setMessage(index, builderForValue.build());
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public Builder addResults(com.google.cloud.speech.v2.StreamingRecognitionResult value) {
      if (resultsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureResultsIsMutable();
        results_.add(value);
        onChanged();
      } else {
        resultsBuilder_.addMessage(value);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public Builder addResults(
        int index, com.google.cloud.speech.v2.StreamingRecognitionResult value) {
      if (resultsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureResultsIsMutable();
        results_.add(index, value);
        onChanged();
      } else {
        resultsBuilder_.addMessage(index, value);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public Builder addResults(
        com.google.cloud.speech.v2.StreamingRecognitionResult.Builder builderForValue) {
      if (resultsBuilder_ == null) {
        ensureResultsIsMutable();
        results_.add(builderForValue.build());
        onChanged();
      } else {
        resultsBuilder_.addMessage(builderForValue.build());
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public Builder addResults(
        int index, com.google.cloud.speech.v2.StreamingRecognitionResult.Builder builderForValue) {
      if (resultsBuilder_ == null) {
        ensureResultsIsMutable();
        results_.add(index, builderForValue.build());
        onChanged();
      } else {
        resultsBuilder_.addMessage(index, builderForValue.build());
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public Builder addAllResults(
        java.lang.Iterable<? extends com.google.cloud.speech.v2.StreamingRecognitionResult>
            values) {
      if (resultsBuilder_ == null) {
        ensureResultsIsMutable();
        com.google.protobuf.AbstractMessageLite.Builder.addAll(values, results_);
        onChanged();
      } else {
        resultsBuilder_.addAllMessages(values);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public Builder clearResults() {
      if (resultsBuilder_ == null) {
        results_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
      } else {
        resultsBuilder_.clear();
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public Builder removeResults(int index) {
      if (resultsBuilder_ == null) {
        ensureResultsIsMutable();
        results_.remove(index);
        onChanged();
      } else {
        resultsBuilder_.remove(index);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public com.google.cloud.speech.v2.StreamingRecognitionResult.Builder getResultsBuilder(
        int index) {
      return getResultsFieldBuilder().getBuilder(index);
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public com.google.cloud.speech.v2.StreamingRecognitionResultOrBuilder getResultsOrBuilder(
        int index) {
      if (resultsBuilder_ == null) {
        return results_.get(index);
      } else {
        return resultsBuilder_.getMessageOrBuilder(index);
      }
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public java.util.List<? extends com.google.cloud.speech.v2.StreamingRecognitionResultOrBuilder>
        getResultsOrBuilderList() {
      if (resultsBuilder_ != null) {
        return resultsBuilder_.getMessageOrBuilderList();
      } else {
        return java.util.Collections.unmodifiableList(results_);
      }
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public com.google.cloud.speech.v2.StreamingRecognitionResult.Builder addResultsBuilder() {
      return getResultsFieldBuilder()
          .addBuilder(com.google.cloud.speech.v2.StreamingRecognitionResult.getDefaultInstance());
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public com.google.cloud.speech.v2.StreamingRecognitionResult.Builder addResultsBuilder(
        int index) {
      return getResultsFieldBuilder()
          .addBuilder(
              index, com.google.cloud.speech.v2.StreamingRecognitionResult.getDefaultInstance());
    }
    /**
     *
     *
     * <pre>
     * This repeated list contains zero or more results that
     * correspond to consecutive portions of the audio currently being processed.
     * It contains zero or one
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`true`
     * result (the newly settled portion), followed by zero or more
     * [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`
     * results (the interim results).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v2.StreamingRecognitionResult results = 6;</code>
     */
    public java.util.List<com.google.cloud.speech.v2.StreamingRecognitionResult.Builder>
        getResultsBuilderList() {
      return getResultsFieldBuilder().getBuilderList();
    }

    private com.google.protobuf.RepeatedFieldBuilderV3<
            com.google.cloud.speech.v2.StreamingRecognitionResult,
            com.google.cloud.speech.v2.StreamingRecognitionResult.Builder,
            com.google.cloud.speech.v2.StreamingRecognitionResultOrBuilder>
        getResultsFieldBuilder() {
      if (resultsBuilder_ == null) {
        resultsBuilder_ =
            new com.google.protobuf.RepeatedFieldBuilderV3<
                com.google.cloud.speech.v2.StreamingRecognitionResult,
                com.google.cloud.speech.v2.StreamingRecognitionResult.Builder,
                com.google.cloud.speech.v2.StreamingRecognitionResultOrBuilder>(
                results_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean());
        results_ = null;
      }
      return resultsBuilder_;
    }

    private int speechEventType_ = 0;
    /**
     *
     *
     * <pre>
     * Indicates the type of speech event.
     * </pre>
     *
     * <code>
     * .google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType speech_event_type = 3;
     * </code>
     *
     * @return The enum numeric value on the wire for speechEventType.
     */
    @java.lang.Override
    public int getSpeechEventTypeValue() {
      return speechEventType_;
    }
    /**
     *
     *
     * <pre>
     * Indicates the type of speech event.
     * </pre>
     *
     * <code>
     * .google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType speech_event_type = 3;
     * </code>
     *
     * @param value The enum numeric value on the wire for speechEventType to set.
     * @return This builder for chaining.
     */
    public Builder setSpeechEventTypeValue(int value) {
      speechEventType_ = value;
      bitField0_ |= 0x00000002;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Indicates the type of speech event.
     * </pre>
     *
     * <code>
     * .google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType speech_event_type = 3;
     * </code>
     *
     * @return The speechEventType.
     */
    @java.lang.Override
    public com.google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType
        getSpeechEventType() {
      com.google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType result =
          com.google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType.forNumber(
              speechEventType_);
      return result == null
          ? com.google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType.UNRECOGNIZED
          : result;
    }
    /**
     *
     *
     * <pre>
     * Indicates the type of speech event.
     * </pre>
     *
     * <code>
     * .google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType speech_event_type = 3;
     * </code>
     *
     * @param value The speechEventType to set.
     * @return This builder for chaining.
     */
    public Builder setSpeechEventType(
        com.google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType value) {
      if (value == null) {
        throw new NullPointerException();
      }
      bitField0_ |= 0x00000002;
      speechEventType_ = value.getNumber();
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Indicates the type of speech event.
     * </pre>
     *
     * <code>
     * .google.cloud.speech.v2.StreamingRecognizeResponse.SpeechEventType speech_event_type = 3;
     * </code>
     *
     * @return This builder for chaining.
     */
    public Builder clearSpeechEventType() {
      bitField0_ = (bitField0_ & ~0x00000002);
      speechEventType_ = 0;
      onChanged();
      return this;
    }
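
    // Editor's note: a minimal sketch of the raw-value/typed-value round trip
    // these accessors provide. The value 999 is an arbitrary, unassigned wire
    // value used purely for illustration.
    //
    //   StreamingRecognizeResponse response =
    //       StreamingRecognizeResponse.newBuilder()
    //           .setSpeechEventTypeValue(999) // raw wire value, not validated
    //           .build();
    //   // forNumber(999) returns null, so the typed getter falls back to
    //   // UNRECOGNIZED rather than throwing (see getSpeechEventType above):
    //   StreamingRecognizeResponse.SpeechEventType type = response.getSpeechEventType();
    //   // type == StreamingRecognizeResponse.SpeechEventType.UNRECOGNIZED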

    private com.google.protobuf.Duration speechEventOffset_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.Duration,
            com.google.protobuf.Duration.Builder,
            com.google.protobuf.DurationOrBuilder>
        speechEventOffsetBuilder_;
    /**
     *
     *
     * <pre>
     * Time offset between the beginning of the audio and event emission.
     * </pre>
     *
     * <code>.google.protobuf.Duration speech_event_offset = 7;</code>
     *
     * @return Whether the speechEventOffset field is set.
     */
    public boolean hasSpeechEventOffset() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     *
     *
     * <pre>
     * Time offset between the beginning of the audio and event emission.
     * </pre>
     *
     * <code>.google.protobuf.Duration speech_event_offset = 7;</code>
     *
     * @return The speechEventOffset.
     */
    public com.google.protobuf.Duration getSpeechEventOffset() {
      if (speechEventOffsetBuilder_ == null) {
        return speechEventOffset_ == null
            ? com.google.protobuf.Duration.getDefaultInstance()
            : speechEventOffset_;
      } else {
        return speechEventOffsetBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * Time offset between the beginning of the audio and event emission.
     * </pre>
     *
     * <code>.google.protobuf.Duration speech_event_offset = 7;</code>
     */
    public Builder setSpeechEventOffset(com.google.protobuf.Duration value) {
      if (speechEventOffsetBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        speechEventOffset_ = value;
      } else {
        speechEventOffsetBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00000004;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Time offset between the beginning of the audio and event emission.
     * </pre>
     *
     * <code>.google.protobuf.Duration speech_event_offset = 7;</code>
     */
    public Builder setSpeechEventOffset(com.google.protobuf.Duration.Builder builderForValue) {
      if (speechEventOffsetBuilder_ == null) {
        speechEventOffset_ = builderForValue.build();
      } else {
        speechEventOffsetBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00000004;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Time offset between the beginning of the audio and event emission.
     * </pre>
     *
     * <code>.google.protobuf.Duration speech_event_offset = 7;</code>
     */
    public Builder mergeSpeechEventOffset(com.google.protobuf.Duration value) {
      if (speechEventOffsetBuilder_ == null) {
        // Merge field-by-field only if a non-default offset was already set;
        // otherwise simply adopt the incoming value.
        if (((bitField0_ & 0x00000004) != 0)
            && speechEventOffset_ != null
            && speechEventOffset_ != com.google.protobuf.Duration.getDefaultInstance()) {
          getSpeechEventOffsetBuilder().mergeFrom(value);
        } else {
          speechEventOffset_ = value;
        }
      } else {
        speechEventOffsetBuilder_.mergeFrom(value);
      }
      bitField0_ |= 0x00000004;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Time offset between the beginning of the audio and event emission.
     * </pre>
     *
     * <code>.google.protobuf.Duration speech_event_offset = 7;</code>
     */
    public Builder clearSpeechEventOffset() {
      bitField0_ = (bitField0_ & ~0x00000004);
      speechEventOffset_ = null;
      if (speechEventOffsetBuilder_ != null) {
        speechEventOffsetBuilder_.dispose();
        speechEventOffsetBuilder_ = null;
      }
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Time offset between the beginning of the audio and event emission.
     * </pre>
     *
     * <code>.google.protobuf.Duration speech_event_offset = 7;</code>
     */
    public com.google.protobuf.Duration.Builder getSpeechEventOffsetBuilder() {
      bitField0_ |= 0x00000004;
      onChanged();
      return getSpeechEventOffsetFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * Time offset between the beginning of the audio and event emission.
     * </pre>
     *
     * <code>.google.protobuf.Duration speech_event_offset = 7;</code>
     */
    public com.google.protobuf.DurationOrBuilder getSpeechEventOffsetOrBuilder() {
      if (speechEventOffsetBuilder_ != null) {
        return speechEventOffsetBuilder_.getMessageOrBuilder();
      } else {
        return speechEventOffset_ == null
            ? com.google.protobuf.Duration.getDefaultInstance()
            : speechEventOffset_;
      }
    }
    /**
     *
     *
     * <pre>
     * Time offset between the beginning of the audio and event emission.
     * </pre>
     *
     * <code>.google.protobuf.Duration speech_event_offset = 7;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.Duration,
            com.google.protobuf.Duration.Builder,
            com.google.protobuf.DurationOrBuilder>
        getSpeechEventOffsetFieldBuilder() {
      if (speechEventOffsetBuilder_ == null) {
        speechEventOffsetBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.protobuf.Duration,
                com.google.protobuf.Duration.Builder,
                com.google.protobuf.DurationOrBuilder>(
                getSpeechEventOffset(), getParentForChildren(), isClean());
        // The single-field builder now owns the value, so the plain field
        // reference is dropped (same handoff as the repeated field above).
        speechEventOffset_ = null;
      }
      return speechEventOffsetBuilder_;
    }
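
    // Editor's note: a minimal sketch of setting and reading the
    // `speech_event_offset` field; the two-second value is illustrative only.
    //
    //   StreamingRecognizeResponse response =
    //       StreamingRecognizeResponse.newBuilder()
    //           .setSpeechEventOffset(
    //               com.google.protobuf.Duration.newBuilder().setSeconds(2).build())
    //           .build();
    //   if (response.hasSpeechEventOffset()) {
    //     long seconds = response.getSpeechEventOffset().getSeconds(); // 2
    //   }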

    private com.google.cloud.speech.v2.RecognitionResponseMetadata metadata_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v2.RecognitionResponseMetadata,
            com.google.cloud.speech.v2.RecognitionResponseMetadata.Builder,
            com.google.cloud.speech.v2.RecognitionResponseMetadataOrBuilder>
        metadataBuilder_;
    /**
     *
     *
     * <pre>
     * Metadata about the recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.RecognitionResponseMetadata metadata = 5;</code>
     *
     * @return Whether the metadata field is set.
     */
    public boolean hasMetadata() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     *
     *
     * <pre>
     * Metadata about the recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.RecognitionResponseMetadata metadata = 5;</code>
     *
     * @return The metadata.
     */
    public com.google.cloud.speech.v2.RecognitionResponseMetadata getMetadata() {
      if (metadataBuilder_ == null) {
        return metadata_ == null
            ? com.google.cloud.speech.v2.RecognitionResponseMetadata.getDefaultInstance()
            : metadata_;
      } else {
        return metadataBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * Metadata about the recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.RecognitionResponseMetadata metadata = 5;</code>
     */
    public Builder setMetadata(com.google.cloud.speech.v2.RecognitionResponseMetadata value) {
      if (metadataBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        metadata_ = value;
      } else {
        metadataBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00000008;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Metadata about the recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.RecognitionResponseMetadata metadata = 5;</code>
     */
    public Builder setMetadata(
        com.google.cloud.speech.v2.RecognitionResponseMetadata.Builder builderForValue) {
      if (metadataBuilder_ == null) {
        metadata_ = builderForValue.build();
      } else {
        metadataBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00000008;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Metadata about the recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.RecognitionResponseMetadata metadata = 5;</code>
     */
    public Builder mergeMetadata(com.google.cloud.speech.v2.RecognitionResponseMetadata value) {
      if (metadataBuilder_ == null) {
        if (((bitField0_ & 0x00000008) != 0)
            && metadata_ != null
            && metadata_
                != com.google.cloud.speech.v2.RecognitionResponseMetadata.getDefaultInstance()) {
          getMetadataBuilder().mergeFrom(value);
        } else {
          metadata_ = value;
        }
      } else {
        metadataBuilder_.mergeFrom(value);
      }
      bitField0_ |= 0x00000008;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Metadata about the recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.RecognitionResponseMetadata metadata = 5;</code>
     */
    public Builder clearMetadata() {
      bitField0_ = (bitField0_ & ~0x00000008);
      metadata_ = null;
      if (metadataBuilder_ != null) {
        metadataBuilder_.dispose();
        metadataBuilder_ = null;
      }
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Metadata about the recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.RecognitionResponseMetadata metadata = 5;</code>
     */
    public com.google.cloud.speech.v2.RecognitionResponseMetadata.Builder getMetadataBuilder() {
      bitField0_ |= 0x00000008;
      onChanged();
      return getMetadataFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * Metadata about the recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.RecognitionResponseMetadata metadata = 5;</code>
     */
    public com.google.cloud.speech.v2.RecognitionResponseMetadataOrBuilder getMetadataOrBuilder() {
      if (metadataBuilder_ != null) {
        return metadataBuilder_.getMessageOrBuilder();
      } else {
        return metadata_ == null
            ? com.google.cloud.speech.v2.RecognitionResponseMetadata.getDefaultInstance()
            : metadata_;
      }
    }
    /**
     *
     *
     * <pre>
     * Metadata about the recognition.
     * </pre>
     *
     * <code>.google.cloud.speech.v2.RecognitionResponseMetadata metadata = 5;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v2.RecognitionResponseMetadata,
            com.google.cloud.speech.v2.RecognitionResponseMetadata.Builder,
            com.google.cloud.speech.v2.RecognitionResponseMetadataOrBuilder>
        getMetadataFieldBuilder() {
      if (metadataBuilder_ == null) {
        metadataBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.speech.v2.RecognitionResponseMetadata,
                com.google.cloud.speech.v2.RecognitionResponseMetadata.Builder,
                com.google.cloud.speech.v2.RecognitionResponseMetadataOrBuilder>(
                getMetadata(), getParentForChildren(), isClean());
        metadata_ = null;
      }
      return metadataBuilder_;
    }
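
    // Editor's note: a minimal sketch of the write-through nested-builder
    // pattern this field uses. Acquiring the child builder marks the field as
    // set (see getMetadataBuilder above), and edits to the child propagate to
    // this parent builder.
    //
    //   StreamingRecognizeResponse.Builder parent =
    //       StreamingRecognizeResponse.newBuilder();
    //   com.google.cloud.speech.v2.RecognitionResponseMetadata.Builder child =
    //       parent.getMetadataBuilder();
    //   // parent.hasMetadata() is now true, even before any field of the
    //   // child has been populated.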

    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.speech.v2.StreamingRecognizeResponse)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.speech.v2.StreamingRecognizeResponse)
  private static final com.google.cloud.speech.v2.StreamingRecognizeResponse DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.speech.v2.StreamingRecognizeResponse();
  }

  public static com.google.cloud.speech.v2.StreamingRecognizeResponse getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<StreamingRecognizeResponse> PARSER =
      new com.google.protobuf.AbstractParser<StreamingRecognizeResponse>() {
        @java.lang.Override
        public StreamingRecognizeResponse parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (com.google.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new com.google.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

  public static com.google.protobuf.Parser<StreamingRecognizeResponse> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<StreamingRecognizeResponse> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.cloud.speech.v2.StreamingRecognizeResponse getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
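
// Editor's note: a minimal sketch of round-tripping this message through its
// wire format using the parser defined above; the garbage bytes are arbitrary.
//
//   StreamingRecognizeResponse original =
//       StreamingRecognizeResponse.getDefaultInstance();
//   byte[] bytes = original.toByteArray();
//   StreamingRecognizeResponse parsed =
//       StreamingRecognizeResponse.parser().parseFrom(bytes);
//
//   byte[] garbage = new byte[] {(byte) 0xFF, (byte) 0xFF}; // truncated varint
//   try {
//     StreamingRecognizeResponse.parser().parseFrom(garbage);
//   } catch (com.google.protobuf.InvalidProtocolBufferException e) {
//     // Malformed input surfaces as InvalidProtocolBufferException, per the
//     // catch chain in parsePartialFrom.
//   }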
