/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/speech/v2/cloud_speech.proto

package com.google.cloud.speech.v2;

/**
 * Read-only accessor interface for the {@code google.cloud.speech.v2.RecognitionFeatures}
 * proto message, implemented by both the immutable message class and its builder.
 *
 * <p>Generated by the protocol buffer compiler from
 * {@code google/cloud/speech/v2/cloud_speech.proto} — do not edit by hand; regenerate
 * via protoc instead. Method names and signatures are fixed by the protobuf Java
 * codegen conventions (scalar fields get a single getter; enum fields additionally get
 * a raw wire-value getter; message fields get {@code has}/{@code get}/{@code getOrBuilder}).
 */
public interface RecognitionFeaturesOrBuilder
    extends
    // @@protoc_insertion_point(interface_extends:google.cloud.speech.v2.RecognitionFeatures)
    com.google.protobuf.MessageOrBuilder {

  /**
   *
   *
   * <pre>
   * If set to `true`, the server will attempt to filter out profanities,
   * replacing all but the initial character in each filtered word with
   * asterisks, for instance, "f***". If set to `false` or omitted, profanities
   * won't be filtered out.
   * </pre>
   *
   * <code>bool profanity_filter = 1;</code>
   *
   * @return The profanityFilter.
   */
  boolean getProfanityFilter();

  /**
   *
   *
   * <pre>
   * If `true`, the top result includes a list of words and the start and end
   * time offsets (timestamps) for those words. If `false`, no word-level time
   * offset information is returned. The default is `false`.
   * </pre>
   *
   * <code>bool enable_word_time_offsets = 2;</code>
   *
   * @return The enableWordTimeOffsets.
   */
  boolean getEnableWordTimeOffsets();

  /**
   *
   *
   * <pre>
   * If `true`, the top result includes a list of words and the confidence for
   * those words. If `false`, no word-level confidence information is returned.
   * The default is `false`.
   * </pre>
   *
   * <code>bool enable_word_confidence = 3;</code>
   *
   * @return The enableWordConfidence.
   */
  boolean getEnableWordConfidence();

  /**
   *
   *
   * <pre>
   * If `true`, adds punctuation to recognition result hypotheses. This feature
   * is only available in select languages. The default `false` value does not
   * add punctuation to result hypotheses.
   * </pre>
   *
   * <code>bool enable_automatic_punctuation = 4;</code>
   *
   * @return The enableAutomaticPunctuation.
   */
  boolean getEnableAutomaticPunctuation();

  /**
   *
   *
   * <pre>
   * The spoken punctuation behavior for the call. If `true`, replaces spoken
   * punctuation with the corresponding symbols in the request. For example,
   * "how are you question mark" becomes "how are you?". See
   * https://cloud.google.com/speech-to-text/docs/spoken-punctuation for
   * support. If `false`, spoken punctuation is not replaced.
   * </pre>
   *
   * <code>bool enable_spoken_punctuation = 14;</code>
   *
   * @return The enableSpokenPunctuation.
   */
  boolean getEnableSpokenPunctuation();

  /**
   *
   *
   * <pre>
   * The spoken emoji behavior for the call. If `true`, adds spoken emoji
   * formatting for the request. This will replace spoken emojis with the
   * corresponding Unicode symbols in the final transcript. If `false`, spoken
   * emojis are not replaced.
   * </pre>
   *
   * <code>bool enable_spoken_emojis = 15;</code>
   *
   * @return The enableSpokenEmojis.
   */
  boolean getEnableSpokenEmojis();

  /**
   *
   *
   * <pre>
   * Mode for recognizing multi-channel audio.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode multi_channel_mode = 17;
   * </code>
   *
   * @return The enum numeric value on the wire for multiChannelMode.
   */
  int getMultiChannelModeValue();
  /**
   *
   *
   * <pre>
   * Mode for recognizing multi-channel audio.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode multi_channel_mode = 17;
   * </code>
   *
   * @return The multiChannelMode.
   */
  com.google.cloud.speech.v2.RecognitionFeatures.MultiChannelMode getMultiChannelMode();

  /**
   *
   *
   * <pre>
   * Configuration to enable speaker diarization and set additional
   * parameters to make diarization better suited for your application.
   * When this is enabled, we send all the words from the beginning of the
   * audio for the top alternative in every consecutive STREAMING responses.
   * This is done in order to improve our speaker tags as our models learn to
   * identify the speakers in the conversation over time.
   * For non-streaming requests, the diarization results will be provided only
   * in the top alternative of the FINAL SpeechRecognitionResult.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
   *
   * @return Whether the diarizationConfig field is set.
   */
  boolean hasDiarizationConfig();
  /**
   *
   *
   * <pre>
   * Configuration to enable speaker diarization and set additional
   * parameters to make diarization better suited for your application.
   * When this is enabled, we send all the words from the beginning of the
   * audio for the top alternative in every consecutive STREAMING responses.
   * This is done in order to improve our speaker tags as our models learn to
   * identify the speakers in the conversation over time.
   * For non-streaming requests, the diarization results will be provided only
   * in the top alternative of the FINAL SpeechRecognitionResult.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
   *
   * @return The diarizationConfig.
   */
  com.google.cloud.speech.v2.SpeakerDiarizationConfig getDiarizationConfig();
  /**
   *
   *
   * <pre>
   * Configuration to enable speaker diarization and set additional
   * parameters to make diarization better suited for your application.
   * When this is enabled, we send all the words from the beginning of the
   * audio for the top alternative in every consecutive STREAMING responses.
   * This is done in order to improve our speaker tags as our models learn to
   * identify the speakers in the conversation over time.
   * For non-streaming requests, the diarization results will be provided only
   * in the top alternative of the FINAL SpeechRecognitionResult.
   * </pre>
   *
   * <code>.google.cloud.speech.v2.SpeakerDiarizationConfig diarization_config = 9;</code>
   */
  com.google.cloud.speech.v2.SpeakerDiarizationConfigOrBuilder getDiarizationConfigOrBuilder();

  /**
   *
   *
   * <pre>
   * Maximum number of recognition hypotheses to be returned.
   * The server may return fewer than `max_alternatives`.
   * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
   * one. If omitted, will return a maximum of one.
   * </pre>
   *
   * <code>int32 max_alternatives = 16;</code>
   *
   * @return The maxAlternatives.
   */
  int getMaxAlternatives();
}
