/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/automl/v1/io.proto

package com.google.cloud.automl.v1;

/**
 *
 *
 * <pre>
 * Input configuration for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] action.
 * The format of input depends on dataset_metadata the Dataset into which
 * the import is happening has. As input source the
 * [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source]
 * is expected, unless specified otherwise. Additionally any input .CSV file
 * by itself must be 100MB or smaller, unless specified otherwise.
 * If an "example" file (that is, image, video etc.) with identical content
 * (even if it had different `GCS_FILE_PATH`) is mentioned multiple times, then
 * its label, bounding boxes etc. are appended. The same file should be always
 * provided with the same `ML_USE` and `GCS_FILE_PATH`, if it is not, then
 * these values are nondeterministically selected from the given ones.
 * The formats are represented in EBNF with commas being literal and with
 * non-terminal symbols defined near the end of this comment. The formats are:
 * &lt;h4&gt;AutoML Vision&lt;/h4&gt;
 * &lt;div class="ds-selector-tabs"&gt;&lt;section&gt;&lt;h5&gt;Classification&lt;/h5&gt;
 * See [Preparing your training
 * data](https://cloud.google.com/vision/automl/docs/prepare) for more
 * information.
 * CSV file(s) with each line in format:
 *     ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
 * *   `ML_USE` - Identifies the data set that the current row (file) applies
 * to.
 *     This value can be one of the following:
 *     * `TRAIN` - Rows in this file are used to train the model.
 *     * `TEST` - Rows in this file are used to test the model during training.
 *     * `UNASSIGNED` - Rows in this file are not categorized. They are
 *        Automatically divided into train and test data. 80% for training and
 *        20% for testing.
 * *   `GCS_FILE_PATH` - The Google Cloud Storage location of an image of up to
 *      30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP,
 *      .TIFF, .ICO.
 * *   `LABEL` - A label that identifies the object in the image.
 * For the `MULTICLASS` classification type, at most one `LABEL` is allowed
 * per image. If an image has not yet been labeled, then it should be
 * mentioned just once with no `LABEL`.
 * Some sample rows:
 *     TRAIN,gs://folder/image1.jpg,daisy
 *     TEST,gs://folder/image2.jpg,dandelion,tulip,rose
 *     UNASSIGNED,gs://folder/image3.jpg,daisy
 *     UNASSIGNED,gs://folder/image4.jpg
 * &lt;/section&gt;&lt;section&gt;&lt;h5&gt;Object Detection&lt;/h5&gt;
 * See [Preparing your training
 * data](https://cloud.google.com/vision/automl/object-detection/docs/prepare)
 * for more information.
 * A CSV file(s) with each line in format:
 *     ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,)
 * *   `ML_USE` - Identifies the data set that the current row (file) applies
 * to.
 *     This value can be one of the following:
 *     * `TRAIN` - Rows in this file are used to train the model.
 *     * `TEST` - Rows in this file are used to test the model during training.
 *     * `UNASSIGNED` - Rows in this file are not categorized. They are
 *        Automatically divided into train and test data. 80% for training and
 *        20% for testing.
 * *  `GCS_FILE_PATH` - The Google Cloud Storage location of an image of up to
 *     30MB in size. Supported extensions: .JPEG, .GIF, .PNG. Each image
 *     is assumed to be exhaustively labeled.
 * *  `LABEL` - A label that identifies the object in the image specified by the
 *    `BOUNDING_BOX`.
 * *  `BOUNDING BOX` - The vertices of an object in the example image.
 *    The minimum allowed `BOUNDING_BOX` edge length is 0.01, and no more than
 *    500 `BOUNDING_BOX` instances per image are allowed (one `BOUNDING_BOX`
 *    per line). If an image contains none of the objects of interest, then it should be
 *    mentioned just once with no LABEL and the ",,,,,,," in place of the
 *   `BOUNDING_BOX`.
 * **Four sample rows:**
 *     TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
 *     TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
 *     UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
 *     TEST,gs://folder/im3.png,,,,,,,,,
 *   &lt;/section&gt;
 * &lt;/div&gt;
 * &lt;h4&gt;AutoML Video Intelligence&lt;/h4&gt;
 * &lt;div class="ds-selector-tabs"&gt;&lt;section&gt;&lt;h5&gt;Classification&lt;/h5&gt;
 * See [Preparing your training
 * data](https://cloud.google.com/video-intelligence/automl/docs/prepare) for
 * more information.
 * CSV file(s) with each line in format:
 *     ML_USE,GCS_FILE_PATH
 * For `ML_USE`, do not use `VALIDATE`.
 * `GCS_FILE_PATH` is the path to another .csv file that describes training
 * example for a given `ML_USE`, using the following row format:
 *     GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,)
 * Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up
 * to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
 * `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
 * length of the video, and the end time must be after the start time. Any
 * segment of a video which has one or more labels on it, is considered a
 * hard negative for all other labels. Any segment with no labels on
 * it is considered to be unknown. If a whole video is unknown, then
 * it should be mentioned just once with ",," in place of `LABEL,
 * TIME_SEGMENT_START,TIME_SEGMENT_END`.
 * Sample top level CSV file:
 *     TRAIN,gs://folder/train_videos.csv
 *     TEST,gs://folder/test_videos.csv
 *     UNASSIGNED,gs://folder/other_videos.csv
 * Sample rows of a CSV file for a particular ML_USE:
 *     gs://folder/video1.avi,car,120,180.000021
 *     gs://folder/video1.avi,bike,150,180.000021
 *     gs://folder/vid2.avi,car,0,60.5
 *     gs://folder/vid3.avi,,,
 * &lt;/section&gt;&lt;section&gt;&lt;h5&gt;Object Tracking&lt;/h5&gt;
 * See [Preparing your training
 * data](/video-intelligence/automl/object-tracking/docs/prepare) for more
 * information.
 * CSV file(s) with each line in format:
 *     ML_USE,GCS_FILE_PATH
 * For `ML_USE`, do not use `VALIDATE`.
 * `GCS_FILE_PATH` is the path to another .csv file that describes training
 * example for a given `ML_USE`, using the following row format:
 *     GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX
 * or
 *     GCS_FILE_PATH,,,,,,,,,,
 * Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up
 * to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
 * Providing `INSTANCE_ID`s can help to obtain a better model. When
 * a specific labeled entity leaves the video frame, and shows up
 * afterwards it is not required, albeit preferable, that the same
 * `INSTANCE_ID` is given to it.
 * `TIMESTAMP` must be within the length of the video, the
 * `BOUNDING_BOX` is assumed to be drawn on the closest video's frame
 * to the `TIMESTAMP`. Any mentioned by the `TIMESTAMP` frame is expected
 * to be exhaustively labeled and no more than 500 `BOUNDING_BOX`-es per
 * frame are allowed. If a whole video is unknown, then it should be
 * mentioned just once with ",,,,,,,,,," in place of `LABEL,
 * [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX`.
 * Sample top level CSV file:
 *      TRAIN,gs://folder/train_videos.csv
 *      TEST,gs://folder/test_videos.csv
 *      UNASSIGNED,gs://folder/other_videos.csv
 * Seven sample rows of a CSV file for a particular ML_USE:
 *      gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
 *      gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
 *      gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
 *      gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
 *      gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
 *      gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
 *      gs://folder/video2.avi,,,,,,,,,,,
 *   &lt;/section&gt;
 * &lt;/div&gt;
 * &lt;h4&gt;AutoML Natural Language&lt;/h4&gt;
 * &lt;div class="ds-selector-tabs"&gt;&lt;section&gt;&lt;h5&gt;Entity Extraction&lt;/h5&gt;
 * See [Preparing your training
 * data](/natural-language/automl/entity-analysis/docs/prepare) for more
 * information.
 * One or more CSV file(s) with each line in the following format:
 *     ML_USE,GCS_FILE_PATH
 * *   `ML_USE` - Identifies the data set that the current row (file) applies
 * to.
 *     This value can be one of the following:
 *     * `TRAIN` - Rows in this file are used to train the model.
 *     * `TEST` - Rows in this file are used to test the model during training.
 *     * `UNASSIGNED` - Rows in this file are not categorized. They are
 *        Automatically divided into train and test data. 80% for training and
 *        20% for testing.
 * *   `GCS_FILE_PATH` - Identifies a JSON Lines (.JSONL) file stored in
 *      Google Cloud Storage that contains in-line text as documents
 *      for model training.
 * After the training data set has been determined from the `TRAIN` and
 * `UNASSIGNED` CSV files, the training data is divided into train and
 * validation data sets. 70% for training and 30% for validation.
 * For example:
 *     TRAIN,gs://folder/file1.jsonl
 *     VALIDATE,gs://folder/file2.jsonl
 *     TEST,gs://folder/file3.jsonl
 * **In-line JSONL files**
 * In-line .JSONL files contain, per line, a JSON document that wraps a
 * [`text_snippet`][google.cloud.automl.v1.TextSnippet] field followed by
 * one or more [`annotations`][google.cloud.automl.v1.AnnotationPayload]
 * fields, which have `display_name` and `text_extraction` fields to describe
 * the entity from the text snippet. Multiple JSON documents can be separated
 * using line breaks (&#92;n).
 * The supplied text must be annotated exhaustively. For example, if you
 * include the text "horse", but do not label it as "animal",
 * then "horse" is assumed to not be an "animal".
 * Any given text snippet content must have 30,000 characters or
 * less, and also be UTF-8 NFC encoded. ASCII is accepted as it is
 * UTF-8 NFC encoded.
 * For example:
 *     {
 *       "text_snippet": {
 *         "content": "dog car cat"
 *       },
 *       "annotations": [
 *          {
 *            "display_name": "animal",
 *            "text_extraction": {
 *              "text_segment": {"start_offset": 0, "end_offset": 2}
 *           }
 *          },
 *          {
 *           "display_name": "vehicle",
 *            "text_extraction": {
 *              "text_segment": {"start_offset": 4, "end_offset": 6}
 *            }
 *          },
 *          {
 *            "display_name": "animal",
 *            "text_extraction": {
 *              "text_segment": {"start_offset": 8, "end_offset": 10}
 *            }
 *          }
 *      ]
 *     }&#92;n
 *     {
 *        "text_snippet": {
 *          "content": "This dog is good."
 *        },
 *        "annotations": [
 *           {
 *             "display_name": "animal",
 *             "text_extraction": {
 *               "text_segment": {"start_offset": 5, "end_offset": 7}
 *             }
 *           }
 *        ]
 *     }
 * **JSONL files that reference documents**
 * .JSONL files contain, per line, a JSON document that wraps a
 * `input_config` that contains the path to a source document.
 * Multiple JSON documents can be separated using line breaks (&#92;n).
 * Supported document extensions: .PDF, .TIF, .TIFF
 * For example:
 *     {
 *       "document": {
 *         "input_config": {
 *           "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
 *           }
 *         }
 *       }
 *     }&#92;n
 *     {
 *       "document": {
 *         "input_config": {
 *           "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ]
 *           }
 *         }
 *       }
 *     }
 * **In-line JSONL files with document layout information**
 * **Note:** You can only annotate documents using the UI. The format described
 * below applies to annotated documents exported using the UI or `exportData`.
 * In-line .JSONL files for documents contain, per line, a JSON document
 * that wraps a `document` field that provides the textual content of the
 * document and the layout information.
 * For example:
 *     {
 *       "document": {
 *               "document_text": {
 *                 "content": "dog car cat"
 *               }
 *               "layout": [
 *                 {
 *                   "text_segment": {
 *                     "start_offset": 0,
 *                     "end_offset": 11,
 *                    },
 *                    "page_number": 1,
 *                    "bounding_poly": {
 *                       "normalized_vertices": [
 *                         {"x": 0.1, "y": 0.1},
 *                         {"x": 0.1, "y": 0.3},
 *                         {"x": 0.3, "y": 0.3},
 *                         {"x": 0.3, "y": 0.1},
 *                       ],
 *                     },
 *                     "text_segment_type": TOKEN,
 *                 }
 *               ],
 *               "document_dimensions": {
 *                 "width": 8.27,
 *                 "height": 11.69,
 *                 "unit": INCH,
 *               }
 *               "page_count": 3,
 *             },
 *             "annotations": [
 *               {
 *                 "display_name": "animal",
 *                 "text_extraction": {
 *                   "text_segment": {"start_offset": 0, "end_offset": 3}
 *                 }
 *               },
 *               {
 *                 "display_name": "vehicle",
 *                 "text_extraction": {
 *                   "text_segment": {"start_offset": 4, "end_offset": 7}
 *                 }
 *               },
 *               {
 *                 "display_name": "animal",
 *                 "text_extraction": {
 *                   "text_segment": {"start_offset": 8, "end_offset": 11}
 *                 }
 *               },
 *             ],
 * &lt;/section&gt;&lt;section&gt;&lt;h5&gt;Classification&lt;/h5&gt;
 * See [Preparing your training
 * data](https://cloud.google.com/natural-language/automl/docs/prepare) for more
 * information.
 * One or more CSV file(s) with each line in the following format:
 *     ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,...
 * *   `ML_USE` - Identifies the data set that the current row (file) applies
 * to.
 *     This value can be one of the following:
 *     * `TRAIN` - Rows in this file are used to train the model.
 *     * `TEST` - Rows in this file are used to test the model during training.
 *     * `UNASSIGNED` - Rows in this file are not categorized. They are
 *        Automatically divided into train and test data. 80% for training and
 *        20% for testing.
 * *   `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished by a pattern. If
 *     the column content is a valid Google Cloud Storage file path, that is,
 *     prefixed by "gs://", it is treated as a `GCS_FILE_PATH`. Otherwise, if
 *     the content is enclosed in double quotes (""), it is treated as a
 *     `TEXT_SNIPPET`. For `GCS_FILE_PATH`, the path must lead to a
 *     file with supported extension and UTF-8 encoding, for example,
 *     "gs://folder/content.txt" AutoML imports the file content
 *     as a text snippet. For `TEXT_SNIPPET`, AutoML imports the column content
 *     excluding quotes. In both cases, size of the content must be 10MB or
 *     less in size. For zip files, the size of each file inside the zip must be
 *     10MB or less in size.
 *     For the `MULTICLASS` classification type, at most one `LABEL` is allowed.
 *     The `ML_USE` and `LABEL` columns are optional.
 *     Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
 * A maximum of 100 unique labels are allowed per CSV row.
 * Sample rows:
 *     TRAIN,"They have bad food and very rude",RudeService,BadFood
 *     gs://folder/content.txt,SlowService
 *     TEST,gs://folder/document.pdf
 *     VALIDATE,gs://folder/text_files.zip,BadFood
 * &lt;/section&gt;&lt;section&gt;&lt;h5&gt;Sentiment Analysis&lt;/h5&gt;
 * See [Preparing your training
 * data](https://cloud.google.com/natural-language/automl/docs/prepare) for more
 * information.
 * CSV file(s) with each line in format:
 *     ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT
 * *   `ML_USE` - Identifies the data set that the current row (file) applies
 * to.
 *     This value can be one of the following:
 *     * `TRAIN` - Rows in this file are used to train the model.
 *     * `TEST` - Rows in this file are used to test the model during training.
 *     * `UNASSIGNED` - Rows in this file are not categorized. They are
 *        Automatically divided into train and test data. 80% for training and
 *        20% for testing.
 * *   `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished by a pattern. If
 *     the column content is a valid  Google Cloud Storage file path, that is,
 *     prefixed by "gs://", it is treated as a `GCS_FILE_PATH`. Otherwise, if
 *     the content is enclosed in double quotes (""), it is treated as a
 *     `TEXT_SNIPPET`. For `GCS_FILE_PATH`, the path must lead to a
 *     file with supported extension and UTF-8 encoding, for example,
 *     "gs://folder/content.txt" AutoML imports the file content
 *     as a text snippet. For `TEXT_SNIPPET`, AutoML imports the column content
 *     excluding quotes. In both cases, size of the content must be 128kB or
 *     less in size. For zip files, the size of each file inside the zip must be
 *     128kB or less in size.
 *     The `ML_USE` and `SENTIMENT` columns are optional.
 *     Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
 * *  `SENTIMENT` - An integer between 0 and
 *     Dataset.text_sentiment_dataset_metadata.sentiment_max
 *     (inclusive). Describes the ordinal of the sentiment - higher
 *     value means a more positive sentiment. All the values are
 *     completely relative, i.e. neither 0 needs to mean a negative or
 *     neutral sentiment nor sentiment_max needs to mean a positive one -
 *     it is just required that 0 is the least positive sentiment
 *     in the data, and sentiment_max is the  most positive one.
 *     The SENTIMENT shouldn't be confused with "score" or "magnitude"
 *     from the previous Natural Language Sentiment Analysis API.
 *     All SENTIMENT values between 0 and sentiment_max must be
 *     represented in the imported data. On prediction the same 0 to
 *     sentiment_max range will be used. The difference between
 *     neighboring sentiment values needs not to be uniform, e.g. 1 and
 *     2 may be similar whereas the difference between 2 and 3 may be
 *     large.
 * Sample rows:
 *     TRAIN,"&#64;freewrytin this is way too good for your product",2
 *     gs://folder/content.txt,3
 *     TEST,gs://folder/document.pdf
 *     VALIDATE,gs://folder/text_files.zip,2
 *   &lt;/section&gt;
 * &lt;/div&gt;
 * &lt;h4&gt;AutoML Tables&lt;/h4&gt;&lt;div class="ui-datasection-main"&gt;&lt;section
 * class="selected"&gt;
 * See [Preparing your training
 * data](https://cloud.google.com/automl-tables/docs/prepare) for more
 * information.
 * You can use either
 * [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or
 * [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source].
 * All input is concatenated into a
 * single
 * [primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id]
 * **For gcs_source:**
 * CSV file(s), where the first row of the first file is the header,
 * containing unique column names. If the first row of a subsequent
 * file is the same as the header, then it is also treated as a
 * header. All other rows contain values for the corresponding
 * columns.
 * Each .CSV file by itself must be 10GB or smaller, and their total
 * size must be 100GB or smaller.
 * First three sample rows of a CSV file:
 * &lt;pre&gt;
 * "Id","First Name","Last Name","Dob","Addresses"
 * "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
 * "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
 * &lt;/pre&gt;
 * **For bigquery_source:**
 * An URI of a BigQuery table. The user data size of the BigQuery
 * table must be 100GB or smaller.
 * An imported table must have between 2 and 1,000 columns, inclusive,
 * and between 1000 and 100,000,000 rows, inclusive. There are at most 5
 * import data running in parallel.
 *   &lt;/section&gt;
 * &lt;/div&gt;
 * **Input field definitions:**
 * `ML_USE`
 * : ("TRAIN" | "VALIDATE" | "TEST" | "UNASSIGNED")
 *   Describes how the given example (file) should be used for model
 *   training. "UNASSIGNED" can be used when user has no preference.
 * `GCS_FILE_PATH`
 * : The path to a file on Google Cloud Storage. For example,
 *   "gs://folder/image1.png".
 * `LABEL`
 * : A display name of an object on an image, video etc., e.g. "dog".
 *   Must be up to 32 characters long and can consist only of ASCII
 *   Latin letters A-Z and a-z, underscores(_), and ASCII digits 0-9.
 *   For each label an AnnotationSpec is created which display_name
 *   becomes the label; AnnotationSpecs are given back in predictions.
 * `INSTANCE_ID`
 * : A positive integer that identifies a specific instance of a
 *   labeled entity on an example. Used e.g. to track two cars on
 *   a video while being able to tell apart which one is which.
 * `BOUNDING_BOX`
 * : (`VERTEX,VERTEX,VERTEX,VERTEX` | `VERTEX,,,VERTEX,,`)
 *   A rectangle parallel to the frame of the example (image,
 *   video). If 4 vertices are given they are connected by edges
 *   in the order provided, if 2 are given they are recognized
 *   as diagonally opposite vertices of the rectangle.
 * `VERTEX`
 * : (`COORDINATE,COORDINATE`)
 *   First coordinate is horizontal (x), the second is vertical (y).
 * `COORDINATE`
 * : A float in 0 to 1 range, relative to total length of
 *   image or video in given dimension. For fractions the
 *   leading non-decimal 0 can be omitted (i.e. 0.3 = .3).
 *   Point 0,0 is in top left.
 * `TIME_SEGMENT_START`
 * : (`TIME_OFFSET`)
 *   Expresses a beginning, inclusive, of a time segment
 *   within an example that has a time dimension
 *   (e.g. video).
 * `TIME_SEGMENT_END`
 * : (`TIME_OFFSET`)
 *   Expresses an end, exclusive, of a time segment within
 *   an example that has a time dimension (e.g. video).
 * `TIME_OFFSET`
 * : A number of seconds as measured from the start of an
 *   example (e.g. video). Fractions are allowed, up to a
 *   microsecond precision. "inf" is allowed, and it means the end
 *   of the example.
 * `TEXT_SNIPPET`
 * : The content of a text snippet, UTF-8 encoded, enclosed within
 *   double quotes ("").
 * `DOCUMENT`
 * : A field that provides the textual content with document and the layout
 *   information.
 *  **Errors:**
 *  If any of the provided CSV files can't be parsed or if more than certain
 *  percent of CSV rows cannot be processed then the operation fails and
 *  nothing is imported. Regardless of overall success or failure the per-row
 *  failures, up to a certain count cap, is listed in
 *  Operation.metadata.partial_failures.
 * </pre>
 *
 * Protobuf type {@code google.cloud.automl.v1.InputConfig}
 */
public final class InputConfig extends com.google.protobuf.GeneratedMessageV3
    implements
    // @@protoc_insertion_point(message_implements:google.cloud.automl.v1.InputConfig)
    InputConfigOrBuilder {
  private static final long serialVersionUID = 0L;
  // Use InputConfig.newBuilder() to construct.
  private InputConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }

  // No-arg constructor for the default instance; all fields keep proto3 defaults.
  private InputConfig() {}

  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
    // Invoked reflectively by the protobuf runtime to allocate a fresh message.
    return new InputConfig();
  }

  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
    // Fields that were present on the wire but are not defined in this message's schema.
    return this.unknownFields;
  }

  /** Returns the message descriptor for {@code google.cloud.automl.v1.InputConfig}. */
  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return com.google.cloud.automl.v1.Io
        .internal_static_google_cloud_automl_v1_InputConfig_descriptor;
  }

  @SuppressWarnings({"rawtypes"})
  @java.lang.Override
  protected com.google.protobuf.MapField internalGetMapField(int number) {
    // The only map declared on this message is `params` (field number 2).
    if (number == 2) {
      return internalGetParams();
    }
    throw new RuntimeException("Invalid map field number: " + number);
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    // Binds the generated descriptor to this class and its Builder for reflection.
    return com.google.cloud.automl.v1.Io
        .internal_static_google_cloud_automl_v1_InputConfig_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.automl.v1.InputConfig.class,
            com.google.cloud.automl.v1.InputConfig.Builder.class);
  }

  private int sourceCase_ = 0;
  private java.lang.Object source_;

  /** Identifies which field of the {@code source} oneof is populated, if any. */
  public enum SourceCase
      implements
          com.google.protobuf.Internal.EnumLite,
          com.google.protobuf.AbstractMessage.InternalOneOfEnum {
    GCS_SOURCE(1),
    SOURCE_NOT_SET(0);

    private final int value;

    private SourceCase(int value) {
      this.value = value;
    }

    /**
     * @param value The number of the enum to look for.
     * @return The enum associated with the given number.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static SourceCase valueOf(int value) {
      return forNumber(value);
    }

    /** Maps a oneof field number to its case constant; {@code null} if unrecognized. */
    public static SourceCase forNumber(int value) {
      if (value == 1) {
        return GCS_SOURCE;
      }
      if (value == 0) {
        return SOURCE_NOT_SET;
      }
      return null;
    }

    /** Returns the proto field number backing this case. */
    public int getNumber() {
      return this.value;
    }
  };

  /** Returns which {@code source} field is currently set on this message. */
  public SourceCase getSourceCase() {
    return SourceCase.forNumber(sourceCase_);
  }

  public static final int GCS_SOURCE_FIELD_NUMBER = 1;
  /**
   *
   *
   * <pre>
   * The Google Cloud Storage location for the input content.
   * For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
   * a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
   * </pre>
   *
   * <code>.google.cloud.automl.v1.GcsSource gcs_source = 1;</code>
   *
   * @return Whether the gcsSource field is set.
   */
  @java.lang.Override
  public boolean hasGcsSource() {
    // Use the named field-number constant instead of the magic literal 1.
    return sourceCase_ == GCS_SOURCE_FIELD_NUMBER;
  }
  /**
   *
   *
   * <pre>
   * The Google Cloud Storage location for the input content.
   * For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
   * a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
   * </pre>
   *
   * <code>.google.cloud.automl.v1.GcsSource gcs_source = 1;</code>
   *
   * @return The gcsSource, or the default instance when the oneof holds another case.
   */
  @java.lang.Override
  public com.google.cloud.automl.v1.GcsSource getGcsSource() {
    return sourceCase_ == 1
        ? (com.google.cloud.automl.v1.GcsSource) source_
        : com.google.cloud.automl.v1.GcsSource.getDefaultInstance();
  }
  /**
   *
   *
   * <pre>
   * The Google Cloud Storage location for the input content.
   * For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
   * a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
   * </pre>
   *
   * <code>.google.cloud.automl.v1.GcsSource gcs_source = 1;</code>
   */
  @java.lang.Override
  public com.google.cloud.automl.v1.GcsSourceOrBuilder getGcsSourceOrBuilder() {
    return sourceCase_ == 1
        ? (com.google.cloud.automl.v1.GcsSource) source_
        : com.google.cloud.automl.v1.GcsSource.getDefaultInstance();
  }

  public static final int PARAMS_FIELD_NUMBER = 2;

  // Holder for the shared default (empty) entry of the string->string `params` map.
  // The nested class defers construction until the map field is first touched.
  private static final class ParamsDefaultEntryHolder {
    static final com.google.protobuf.MapEntry<java.lang.String, java.lang.String> defaultEntry =
        com.google.protobuf.MapEntry.<java.lang.String, java.lang.String>newDefaultInstance(
            com.google.cloud.automl.v1.Io
                .internal_static_google_cloud_automl_v1_InputConfig_ParamsEntry_descriptor,
            com.google.protobuf.WireFormat.FieldType.STRING,
            "",
            com.google.protobuf.WireFormat.FieldType.STRING,
            "");
  }

  @SuppressWarnings("serial")
  private com.google.protobuf.MapField<java.lang.String, java.lang.String> params_;

  // Returns the backing map field, substituting a shared empty map when unset; never null.
  private com.google.protobuf.MapField<java.lang.String, java.lang.String> internalGetParams() {
    if (params_ == null) {
      return com.google.protobuf.MapField.emptyMapField(ParamsDefaultEntryHolder.defaultEntry);
    }
    return params_;
  }

  /** Returns the number of entries in the {@code params} map. */
  public int getParamsCount() {
    return internalGetParams().getMap().size();
  }
  /**
   *
   *
   * <pre>
   * Additional domain-specific parameters describing the semantic of the
   * imported data, any string must be up to 25000
   * characters long.
   * &lt;h4&gt;AutoML Tables&lt;/h4&gt;
   * `schema_inference_version`
   * : (integer) This value must be supplied.
   *   The version of the
   *   algorithm to use for the initial inference of the
   *   column data types of the imported table. Allowed values: "1".
   * </pre>
   *
   * <code>map&lt;string, string&gt; params = 2;</code>
   */
  @java.lang.Override
  public boolean containsParams(java.lang.String key) {
    // Same NullPointerException type and message as a manual null check.
    java.util.Objects.requireNonNull(key, "map key");
    return internalGetParams().getMap().containsKey(key);
  }
  /** Use {@link #getParamsMap()} instead. */
  @java.lang.Override
  @java.lang.Deprecated
  public java.util.Map<java.lang.String, java.lang.String> getParams() {
    // Retained for backward compatibility; delegates to the preferred accessor.
    return getParamsMap();
  }
  /**
   *
   *
   * <pre>
   * Additional domain-specific parameters describing the semantic of the
   * imported data, any string must be up to 25000
   * characters long.
   * &lt;h4&gt;AutoML Tables&lt;/h4&gt;
   * `schema_inference_version`
   * : (integer) This value must be supplied.
   *   The version of the
   *   algorithm to use for the initial inference of the
   *   column data types of the imported table. Allowed values: "1".
   * </pre>
   *
   * <code>map&lt;string, string&gt; params = 2;</code>
   */
  @java.lang.Override
  public java.util.Map<java.lang.String, java.lang.String> getParamsMap() {
    // Read-only view over the underlying MapField; empty when the field is unset.
    return internalGetParams().getMap();
  }
  /**
   *
   *
   * <pre>
   * Additional domain-specific parameters describing the semantic of the
   * imported data, any string must be up to 25000
   * characters long.
   * &lt;h4&gt;AutoML Tables&lt;/h4&gt;
   * `schema_inference_version`
   * : (integer) This value must be supplied.
   *   The version of the
   *   algorithm to use for the initial inference of the
   *   column data types of the imported table. Allowed values: "1".
   * </pre>
   *
   * <code>map&lt;string, string&gt; params = 2;</code>
   */
  @java.lang.Override
  public /* nullable */ java.lang.String getParamsOrDefault(
      java.lang.String key,
      /* nullable */
      java.lang.String defaultValue) {
    // Same NullPointerException type and message as a manual null check.
    java.util.Objects.requireNonNull(key, "map key");
    java.util.Map<java.lang.String, java.lang.String> entries = internalGetParams().getMap();
    if (entries.containsKey(key)) {
      return entries.get(key);
    }
    return defaultValue;
  }
  /**
   * Returns the value mapped to {@code key} in the {@code params} map.
   *
   * <p>See {@link #getParamsMap()} for the meaning of the map contents.
   *
   * <code>map&lt;string, string&gt; params = 2;</code>
   *
   * @throws NullPointerException if {@code key} is null
   * @throws IllegalArgumentException if the map contains no entry for {@code key}
   */
  @java.lang.Override
  public java.lang.String getParamsOrThrow(java.lang.String key) {
    java.util.Objects.requireNonNull(key, "map key");
    java.util.Map<java.lang.String, java.lang.String> map = internalGetParams().getMap();
    if (!map.containsKey(key)) {
      throw new java.lang.IllegalArgumentException();
    }
    return map.get(key);
  }

  // Memoized initialization state: -1 = not yet computed, 1 = initialized, 0 = not initialized.
  private byte memoizedIsInitialized = -1;

  /** Reports whether all required fields are set; this message has none, so it is always true. */
  @java.lang.Override
  public final boolean isInitialized() {
    byte cached = memoizedIsInitialized;
    if (cached != -1) {
      return cached == 1;
    }
    memoizedIsInitialized = 1;
    return true;
  }

  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
    // Field 1: gcs_source, written only when it is the active member of the `source` oneof.
    if (sourceCase_ == 1) {
      output.writeMessage(1, (com.google.cloud.automl.v1.GcsSource) source_);
    }
    // Field 2: the `params` map, serialized as repeated map-entry messages.
    com.google.protobuf.GeneratedMessageV3.serializeStringMapTo(
        output, internalGetParams(), ParamsDefaultEntryHolder.defaultEntry, 2);
    // Preserve any fields that were unknown when this message was parsed.
    getUnknownFields().writeTo(output);
  }

  @java.lang.Override
  public int getSerializedSize() {
    // Return the cached size when available; -1 (the initial value) means "not yet computed".
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    // Field 1: gcs_source, counted only when it is the active member of the `source` oneof.
    if (sourceCase_ == 1) {
      size +=
          com.google.protobuf.CodedOutputStream.computeMessageSize(
              1, (com.google.cloud.automl.v1.GcsSource) source_);
    }
    // Field 2: each `params` entry is serialized as its own map-entry message, so size
    // them one at a time by materializing a MapEntry per key/value pair.
    for (java.util.Map.Entry<java.lang.String, java.lang.String> entry :
        internalGetParams().getMap().entrySet()) {
      com.google.protobuf.MapEntry<java.lang.String, java.lang.String> params__ =
          ParamsDefaultEntryHolder.defaultEntry
              .newBuilderForType()
              .setKey(entry.getKey())
              .setValue(entry.getValue())
              .build();
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, params__);
    }
    // Include unknown fields, then memoize the total for subsequent calls.
    size += getUnknownFields().getSerializedSize();
    memoizedSize = size;
    return size;
  }

  /**
   * Structural equality: two InputConfig messages are equal when their params maps, active
   * `source` oneof case (and its value), and unknown fields all match.
   */
  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof com.google.cloud.automl.v1.InputConfig)) {
      return super.equals(obj);
    }
    com.google.cloud.automl.v1.InputConfig other = (com.google.cloud.automl.v1.InputConfig) obj;

    if (!internalGetParams().equals(other.internalGetParams())) {
      return false;
    }
    if (!getSourceCase().equals(other.getSourceCase())) {
      return false;
    }
    // Oneof cases already match; compare the payload only for the gcs_source case.
    if (sourceCase_ == 1 && !getGcsSource().equals(other.getGcsSource())) {
      return false;
    }
    return getUnknownFields().equals(other.getUnknownFields());
  }

  /**
   * Hash code consistent with {@link #equals}, folding in the descriptor, the non-empty
   * params map, the active `source` oneof payload, and the unknown fields. The result is
   * memoized; 0 doubles as the "not yet computed" sentinel.
   */
  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    if (!internalGetParams().getMap().isEmpty()) {
      hash = (37 * hash) + PARAMS_FIELD_NUMBER;
      hash = (53 * hash) + internalGetParams().hashCode();
    }
    if (sourceCase_ == 1) {
      hash = (37 * hash) + GCS_SOURCE_FIELD_NUMBER;
      hash = (53 * hash) + getGcsSource().hashCode();
    }
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  // --- Standard protobuf parse entry points. All overloads delegate to the message PARSER; ---
  // --- the in-memory variants (ByteBuffer/ByteString/byte[]) throw on malformed input,     ---
  // --- while the stream variants surface IOException from the underlying stream as well.   ---

  /** Parses an InputConfig from an in-memory buffer. */
  public static com.google.cloud.automl.v1.InputConfig parseFrom(java.nio.ByteBuffer data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  /** Parses an InputConfig from an in-memory buffer, resolving extensions via the registry. */
  public static com.google.cloud.automl.v1.InputConfig parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  /** Parses an InputConfig from a ByteString. */
  public static com.google.cloud.automl.v1.InputConfig parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  /** Parses an InputConfig from a ByteString, resolving extensions via the registry. */
  public static com.google.cloud.automl.v1.InputConfig parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  /** Parses an InputConfig from a byte array. */
  public static com.google.cloud.automl.v1.InputConfig parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  /** Parses an InputConfig from a byte array, resolving extensions via the registry. */
  public static com.google.cloud.automl.v1.InputConfig parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  /** Parses a single InputConfig that occupies the entire input stream. */
  public static com.google.cloud.automl.v1.InputConfig parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  /** Parses a single InputConfig from a stream, resolving extensions via the registry. */
  public static com.google.cloud.automl.v1.InputConfig parseFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  /** Parses a length-prefixed (delimited) InputConfig from the stream. */
  public static com.google.cloud.automl.v1.InputConfig parseDelimitedFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
  }

  /** Parses a length-prefixed (delimited) InputConfig, resolving extensions via the registry. */
  public static com.google.cloud.automl.v1.InputConfig parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  /** Parses an InputConfig from an already-open CodedInputStream. */
  public static com.google.cloud.automl.v1.InputConfig parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  /** Parses an InputConfig from a CodedInputStream, resolving extensions via the registry. */
  public static com.google.cloud.automl.v1.InputConfig parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  /** Returns a fresh builder for this message type. */
  @java.lang.Override
  public Builder newBuilderForType() {
    return newBuilder();
  }

  /** Creates a new builder initialized to the default instance. */
  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }

  /** Creates a new builder pre-populated with the fields of {@code prototype}. */
  public static Builder newBuilder(com.google.cloud.automl.v1.InputConfig prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }

  /** Converts this message back to a builder; the default instance yields an empty builder. */
  @java.lang.Override
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
  }

  /** Internal factory used by the protobuf runtime to create a parented builder. */
  @java.lang.Override
  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   *
   *
   * <pre>
   * Input configuration for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] action.
   * The format of input depends on dataset_metadata the Dataset into which
   * the import is happening has. As input source the
   * [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source]
   * is expected, unless specified otherwise. Additionally any input .CSV file
   * by itself must be 100MB or smaller, unless specified otherwise.
   * If an "example" file (that is, image, video etc.) with identical content
   * (even if it had different `GCS_FILE_PATH`) is mentioned multiple times, then
   * its label, bounding boxes etc. are appended. The same file should be always
   * provided with the same `ML_USE` and `GCS_FILE_PATH`, if it is not, then
   * these values are nondeterministically selected from the given ones.
   * The formats are represented in EBNF with commas being literal and with
   * non-terminal symbols defined near the end of this comment. The formats are:
   * &lt;h4&gt;AutoML Vision&lt;/h4&gt;
   * &lt;div class="ds-selector-tabs"&gt;&lt;section&gt;&lt;h5&gt;Classification&lt;/h5&gt;
   * See [Preparing your training
   * data](https://cloud.google.com/vision/automl/docs/prepare) for more
   * information.
   * CSV file(s) with each line in format:
   *     ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
   * *   `ML_USE` - Identifies the data set that the current row (file) applies
   * to.
   *     This value can be one of the following:
   *     * `TRAIN` - Rows in this file are used to train the model.
   *     * `TEST` - Rows in this file are used to test the model during training.
   *     * `UNASSIGNED` - Rows in this file are not categorized. They are
   *        Automatically divided into train and test data. 80% for training and
   *        20% for testing.
   * *   `GCS_FILE_PATH` - The Google Cloud Storage location of an image of up to
   *      30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP,
   *      .TIFF, .ICO.
   * *   `LABEL` - A label that identifies the object in the image.
   * For the `MULTICLASS` classification type, at most one `LABEL` is allowed
   * per image. If an image has not yet been labeled, then it should be
   * mentioned just once with no `LABEL`.
   * Some sample rows:
   *     TRAIN,gs://folder/image1.jpg,daisy
   *     TEST,gs://folder/image2.jpg,dandelion,tulip,rose
   *     UNASSIGNED,gs://folder/image3.jpg,daisy
   *     UNASSIGNED,gs://folder/image4.jpg
   * &lt;/section&gt;&lt;section&gt;&lt;h5&gt;Object Detection&lt;/h5&gt;
   * See [Preparing your training
   * data](https://cloud.google.com/vision/automl/object-detection/docs/prepare)
   * for more information.
   * A CSV file(s) with each line in format:
   *     ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,)
   * *   `ML_USE` - Identifies the data set that the current row (file) applies
   * to.
   *     This value can be one of the following:
   *     * `TRAIN` - Rows in this file are used to train the model.
   *     * `TEST` - Rows in this file are used to test the model during training.
   *     * `UNASSIGNED` - Rows in this file are not categorized. They are
   *        Automatically divided into train and test data. 80% for training and
   *        20% for testing.
   * *  `GCS_FILE_PATH` - The Google Cloud Storage location of an image of up to
   *     30MB in size. Supported extensions: .JPEG, .GIF, .PNG. Each image
   *     is assumed to be exhaustively labeled.
   * *  `LABEL` - A label that identifies the object in the image specified by the
   *    `BOUNDING_BOX`.
   * *  `BOUNDING BOX` - The vertices of an object in the example image.
   *    The minimum allowed `BOUNDING_BOX` edge length is 0.01, and no more than
   *    500 `BOUNDING_BOX` instances per image are allowed (one `BOUNDING_BOX`
   *    per line). If an image has no looked for objects then it should be
   *    mentioned just once with no LABEL and the ",,,,,,," in place of the
   *   `BOUNDING_BOX`.
   * **Four sample rows:**
   *     TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
   *     TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
   *     UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
   *     TEST,gs://folder/im3.png,,,,,,,,,
   *   &lt;/section&gt;
   * &lt;/div&gt;
   * &lt;h4&gt;AutoML Video Intelligence&lt;/h4&gt;
   * &lt;div class="ds-selector-tabs"&gt;&lt;section&gt;&lt;h5&gt;Classification&lt;/h5&gt;
   * See [Preparing your training
   * data](https://cloud.google.com/video-intelligence/automl/docs/prepare) for
   * more information.
   * CSV file(s) with each line in format:
   *     ML_USE,GCS_FILE_PATH
   * For `ML_USE`, do not use `VALIDATE`.
   * `GCS_FILE_PATH` is the path to another .csv file that describes training
   * example for a given `ML_USE`, using the following row format:
   *     GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,)
   * Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up
   * to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
   * `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
   * length of the video, and the end time must be after the start time. Any
   * segment of a video which has one or more labels on it, is considered a
   * hard negative for all other labels. Any segment with no labels on
   * it is considered to be unknown. If a whole video is unknown, then
   * it should be mentioned just once with ",," in place of `LABEL,
   * TIME_SEGMENT_START,TIME_SEGMENT_END`.
   * Sample top level CSV file:
   *     TRAIN,gs://folder/train_videos.csv
   *     TEST,gs://folder/test_videos.csv
   *     UNASSIGNED,gs://folder/other_videos.csv
   * Sample rows of a CSV file for a particular ML_USE:
   *     gs://folder/video1.avi,car,120,180.000021
   *     gs://folder/video1.avi,bike,150,180.000021
   *     gs://folder/vid2.avi,car,0,60.5
   *     gs://folder/vid3.avi,,,
   * &lt;/section&gt;&lt;section&gt;&lt;h5&gt;Object Tracking&lt;/h5&gt;
   * See [Preparing your training
   * data](/video-intelligence/automl/object-tracking/docs/prepare) for more
   * information.
   * CSV file(s) with each line in format:
   *     ML_USE,GCS_FILE_PATH
   * For `ML_USE`, do not use `VALIDATE`.
   * `GCS_FILE_PATH` is the path to another .csv file that describes training
   * example for a given `ML_USE`, using the following row format:
   *     GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX
   * or
   *     GCS_FILE_PATH,,,,,,,,,,
   * Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up
   * to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
   * Providing `INSTANCE_ID`s can help to obtain a better model. When
   * a specific labeled entity leaves the video frame, and shows up
   * afterwards it is not required, albeit preferable, that the same
   * `INSTANCE_ID` is given to it.
   * `TIMESTAMP` must be within the length of the video, the
   * `BOUNDING_BOX` is assumed to be drawn on the closest video's frame
   * to the `TIMESTAMP`. Any mentioned by the `TIMESTAMP` frame is expected
   * to be exhaustively labeled and no more than 500 `BOUNDING_BOX`-es per
   * frame are allowed. If a whole video is unknown, then it should be
   * mentioned just once with ",,,,,,,,,," in place of `LABEL,
   * [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX`.
   * Sample top level CSV file:
   *      TRAIN,gs://folder/train_videos.csv
   *      TEST,gs://folder/test_videos.csv
   *      UNASSIGNED,gs://folder/other_videos.csv
   * Seven sample rows of a CSV file for a particular ML_USE:
   *      gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
   *      gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
   *      gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
   *      gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
   *      gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
   *      gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
   *      gs://folder/video2.avi,,,,,,,,,,,
   *   &lt;/section&gt;
   * &lt;/div&gt;
   * &lt;h4&gt;AutoML Natural Language&lt;/h4&gt;
   * &lt;div class="ds-selector-tabs"&gt;&lt;section&gt;&lt;h5&gt;Entity Extraction&lt;/h5&gt;
   * See [Preparing your training
   * data](/natural-language/automl/entity-analysis/docs/prepare) for more
   * information.
   * One or more CSV file(s) with each line in the following format:
   *     ML_USE,GCS_FILE_PATH
   * *   `ML_USE` - Identifies the data set that the current row (file) applies
   * to.
   *     This value can be one of the following:
   *     * `TRAIN` - Rows in this file are used to train the model.
   *     * `TEST` - Rows in this file are used to test the model during training.
   *     * `UNASSIGNED` - Rows in this file are not categorized. They are
   *        automatically divided into train and test data. 80% for training and
   *        20% for testing.
   * *   `GCS_FILE_PATH` - Identifies a JSON Lines (.JSONL) file stored in
   *      Google Cloud Storage that contains in-line text as documents
   *      for model training.
   * After the training data set has been determined from the `TRAIN` and
   * `UNASSIGNED` CSV files, the training data is divided into train and
   * validation data sets. 70% for training and 30% for validation.
   * For example:
   *     TRAIN,gs://folder/file1.jsonl
   *     VALIDATE,gs://folder/file2.jsonl
   *     TEST,gs://folder/file3.jsonl
   * **In-line JSONL files**
   * In-line .JSONL files contain, per line, a JSON document that wraps a
   * [`text_snippet`][google.cloud.automl.v1.TextSnippet] field followed by
   * one or more [`annotations`][google.cloud.automl.v1.AnnotationPayload]
   * fields, which have `display_name` and `text_extraction` fields to describe
   * the entity from the text snippet. Multiple JSON documents can be separated
   * using line breaks (&#92;n).
   * The supplied text must be annotated exhaustively. For example, if you
   * include the text "horse", but do not label it as "animal",
   * then "horse" is assumed to not be an "animal".
   * Any given text snippet content must have 30,000 characters or
   * less, and also be UTF-8 NFC encoded. ASCII is accepted as it is
   * UTF-8 NFC encoded.
   * For example:
   *     {
   *       "text_snippet": {
   *         "content": "dog car cat"
   *       },
   *       "annotations": [
   *          {
   *            "display_name": "animal",
   *            "text_extraction": {
   *              "text_segment": {"start_offset": 0, "end_offset": 2}
   *           }
   *          },
   *          {
   *           "display_name": "vehicle",
   *            "text_extraction": {
   *              "text_segment": {"start_offset": 4, "end_offset": 6}
   *            }
   *          },
   *          {
   *            "display_name": "animal",
   *            "text_extraction": {
   *              "text_segment": {"start_offset": 8, "end_offset": 10}
   *            }
   *          }
   *      ]
   *     }&#92;n
   *     {
   *        "text_snippet": {
   *          "content": "This dog is good."
   *        },
   *        "annotations": [
   *           {
   *             "display_name": "animal",
   *             "text_extraction": {
   *               "text_segment": {"start_offset": 5, "end_offset": 7}
   *             }
   *           }
   *        ]
   *     }
   * **JSONL files that reference documents**
   * .JSONL files contain, per line, a JSON document that wraps a
   * `input_config` that contains the path to a source document.
   * Multiple JSON documents can be separated using line breaks (&#92;n).
   * Supported document extensions: .PDF, .TIF, .TIFF
   * For example:
   *     {
   *       "document": {
   *         "input_config": {
   *           "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
   *           }
   *         }
   *       }
   *     }&#92;n
   *     {
   *       "document": {
   *         "input_config": {
   *           "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ]
   *           }
   *         }
   *       }
   *     }
   * **In-line JSONL files with document layout information**
   * **Note:** You can only annotate documents using the UI. The format described
   * below applies to annotated documents exported using the UI or `exportData`.
   * In-line .JSONL files for documents contain, per line, a JSON document
   * that wraps a `document` field that provides the textual content of the
   * document and the layout information.
   * For example:
   *     {
   *       "document": {
   *               "document_text": {
   *                 "content": "dog car cat"
   *               }
   *               "layout": [
   *                 {
   *                   "text_segment": {
   *                     "start_offset": 0,
   *                     "end_offset": 11,
   *                    },
   *                    "page_number": 1,
   *                    "bounding_poly": {
   *                       "normalized_vertices": [
   *                         {"x": 0.1, "y": 0.1},
   *                         {"x": 0.1, "y": 0.3},
   *                         {"x": 0.3, "y": 0.3},
   *                         {"x": 0.3, "y": 0.1},
   *                       ],
   *                     },
   *                     "text_segment_type": TOKEN,
   *                 }
   *               ],
   *               "document_dimensions": {
   *                 "width": 8.27,
   *                 "height": 11.69,
   *                 "unit": INCH,
   *               }
   *               "page_count": 3,
   *             },
   *             "annotations": [
   *               {
   *                 "display_name": "animal",
   *                 "text_extraction": {
   *                   "text_segment": {"start_offset": 0, "end_offset": 3}
   *                 }
   *               },
   *               {
   *                 "display_name": "vehicle",
   *                 "text_extraction": {
   *                   "text_segment": {"start_offset": 4, "end_offset": 7}
   *                 }
   *               },
   *               {
   *                 "display_name": "animal",
   *                 "text_extraction": {
   *                   "text_segment": {"start_offset": 8, "end_offset": 11}
   *                 }
   *               },
   *             ],
   * &lt;/section&gt;&lt;section&gt;&lt;h5&gt;Classification&lt;/h5&gt;
   * See [Preparing your training
   * data](https://cloud.google.com/natural-language/automl/docs/prepare) for more
   * information.
   * One or more CSV file(s) with each line in the following format:
   *     ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,...
   * *   `ML_USE` - Identifies the data set that the current row (file) applies
   * to.
   *     This value can be one of the following:
   *     * `TRAIN` - Rows in this file are used to train the model.
   *     * `TEST` - Rows in this file are used to test the model during training.
   *     * `UNASSIGNED` - Rows in this file are not categorized. They are
   *        Automatically divided into train and test data. 80% for training and
   *        20% for testing.
   * *   `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished by a pattern. If
   *     the column content is a valid Google Cloud Storage file path, that is,
   *     prefixed by "gs://", it is treated as a `GCS_FILE_PATH`. Otherwise, if
   *     the content is enclosed in double quotes (""), it is treated as a
   *     `TEXT_SNIPPET`. For `GCS_FILE_PATH`, the path must lead to a
   *     file with supported extension and UTF-8 encoding, for example,
   *     "gs://folder/content.txt" AutoML imports the file content
   *     as a text snippet. For `TEXT_SNIPPET`, AutoML imports the column content
   *     excluding quotes. In both cases, size of the content must be 10MB or
   *     less in size. For zip files, the size of each file inside the zip must be
   *     10MB or less in size.
   *     For the `MULTICLASS` classification type, at most one `LABEL` is allowed.
   *     The `ML_USE` and `LABEL` columns are optional.
   *     Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
   * A maximum of 100 unique labels are allowed per CSV row.
   * Sample rows:
   *     TRAIN,"They have bad food and very rude",RudeService,BadFood
   *     gs://folder/content.txt,SlowService
   *     TEST,gs://folder/document.pdf
   *     VALIDATE,gs://folder/text_files.zip,BadFood
   * &lt;/section&gt;&lt;section&gt;&lt;h5&gt;Sentiment Analysis&lt;/h5&gt;
   * See [Preparing your training
   * data](https://cloud.google.com/natural-language/automl/docs/prepare) for more
   * information.
   * CSV file(s) with each line in format:
   *     ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT
   * *   `ML_USE` - Identifies the data set that the current row (file) applies
   * to.
   *     This value can be one of the following:
   *     * `TRAIN` - Rows in this file are used to train the model.
   *     * `TEST` - Rows in this file are used to test the model during training.
   *     * `UNASSIGNED` - Rows in this file are not categorized. They are
   *        Automatically divided into train and test data. 80% for training and
   *        20% for testing.
   * *   `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished by a pattern. If
   *     the column content is a valid  Google Cloud Storage file path, that is,
   *     prefixed by "gs://", it is treated as a `GCS_FILE_PATH`. Otherwise, if
   *     the content is enclosed in double quotes (""), it is treated as a
   *     `TEXT_SNIPPET`. For `GCS_FILE_PATH`, the path must lead to a
   *     file with supported extension and UTF-8 encoding, for example,
   *     "gs://folder/content.txt" AutoML imports the file content
   *     as a text snippet. For `TEXT_SNIPPET`, AutoML imports the column content
   *     excluding quotes. In both cases, size of the content must be 128kB or
   *     less in size. For zip files, the size of each file inside the zip must be
   *     128kB or less in size.
   *     The `ML_USE` and `SENTIMENT` columns are optional.
   *     Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
   * *  `SENTIMENT` - An integer between 0 and
   *     Dataset.text_sentiment_dataset_metadata.sentiment_max
   *     (inclusive). Describes the ordinal of the sentiment - higher
   *     value means a more positive sentiment. All the values are
   *     completely relative, i.e. neither 0 needs to mean a negative or
   *     neutral sentiment nor sentiment_max needs to mean a positive one -
   *     it is just required that 0 is the least positive sentiment
   *     in the data, and sentiment_max is the  most positive one.
   *     The SENTIMENT shouldn't be confused with "score" or "magnitude"
   *     from the previous Natural Language Sentiment Analysis API.
   *     All SENTIMENT values between 0 and sentiment_max must be
   *     represented in the imported data. On prediction the same 0 to
   *     sentiment_max range will be used. The difference between
   *     neighboring sentiment values needs not to be uniform, e.g. 1 and
   *     2 may be similar whereas the difference between 2 and 3 may be
   *     large.
   * Sample rows:
   *     TRAIN,"&#64;freewrytin this is way too good for your product",2
   *     gs://folder/content.txt,3
   *     TEST,gs://folder/document.pdf
   *     VALIDATE,gs://folder/text_files.zip,2
   *   &lt;/section&gt;
   * &lt;/div&gt;
   * &lt;h4&gt;AutoML Tables&lt;/h4&gt;&lt;div class="ui-datasection-main"&gt;&lt;section
   * class="selected"&gt;
   * See [Preparing your training
   * data](https://cloud.google.com/automl-tables/docs/prepare) for more
   * information.
   * You can use either
   * [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or
   * [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source].
   * All input is concatenated into a
   * single
   * [primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id]
   * **For gcs_source:**
   * CSV file(s), where the first row of the first file is the header,
   * containing unique column names. If the first row of a subsequent
   * file is the same as the header, then it is also treated as a
   * header. All other rows contain values for the corresponding
   * columns.
   * Each .CSV file by itself must be 10GB or smaller, and their total
   * size must be 100GB or smaller.
   * First three sample rows of a CSV file:
   * &lt;pre&gt;
   * "Id","First Name","Last Name","Dob","Addresses"
   * "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
   * "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
   * &lt;/pre&gt;
   * **For bigquery_source:**
   * An URI of a BigQuery table. The user data size of the BigQuery
   * table must be 100GB or smaller.
   * An imported table must have between 2 and 1,000 columns, inclusive,
   * and between 1000 and 100,000,000 rows, inclusive. There are at most 5
   * import data running in parallel.
   *   &lt;/section&gt;
   * &lt;/div&gt;
   * **Input field definitions:**
   * `ML_USE`
   * : ("TRAIN" | "VALIDATE" | "TEST" | "UNASSIGNED")
   *   Describes how the given example (file) should be used for model
   *   training. "UNASSIGNED" can be used when user has no preference.
   * `GCS_FILE_PATH`
   * : The path to a file on Google Cloud Storage. For example,
   *   "gs://folder/image1.png".
   * `LABEL`
   * : A display name of an object on an image, video etc., e.g. "dog".
   *   Must be up to 32 characters long and can consist only of ASCII
   *   Latin letters A-Z and a-z, underscores(_), and ASCII digits 0-9.
   *   For each label an AnnotationSpec is created which display_name
   *   becomes the label; AnnotationSpecs are given back in predictions.
   * `INSTANCE_ID`
   * : A positive integer that identifies a specific instance of a
   *   labeled entity on an example. Used e.g. to track two cars on
   *   a video while being able to tell apart which one is which.
   * `BOUNDING_BOX`
   * : (`VERTEX,VERTEX,VERTEX,VERTEX` | `VERTEX,,,VERTEX,,`)
   *   A rectangle parallel to the frame of the example (image,
   *   video). If 4 vertices are given they are connected by edges
   *   in the order provided, if 2 are given they are recognized
   *   as diagonally opposite vertices of the rectangle.
   * `VERTEX`
   * : (`COORDINATE,COORDINATE`)
   *   First coordinate is horizontal (x), the second is vertical (y).
   * `COORDINATE`
   * : A float in 0 to 1 range, relative to total length of
   *   image or video in given dimension. For fractions the
   *   leading non-decimal 0 can be omitted (i.e. 0.3 = .3).
   *   Point 0,0 is in top left.
   * `TIME_SEGMENT_START`
   * : (`TIME_OFFSET`)
   *   Expresses a beginning, inclusive, of a time segment
   *   within an example that has a time dimension
   *   (e.g. video).
   * `TIME_SEGMENT_END`
   * : (`TIME_OFFSET`)
   *   Expresses an end, exclusive, of a time segment within
   *   an example that has a time dimension (e.g. video).
   * `TIME_OFFSET`
   * : A number of seconds as measured from the start of an
   *   example (e.g. video). Fractions are allowed, up to a
   *   microsecond precision. "inf" is allowed, and it means the end
   *   of the example.
   * `TEXT_SNIPPET`
   * : The content of a text snippet, UTF-8 encoded, enclosed within
   *   double quotes ("").
   * `DOCUMENT`
   * : A field that provides the textual content with document and the layout
   *   information.
   *  **Errors:**
   *  If any of the provided CSV files can't be parsed or if more than certain
   *  percent of CSV rows cannot be processed then the operation fails and
   *  nothing is imported. Regardless of overall success or failure the per-row
   *  failures, up to a certain count cap, are listed in
   *  Operation.metadata.partial_failures.
   * </pre>
   *
   * Protobuf type {@code google.cloud.automl.v1.InputConfig}
   */
  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
      implements
      // @@protoc_insertion_point(builder_implements:google.cloud.automl.v1.InputConfig)
      com.google.cloud.automl.v1.InputConfigOrBuilder {
    // Descriptor for the InputConfig message type; shared with the enclosing message class.
    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return com.google.cloud.automl.v1.Io
          .internal_static_google_cloud_automl_v1_InputConfig_descriptor;
    }

    // Reflection support: resolves a map field by its proto field number.
    // Field number 2 is the `params` map; any other number is a programming error.
    @SuppressWarnings({"rawtypes"})
    protected com.google.protobuf.MapField internalGetMapField(int number) {
      switch (number) {
        case 2:
          return internalGetParams();
        default:
          throw new RuntimeException("Invalid map field number: " + number);
      }
    }

    // Mutable counterpart of internalGetMapField, used by reflective writes.
    @SuppressWarnings({"rawtypes"})
    protected com.google.protobuf.MapField internalGetMutableMapField(int number) {
      switch (number) {
        case 2:
          return internalGetMutableParams();
        default:
          throw new RuntimeException("Invalid map field number: " + number);
      }
    }

    // Binds the generated field accessor table to the message and builder classes
    // so reflective get/set operations work.
    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.cloud.automl.v1.Io
          .internal_static_google_cloud_automl_v1_InputConfig_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.cloud.automl.v1.InputConfig.class,
              com.google.cloud.automl.v1.InputConfig.Builder.class);
    }

    // Construct using com.google.cloud.automl.v1.InputConfig.newBuilder()
    private Builder() {}

    // Constructor used when this builder is a child of another builder (nested builds).
    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
    }

    // Resets every field to its default: clears the presence bits, the gcs_source
    // sub-builder, the params map, and the source oneof.
    @java.lang.Override
    public Builder clear() {
      super.clear();
      bitField0_ = 0;
      if (gcsSourceBuilder_ != null) {
        gcsSourceBuilder_.clear();
      }
      internalGetMutableParams().clear();
      sourceCase_ = 0;
      source_ = null;
      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.cloud.automl.v1.Io
          .internal_static_google_cloud_automl_v1_InputConfig_descriptor;
    }

    @java.lang.Override
    public com.google.cloud.automl.v1.InputConfig getDefaultInstanceForType() {
      return com.google.cloud.automl.v1.InputConfig.getDefaultInstance();
    }

    // Builds the message, throwing if any required fields are unset.
    // (InputConfig has no required fields, so isInitialized() is always true.)
    @java.lang.Override
    public com.google.cloud.automl.v1.InputConfig build() {
      com.google.cloud.automl.v1.InputConfig result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    // Builds the message without enforcing required-field initialization.
    @java.lang.Override
    public com.google.cloud.automl.v1.InputConfig buildPartial() {
      com.google.cloud.automl.v1.InputConfig result =
          new com.google.cloud.automl.v1.InputConfig(this);
      if (bitField0_ != 0) {
        buildPartial0(result);
      }
      buildPartialOneofs(result);
      onBuilt();
      return result;
    }

    // Copies non-oneof fields into the result. Bit 0x00000002 marks that the
    // `params` map was touched; the copied map is frozen for the immutable message.
    private void buildPartial0(com.google.cloud.automl.v1.InputConfig result) {
      int from_bitField0_ = bitField0_;
      if (((from_bitField0_ & 0x00000002) != 0)) {
        result.params_ = internalGetParams();
        result.params_.makeImmutable();
      }
    }

    // Copies the `source` oneof into the result; if a gcs_source sub-builder is
    // active it is built here so the message holds an immutable GcsSource.
    private void buildPartialOneofs(com.google.cloud.automl.v1.InputConfig result) {
      result.sourceCase_ = sourceCase_;
      result.source_ = this.source_;
      if (sourceCase_ == 1 && gcsSourceBuilder_ != null) {
        result.source_ = gcsSourceBuilder_.build();
      }
    }

    // The following overrides simply delegate to GeneratedMessageV3.Builder;
    // they are regenerated verbatim by protoc to pin the return type to Builder.
    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }

    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.setField(field, value);
    }

    @java.lang.Override
    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }

    @java.lang.Override
    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }

    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }

    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }

    // Dynamic-dispatch merge: routes to the typed overload when possible.
    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.cloud.automl.v1.InputConfig) {
        return mergeFrom((com.google.cloud.automl.v1.InputConfig) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    // Merges all set fields of `other` into this builder: map entries are merged
    // key-by-key, and the source oneof is merged per its currently-set case.
    public Builder mergeFrom(com.google.cloud.automl.v1.InputConfig other) {
      if (other == com.google.cloud.automl.v1.InputConfig.getDefaultInstance()) return this;
      internalGetMutableParams().mergeFrom(other.internalGetParams());
      bitField0_ |= 0x00000002;
      switch (other.getSourceCase()) {
        case GCS_SOURCE:
          {
            mergeGcsSource(other.getGcsSource());
            break;
          }
        case SOURCE_NOT_SET:
          {
            break;
          }
      }
      this.mergeUnknownFields(other.getUnknownFields());
      onChanged();
      return this;
    }

    // Always true: InputConfig declares no required fields.
    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    // Wire-format parse loop. Tag 10 = field 1 (gcs_source, length-delimited);
    // tag 18 = field 2 (params map entry). Unknown fields are preserved.
    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 10:
              {
                input.readMessage(getGcsSourceFieldBuilder().getBuilder(), extensionRegistry);
                sourceCase_ = 1;
                break;
              } // case 10
            case 18:
              {
                com.google.protobuf.MapEntry<java.lang.String, java.lang.String> params__ =
                    input.readMessage(
                        ParamsDefaultEntryHolder.defaultEntry.getParserForType(),
                        extensionRegistry);
                internalGetMutableParams()
                    .getMutableMap()
                    .put(params__.getKey(), params__.getValue());
                bitField0_ |= 0x00000002;
                break;
              } // case 18
            default:
              {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
          } // switch (tag)
        } // while (!done)
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.unwrapIOException();
      } finally {
        onChanged();
      } // finally
      return this;
    }

    // Which member of the `source` oneof is set: 0 = none, 1 = gcs_source.
    private int sourceCase_ = 0;
    // Holds the value of whichever `source` oneof member is set (a GcsSource when
    // sourceCase_ == 1), unless the sub-builder gcsSourceBuilder_ owns it.
    private java.lang.Object source_;

    public SourceCase getSourceCase() {
      return SourceCase.forNumber(sourceCase_);
    }

    // Clears the entire `source` oneof regardless of which member is set.
    public Builder clearSource() {
      sourceCase_ = 0;
      source_ = null;
      onChanged();
      return this;
    }

    // Presence bits for non-oneof fields; bit 0x00000002 tracks the params map.
    private int bitField0_;

    // Lazily-created sub-builder for the gcs_source message field; null until
    // getGcsSourceFieldBuilder() is first called.
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.automl.v1.GcsSource,
            com.google.cloud.automl.v1.GcsSource.Builder,
            com.google.cloud.automl.v1.GcsSourceOrBuilder>
        gcsSourceBuilder_;
    /**
     *
     *
     * <pre>
     * The Google Cloud Storage location for the input content.
     * For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
     * a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
     * </pre>
     *
     * <code>.google.cloud.automl.v1.GcsSource gcs_source = 1;</code>
     *
     * @return Whether the gcsSource field is set.
     */
    @java.lang.Override
    public boolean hasGcsSource() {
      return sourceCase_ == 1;
    }
    /**
     *
     *
     * <pre>
     * The Google Cloud Storage location for the input content.
     * For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
     * a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
     * </pre>
     *
     * <code>.google.cloud.automl.v1.GcsSource gcs_source = 1;</code>
     *
     * @return The gcsSource.
     */
    @java.lang.Override
    public com.google.cloud.automl.v1.GcsSource getGcsSource() {
      if (gcsSourceBuilder_ == null) {
        if (sourceCase_ == 1) {
          return (com.google.cloud.automl.v1.GcsSource) source_;
        }
        return com.google.cloud.automl.v1.GcsSource.getDefaultInstance();
      } else {
        if (sourceCase_ == 1) {
          return gcsSourceBuilder_.getMessage();
        }
        return com.google.cloud.automl.v1.GcsSource.getDefaultInstance();
      }
    }
    /**
     *
     *
     * <pre>
     * The Google Cloud Storage location for the input content.
     * For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
     * a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
     * </pre>
     *
     * <code>.google.cloud.automl.v1.GcsSource gcs_source = 1;</code>
     */
    public Builder setGcsSource(com.google.cloud.automl.v1.GcsSource value) {
      if (gcsSourceBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        source_ = value;
        onChanged();
      } else {
        gcsSourceBuilder_.setMessage(value);
      }
      sourceCase_ = 1;
      return this;
    }
    /**
     *
     *
     * <pre>
     * The Google Cloud Storage location for the input content.
     * For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
     * a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
     * </pre>
     *
     * <code>.google.cloud.automl.v1.GcsSource gcs_source = 1;</code>
     */
    public Builder setGcsSource(com.google.cloud.automl.v1.GcsSource.Builder builderForValue) {
      if (gcsSourceBuilder_ == null) {
        source_ = builderForValue.build();
        onChanged();
      } else {
        gcsSourceBuilder_.setMessage(builderForValue.build());
      }
      sourceCase_ = 1;
      return this;
    }
    /**
     *
     *
     * <pre>
     * The Google Cloud Storage location for the input content.
     * For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
     * a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
     * </pre>
     *
     * <code>.google.cloud.automl.v1.GcsSource gcs_source = 1;</code>
     */
    public Builder mergeGcsSource(com.google.cloud.automl.v1.GcsSource value) {
      if (gcsSourceBuilder_ == null) {
        if (sourceCase_ == 1
            && source_ != com.google.cloud.automl.v1.GcsSource.getDefaultInstance()) {
          source_ =
              com.google.cloud.automl.v1.GcsSource.newBuilder(
                      (com.google.cloud.automl.v1.GcsSource) source_)
                  .mergeFrom(value)
                  .buildPartial();
        } else {
          source_ = value;
        }
        onChanged();
      } else {
        if (sourceCase_ == 1) {
          gcsSourceBuilder_.mergeFrom(value);
        } else {
          gcsSourceBuilder_.setMessage(value);
        }
      }
      sourceCase_ = 1;
      return this;
    }
    /**
     *
     *
     * <pre>
     * The Google Cloud Storage location for the input content.
     * For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
     * a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
     * </pre>
     *
     * <code>.google.cloud.automl.v1.GcsSource gcs_source = 1;</code>
     */
    public Builder clearGcsSource() {
      if (gcsSourceBuilder_ == null) {
        if (sourceCase_ == 1) {
          sourceCase_ = 0;
          source_ = null;
          onChanged();
        }
      } else {
        if (sourceCase_ == 1) {
          sourceCase_ = 0;
          source_ = null;
        }
        gcsSourceBuilder_.clear();
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * The Google Cloud Storage location for the input content.
     * For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
     * a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
     * </pre>
     *
     * <code>.google.cloud.automl.v1.GcsSource gcs_source = 1;</code>
     */
    public com.google.cloud.automl.v1.GcsSource.Builder getGcsSourceBuilder() {
      return getGcsSourceFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * The Google Cloud Storage location for the input content.
     * For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
     * a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
     * </pre>
     *
     * <code>.google.cloud.automl.v1.GcsSource gcs_source = 1;</code>
     */
    @java.lang.Override
    public com.google.cloud.automl.v1.GcsSourceOrBuilder getGcsSourceOrBuilder() {
      if ((sourceCase_ == 1) && (gcsSourceBuilder_ != null)) {
        return gcsSourceBuilder_.getMessageOrBuilder();
      } else {
        if (sourceCase_ == 1) {
          return (com.google.cloud.automl.v1.GcsSource) source_;
        }
        return com.google.cloud.automl.v1.GcsSource.getDefaultInstance();
      }
    }
    /**
     *
     *
     * <pre>
     * The Google Cloud Storage location for the input content.
     * For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
     * a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
     * </pre>
     *
     * <code>.google.cloud.automl.v1.GcsSource gcs_source = 1;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.automl.v1.GcsSource,
            com.google.cloud.automl.v1.GcsSource.Builder,
            com.google.cloud.automl.v1.GcsSourceOrBuilder>
        getGcsSourceFieldBuilder() {
      if (gcsSourceBuilder_ == null) {
        if (!(sourceCase_ == 1)) {
          source_ = com.google.cloud.automl.v1.GcsSource.getDefaultInstance();
        }
        gcsSourceBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.automl.v1.GcsSource,
                com.google.cloud.automl.v1.GcsSource.Builder,
                com.google.cloud.automl.v1.GcsSourceOrBuilder>(
                (com.google.cloud.automl.v1.GcsSource) source_, getParentForChildren(), isClean());
        source_ = null;
      }
      sourceCase_ = 1;
      onChanged();
      return gcsSourceBuilder_;
    }

    // Backing storage for the `params` map; null until first mutation.
    private com.google.protobuf.MapField<java.lang.String, java.lang.String> params_;

    // Read-only view of the params map; substitutes a shared empty map when unset.
    private com.google.protobuf.MapField<java.lang.String, java.lang.String> internalGetParams() {
      if (params_ == null) {
        return com.google.protobuf.MapField.emptyMapField(ParamsDefaultEntryHolder.defaultEntry);
      }
      return params_;
    }

    // Mutable view of the params map; copies-on-write if the current map is
    // frozen (e.g. shared with a built message) and records the presence bit.
    private com.google.protobuf.MapField<java.lang.String, java.lang.String>
        internalGetMutableParams() {
      if (params_ == null) {
        params_ = com.google.protobuf.MapField.newMapField(ParamsDefaultEntryHolder.defaultEntry);
      }
      if (!params_.isMutable()) {
        params_ = params_.copy();
      }
      bitField0_ |= 0x00000002;
      onChanged();
      return params_;
    }

    // Number of entries currently in the params map.
    public int getParamsCount() {
      return internalGetParams().getMap().size();
    }
    /**
     *
     *
     * <pre>
     * Additional domain-specific parameters describing the semantic of the
     * imported data, any string must be up to 25000
     * characters long.
     * &lt;h4&gt;AutoML Tables&lt;/h4&gt;
     * `schema_inference_version`
     * : (integer) This value must be supplied.
     *   The version of the
     *   algorithm to use for the initial inference of the
     *   column data types of the imported table. Allowed values: "1".
     * </pre>
     *
     * <code>map&lt;string, string&gt; params = 2;</code>
     */
    @java.lang.Override
    public boolean containsParams(java.lang.String key) {
      if (key == null) {
        throw new NullPointerException("map key");
      }
      return internalGetParams().getMap().containsKey(key);
    }
    /** Use {@link #getParamsMap()} instead. */
    @java.lang.Override
    @java.lang.Deprecated
    public java.util.Map<java.lang.String, java.lang.String> getParams() {
      return getParamsMap();
    }
    /**
     *
     *
     * <pre>
     * Additional domain-specific parameters describing the semantic of the
     * imported data, any string must be up to 25000
     * characters long.
     * &lt;h4&gt;AutoML Tables&lt;/h4&gt;
     * `schema_inference_version`
     * : (integer) This value must be supplied.
     *   The version of the
     *   algorithm to use for the initial inference of the
     *   column data types of the imported table. Allowed values: "1".
     * </pre>
     *
     * <code>map&lt;string, string&gt; params = 2;</code>
     */
    @java.lang.Override
    public java.util.Map<java.lang.String, java.lang.String> getParamsMap() {
      return internalGetParams().getMap();
    }
    /**
     *
     *
     * <pre>
     * Additional domain-specific parameters describing the semantic of the
     * imported data, any string must be up to 25000
     * characters long.
     * &lt;h4&gt;AutoML Tables&lt;/h4&gt;
     * `schema_inference_version`
     * : (integer) This value must be supplied.
     *   The version of the
     *   algorithm to use for the initial inference of the
     *   column data types of the imported table. Allowed values: "1".
     * </pre>
     *
     * <code>map&lt;string, string&gt; params = 2;</code>
     */
    @java.lang.Override
    public /* nullable */ java.lang.String getParamsOrDefault(
        java.lang.String key,
        /* nullable */
        java.lang.String defaultValue) {
      if (key == null) {
        throw new NullPointerException("map key");
      }
      java.util.Map<java.lang.String, java.lang.String> map = internalGetParams().getMap();
      return map.containsKey(key) ? map.get(key) : defaultValue;
    }
    /**
     *
     *
     * <pre>
     * Additional domain-specific parameters describing the semantic of the
     * imported data, any string must be up to 25000
     * characters long.
     * &lt;h4&gt;AutoML Tables&lt;/h4&gt;
     * `schema_inference_version`
     * : (integer) This value must be supplied.
     *   The version of the
     *   algorithm to use for the initial inference of the
     *   column data types of the imported table. Allowed values: "1".
     * </pre>
     *
     * <code>map&lt;string, string&gt; params = 2;</code>
     */
    @java.lang.Override
    public java.lang.String getParamsOrThrow(java.lang.String key) {
      if (key == null) {
        throw new NullPointerException("map key");
      }
      java.util.Map<java.lang.String, java.lang.String> map = internalGetParams().getMap();
      if (!map.containsKey(key)) {
        throw new java.lang.IllegalArgumentException();
      }
      return map.get(key);
    }

    // Removes every entry from the params map and clears its presence bit.
    public Builder clearParams() {
      bitField0_ = (bitField0_ & ~0x00000002);
      internalGetMutableParams().getMutableMap().clear();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Additional domain-specific parameters describing the semantic of the
     * imported data, any string must be up to 25000
     * characters long.
     * &lt;h4&gt;AutoML Tables&lt;/h4&gt;
     * `schema_inference_version`
     * : (integer) This value must be supplied.
     *   The version of the
     *   algorithm to use for the initial inference of the
     *   column data types of the imported table. Allowed values: "1".
     * </pre>
     *
     * <code>map&lt;string, string&gt; params = 2;</code>
     */
    public Builder removeParams(java.lang.String key) {
      if (key == null) {
        throw new NullPointerException("map key");
      }
      internalGetMutableParams().getMutableMap().remove(key);
      return this;
    }
    /** Use alternate mutation accessors instead. */
    @java.lang.Deprecated
    public java.util.Map<java.lang.String, java.lang.String> getMutableParams() {
      bitField0_ |= 0x00000002;
      return internalGetMutableParams().getMutableMap();
    }
    /**
     *
     *
     * <pre>
     * Additional domain-specific parameters describing the semantic of the
     * imported data, any string must be up to 25000
     * characters long.
     * &lt;h4&gt;AutoML Tables&lt;/h4&gt;
     * `schema_inference_version`
     * : (integer) This value must be supplied.
     *   The version of the
     *   algorithm to use for the initial inference of the
     *   column data types of the imported table. Allowed values: "1".
     * </pre>
     *
     * <code>map&lt;string, string&gt; params = 2;</code>
     */
    public Builder putParams(java.lang.String key, java.lang.String value) {
      if (key == null) {
        throw new NullPointerException("map key");
      }
      if (value == null) {
        throw new NullPointerException("map value");
      }
      internalGetMutableParams().getMutableMap().put(key, value);
      bitField0_ |= 0x00000002;
      return this;
    }
    /**
     *
     *
     * <pre>
     * Additional domain-specific parameters describing the semantic of the
     * imported data, any string must be up to 25000
     * characters long.
     * &lt;h4&gt;AutoML Tables&lt;/h4&gt;
     * `schema_inference_version`
     * : (integer) This value must be supplied.
     *   The version of the
     *   algorithm to use for the initial inference of the
     *   column data types of the imported table. Allowed values: "1".
     * </pre>
     *
     * <code>map&lt;string, string&gt; params = 2;</code>
     */
    public Builder putAllParams(java.util.Map<java.lang.String, java.lang.String> values) {
      internalGetMutableParams().getMutableMap().putAll(values);
      bitField0_ |= 0x00000002;
      return this;
    }

    // Unknown-field handling delegates to the base builder; fields read off the
    // wire that this message version does not know are preserved round-trip.
    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.automl.v1.InputConfig)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.automl.v1.InputConfig)
  // Shared immutable default instance; all unset InputConfig references use it.
  private static final com.google.cloud.automl.v1.InputConfig DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.automl.v1.InputConfig();
  }

  /** Returns the shared default (all-fields-unset) instance of {@code InputConfig}. */
  public static com.google.cloud.automl.v1.InputConfig getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  // Wire-format parser for InputConfig. Parsing is delegated to the Builder's
  // mergeFrom; any parse failure is rethrown as InvalidProtocolBufferException
  // carrying the partially-built message for callers that want partial data.
  private static final com.google.protobuf.Parser<InputConfig> PARSER =
      new com.google.protobuf.AbstractParser<InputConfig>() {
        @java.lang.Override
        public InputConfig parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (com.google.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new com.google.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

  /** Returns the shared parser for {@code InputConfig} messages. */
  public static com.google.protobuf.Parser<InputConfig> parser() {
    return PARSER;
  }

  // Instance accessor for the shared parser, required by the Message interface.
  @java.lang.Override
  public com.google.protobuf.Parser<InputConfig> getParserForType() {
    return PARSER;
  }

  // Instance accessor for the shared default instance, required by MessageLite.
  @java.lang.Override
  public com.google.cloud.automl.v1.InputConfig getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
