/*
 * Copyright (C) 2024 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.adservices.ondevicepersonalization;

import android.annotation.IntRange;
import android.annotation.NonNull;
import android.os.Parcelable;

import com.android.ondevicepersonalization.internal.util.AnnotationValidations;
import com.android.ondevicepersonalization.internal.util.ByteArrayParceledListSlice;
import com.android.ondevicepersonalization.internal.util.DataClass;

/**
 * Parcelable version of {@link InferenceInput}.
 *
 * @hide
 */
@DataClass(genAidl = false, genBuilder = false)
public class InferenceInputParcel implements Parcelable {
    /**
     * The location of the TFLite model. The model is usually stored in the REMOTE_DATA or
     * LOCAL_DATA table.
     */
    @NonNull private ModelId mModelId;

    /** The delegate to run model inference. Defaults to the CPU delegate if not specified. */
    private @InferenceInput.Params.Delegate int mDelegate;

    /**
     * The number of threads available to the interpreter. This setting only takes effect when
     * the input tensors are on the CPU. Setting cpuNumThread to 0 disables multithreading, which
     * is equivalent to setting it to 1. If set to -1, the number of threads used is
     * implementation-defined and platform-dependent.
     */
    private @IntRange(from = 1) int mCpuNumThread;

    /** An array of input data. The inputs should be in the same order as the model's inputs. */
    @NonNull private ByteArrayParceledListSlice mInputData;

    /**
     * The number of input examples. Adopters can set this field to run batched inference. The
     * batch size is 1 by default.
     */
    private int mBatchSize;

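    /** The type of the model. Defaults to MODEL_TYPE_TENSORFLOW_LITE. */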
    private @InferenceInput.Params.ModelType int mModelType =
            InferenceInput.Params.MODEL_TYPE_TENSORFLOW_LITE;

    /**
     * The empty InferenceOutput representing the expected output structure. For TFLite, the
     * inference code will verify whether this expected output structure matches the model's output
     * signature.
     */
    @NonNull private InferenceOutputParcel mExpectedOutputStructure;

    /** @hide */
    public InferenceInputParcel(@NonNull InferenceInput value) {
        this(
                new ModelId.Builder()
                        .setTableId(value.getParams().getKeyValueStore().getTableId())
                        .setKey(value.getParams().getModelKey())
                        .build(),
                value.getParams().getDelegateType(),
                value.getParams().getRecommendedNumThreads(),
                ByteArrayParceledListSlice.create(value.getInputData()),
                value.getBatchSize(),
                value.getParams().getModelType(),
                new InferenceOutputParcel(value.getExpectedOutputStructure()));
    }
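
    // A minimal usage sketch (illustrative only, not part of this class): "input" is a
    // hypothetical, already-built InferenceInput. The conversion constructor above copies the
    // model location, inference params, input data, batch size, and expected output structure
    // into parcelable form:
    //
    //   InferenceInputParcel parcel = new InferenceInputParcel(input);
    //   ModelId modelId = parcel.getModelId();   // table id + key from input.getParams()
    //   int threads = parcel.getCpuNumThread();  // from getRecommendedNumThreads()
    //   int batchSize = parcel.getBatchSize();   // from input.getBatchSize()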

    // Code below generated by codegen v1.0.23.
    //
    // DO NOT MODIFY!
    // CHECKSTYLE:OFF Generated code
    //
    // To regenerate run:
    // $ codegen
    // $ANDROID_BUILD_TOP/packages/modules/OnDevicePersonalization/framework/java/android/adservices/ondevicepersonalization/InferenceInputParcel.java
    //
    // To exclude the generated code from IntelliJ auto-formatting enable (one-time):
    //   Settings > Editor > Code Style > Formatter Control
    // @formatter:off

    /**
     * Creates a new InferenceInputParcel.
     *
     * @param modelId The location of the TFLite model. The model is usually stored in the
     *     REMOTE_DATA or LOCAL_DATA table.
     * @param delegate The delegate to run model inference. Defaults to the CPU delegate if not
     *     specified.
     * @param cpuNumThread The number of threads available to the interpreter. This setting only
     *     takes effect when the input tensors are on the CPU. Setting cpuNumThread to 0 disables
     *     multithreading, which is equivalent to setting it to 1. If set to -1, the number of
     *     threads used is implementation-defined and platform-dependent.
     * @param inputData An array of input data. The inputs should be in the same order as the
     *     model's inputs.
     * @param batchSize The number of input examples. Adopters can set this field to run batched
     *     inference. The batch size is 1 by default.
     * @param modelType The type of the model. Defaults to MODEL_TYPE_TENSORFLOW_LITE.
     * @param expectedOutputStructure The empty InferenceOutput representing the expected output
     *     structure. For TFLite, the inference code will verify whether this expected output
     *     structure matches the model's output signature.
     */
    @DataClass.Generated.Member
    public InferenceInputParcel(
            @NonNull ModelId modelId,
            @InferenceInput.Params.Delegate int delegate,
            @IntRange(from = 1) int cpuNumThread,
            @NonNull ByteArrayParceledListSlice inputData,
            int batchSize,
            @InferenceInput.Params.ModelType int modelType,
            @NonNull InferenceOutputParcel expectedOutputStructure) {
        this.mModelId = modelId;
        AnnotationValidations.validate(NonNull.class, null, mModelId);
        this.mDelegate = delegate;
        AnnotationValidations.validate(InferenceInput.Params.Delegate.class, null, mDelegate);
        this.mCpuNumThread = cpuNumThread;
        AnnotationValidations.validate(IntRange.class, null, mCpuNumThread, "from", 1);
        this.mInputData = inputData;
        AnnotationValidations.validate(NonNull.class, null, mInputData);
        this.mBatchSize = batchSize;
        this.mModelType = modelType;
        AnnotationValidations.validate(InferenceInput.Params.ModelType.class, null, mModelType);
        this.mExpectedOutputStructure = expectedOutputStructure;
        AnnotationValidations.validate(NonNull.class, null, mExpectedOutputStructure);

        // onConstructed(); // You can define this method to get a callback
    }

    /**
     * The location of the TFLite model. The model is usually stored in the REMOTE_DATA or
     * LOCAL_DATA table.
     */
    @DataClass.Generated.Member
    public @NonNull ModelId getModelId() {
        return mModelId;
    }

    /** The delegate to run model inference. Defaults to the CPU delegate if not specified. */
    @DataClass.Generated.Member
    public @InferenceInput.Params.Delegate int getDelegate() {
        return mDelegate;
    }

    /**
     * The number of threads available to the interpreter. This setting only takes effect when
     * the input tensors are on the CPU. Setting cpuNumThread to 0 disables multithreading, which
     * is equivalent to setting it to 1. If set to -1, the number of threads used is
     * implementation-defined and platform-dependent.
     */
    @DataClass.Generated.Member
    public @IntRange(from = 1) int getCpuNumThread() {
        return mCpuNumThread;
    }

    /** An array of input data. The inputs should be in the same order as the model's inputs. */
    @DataClass.Generated.Member
    public @NonNull ByteArrayParceledListSlice getInputData() {
        return mInputData;
    }

    /**
     * The number of input examples. Adopters can set this field to run batched inference. The
     * batch size is 1 by default.
     */
    @DataClass.Generated.Member
    public int getBatchSize() {
        return mBatchSize;
    }

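    /** The type of the model. Defaults to MODEL_TYPE_TENSORFLOW_LITE. */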
    @DataClass.Generated.Member
    public @InferenceInput.Params.ModelType int getModelType() {
        return mModelType;
    }

    /**
     * The empty InferenceOutput representing the expected output structure. For TFLite, the
     * inference code will verify whether this expected output structure matches the model's output
     * signature.
     */
    @DataClass.Generated.Member
    public @NonNull InferenceOutputParcel getExpectedOutputStructure() {
        return mExpectedOutputStructure;
    }

    @Override
    @DataClass.Generated.Member
    public void writeToParcel(@NonNull android.os.Parcel dest, int flags) {
        // You can override field parcelling by defining methods like:
        // void parcelFieldName(Parcel dest, int flags) { ... }

        dest.writeTypedObject(mModelId, flags);
        dest.writeInt(mDelegate);
        dest.writeInt(mCpuNumThread);
        dest.writeTypedObject(mInputData, flags);
        dest.writeInt(mBatchSize);
        dest.writeInt(mModelType);
        dest.writeTypedObject(mExpectedOutputStructure, flags);
    }

    @Override
    @DataClass.Generated.Member
    public int describeContents() {
        return 0;
    }

    /** @hide */
    @SuppressWarnings({"unchecked", "RedundantCast"})
    @DataClass.Generated.Member
    protected InferenceInputParcel(@NonNull android.os.Parcel in) {
        // You can override field unparcelling by defining methods like:
        // static FieldType unparcelFieldName(Parcel in) { ... }

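        // Note: the read order below must match the write order in writeToParcel(Parcel, int).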
        ModelId modelId = (ModelId) in.readTypedObject(ModelId.CREATOR);
        int delegate = in.readInt();
        int cpuNumThread = in.readInt();
        ByteArrayParceledListSlice inputData =
                (ByteArrayParceledListSlice) in.readTypedObject(ByteArrayParceledListSlice.CREATOR);
        int batchSize = in.readInt();
        int modelType = in.readInt();
        InferenceOutputParcel expectedOutputStructure =
                (InferenceOutputParcel) in.readTypedObject(InferenceOutputParcel.CREATOR);

        this.mModelId = modelId;
        AnnotationValidations.validate(NonNull.class, null, mModelId);
        this.mDelegate = delegate;
        AnnotationValidations.validate(InferenceInput.Params.Delegate.class, null, mDelegate);
        this.mCpuNumThread = cpuNumThread;
        AnnotationValidations.validate(IntRange.class, null, mCpuNumThread, "from", 1);
        this.mInputData = inputData;
        AnnotationValidations.validate(NonNull.class, null, mInputData);
        this.mBatchSize = batchSize;
        this.mModelType = modelType;
        AnnotationValidations.validate(InferenceInput.Params.ModelType.class, null, mModelType);
        this.mExpectedOutputStructure = expectedOutputStructure;
        AnnotationValidations.validate(NonNull.class, null, mExpectedOutputStructure);

        // onConstructed(); // You can define this method to get a callback
    }

    @DataClass.Generated.Member
    public static final @NonNull Parcelable.Creator<InferenceInputParcel> CREATOR =
            new Parcelable.Creator<InferenceInputParcel>() {
                @Override
                public InferenceInputParcel[] newArray(int size) {
                    return new InferenceInputParcel[size];
                }

                @Override
                public InferenceInputParcel createFromParcel(@NonNull android.os.Parcel in) {
                    return new InferenceInputParcel(in);
                }
            };

    @DataClass.Generated(
            time = 1708579683131L,
            codegenVersion = "1.0.23",
            sourceFile =
                    "packages/modules/OnDevicePersonalization/framework/java/android/adservices/ondevicepersonalization/InferenceInputParcel.java",
            inputSignatures =
                    "private @android.annotation.NonNull android.adservices.ondevicepersonalization.ModelId mModelId\nprivate @android.adservices.ondevicepersonalization.InferenceInput.Params.Delegate int mDelegate\nprivate @android.annotation.IntRange int mCpuNumThread\nprivate @android.annotation.NonNull com.android.ondevicepersonalization.internal.util.ByteArrayParceledListSlice mInputData\nprivate  int mBatchSize\nprivate @android.adservices.ondevicepersonalization.InferenceInput.Params.ModelType int mModelType\nprivate @android.annotation.NonNull android.adservices.ondevicepersonalization.InferenceOutputParcel mExpectedOutputStructure\nclass InferenceInputParcel extends java.lang.Object implements [android.os.Parcelable]\n@com.android.ondevicepersonalization.internal.util.DataClass(genAidl=false, genBuilder=false)")
    @Deprecated
    private void __metadata() {}

    // @formatter:on
    // End of generated code
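
    // A minimal round-trip sketch (illustrative only; "original" is a hypothetical
    // InferenceInputParcel instance). Standard android.os.Parcel APIs marshal the object and
    // re-create it through CREATOR:
    //
    //   android.os.Parcel p = android.os.Parcel.obtain();
    //   original.writeToParcel(p, 0);
    //   p.setDataPosition(0);
    //   InferenceInputParcel copy = InferenceInputParcel.CREATOR.createFromParcel(p);
    //   p.recycle();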

}
