Creating the Environment

  • Create a new conda environment with Python 3.7:

    conda create -n mask python=3.7

  • Activate the environment:

    conda activate mask

Installing Dependencies

  • After activating the environment, install the dependencies:

    pip install tensorflow-gpu

    pip install opencv-python

    pip install matplotlib

    pip install numpy

    pip install pandas

    pip install imutils
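
  • Optionally, verify that TensorFlow can see the GPU (a quick sanity check; the output depends on your CUDA and driver setup):

    import tensorflow as tf
    print(tf.__version__)
    print(tf.config.list_physical_devices("GPU"))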

import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import imutils
from imutils import paths
import cv2
import time
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from sklearn.preprocessing import LabelBinarizer
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import MobileNetV2

Preprocessing Pipeline

  • The Preprocess class has three methods:

    • preprocess

      This method reads the images from the dataset directory, converts each one to an array, and appends the image to a data list and its category to a labels list.

    • binarizer

      This method converts the string labels into a one-hot 0/1 encoding.

    • change_to_numpy

      This method converts the data and labels lists to NumPy arrays.
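
  • The preprocess method assumes each category is a subdirectory of the dataset root holding that class's images. A sketch of the expected layout (the directory names come from the training call below; the file names are illustrative):

    dataset/
        with_mask/
            img_0001.jpg
            ...
        without_mask/
            img_0002.jpg
            ...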

class Preprocess:
    def __init__(self):
        pass

    def preprocess(self, content, categories):
        try:
            data = []
            labels = []
            for category in categories:
                path = os.path.join(content, category)
                for img in os.listdir(path):
                    img_path = os.path.join(path, img)
                    image = load_img(img_path, target_size=(224, 224))
                    image = img_to_array(image)
                    image = preprocess_input(image)

                    data.append(image)
                    labels.append(category)

            return data, labels
        except Exception as ex:
            raise ex

    def binarizer(self, labels):
        try:
            lb = LabelBinarizer()
            label = lb.fit_transform(labels)
            label = to_categorical(label)

            return label
        except Exception as ex:
            raise ex

    # Changing to numpy array
    def change_to_numpy(self, data, label):
        try:
            data = np.array(data, dtype="float32")
            labels = np.array(label)

            return data, labels
        except Exception as ex:
            raise ex
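
  • As a small worked example of what binarizer produces: with two classes, LabelBinarizer emits a single 0/1 column and to_categorical expands it into two one-hot columns (a sketch; LabelBinarizer orders classes alphabetically, so with_mask maps to index 0):

label = Preprocess().binarizer(["with_mask", "without_mask", "with_mask"])
print(label)
# [[1. 0.]
#  [0. 1.]
#  [1. 0.]]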

Training Pipeline

  • The Train class has one method:

    model_building

    • In this method we call the preprocessing methods from the Preprocess class.
    • We augment the image data with Keras's ImageDataGenerator.
    • We use MobileNetV2, pretrained on ImageNet, as the base model.
    • We freeze the base layers and train only the new classification head, since the base is already trained on the ImageNet dataset (an optional fine-tuning sketch follows the class below).
    • Then we set the hyperparameters and train the model.
class Train:
    def __init__(self):
        pass
    def model_building(self):
        # Preprocess data
        data, labels = Preprocess().preprocess("../dataset", ["with_mask", "without_mask"])

        # Doing one hot encoding
        label = Preprocess().binarizer(labels)

        # Changing to numpy array
        data, labels = Preprocess().change_to_numpy(data, label)


        # Now train_test_split
        x_train, x_test, y_train, y_test = train_test_split(data, labels, test_size=0.20,
                                                            stratify=labels, random_state=42)

        # Constructing image data generator
        augment = ImageDataGenerator(
            rotation_range=20,
            zoom_range=0.15,
            width_shift_range=0.2,
            height_shift_range=0.2,
            shear_range=0.15,
            horizontal_flip=True,
            fill_mode="nearest")


        # Load the MobileNetV2 network, leaving off its fully connected head (include_top=False).
        basemodel = MobileNetV2(weights="imagenet",include_top=False,
                                input_tensor=Input(shape=(224,224,3)))

        # Declare the head of the model that sits on top of the base model.
        headmodel=basemodel.output
        headmodel = AveragePooling2D(pool_size=(7,7))(headmodel)
        headmodel = Flatten(name="flatten")(headmodel)
        headmodel = Dense(128,activation='relu')(headmodel)
        headmodel = Dropout(0.5)(headmodel)
        headmodel = Dense(2,activation='softmax')(headmodel)

        # Place the fully connected head on top of the base model
        model =  Model(inputs=basemodel.input,outputs=headmodel)

        # Loop over all layers in the base model and freeze them so they will not be updated during the first training process
        for layer in basemodel.layers:
            layer.trainable=False
        
        # Setting up hyperparameters.
        lr=1e-4
        epochs = 20
        BS = 32
        optimizer=Adam(learning_rate=lr,decay=lr/epochs)
        model.compile(loss="binary_crossentropy",optimizer=optimizer,
                    metrics=["accuracy"])

        # Fitting our model
        best = model.fit(
            augment.flow(x_train, y_train, batch_size=BS),
            steps_per_epoch=len(x_train) // BS,
            validation_data=(x_test, y_test),
            validation_steps=len(x_test) // BS,
            epochs=epochs)
        
        return model, best
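
  • The base stays frozen for the entire run above. A common follow-up, sketched below and not part of the original pipeline, is to unfreeze the last few base layers after the head converges and continue training at a much lower learning rate (the layer count and epoch count are illustrative; the snippet assumes it runs inside model_building after the first fit):

# Optional fine-tuning sketch: unfreeze the tail of MobileNetV2.
for layer in basemodel.layers[-20:]:
    layer.trainable = True

# Recompile with a smaller learning rate so the pretrained weights are only nudged.
model.compile(loss="binary_crossentropy",
              optimizer=Adam(learning_rate=1e-5),
              metrics=["accuracy"])

model.fit(augment.flow(x_train, y_train, batch_size=BS),
          steps_per_epoch=len(x_train) // BS,
          validation_data=(x_test, y_test),
          epochs=5)  # a short extra run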

Prediction Pipeline

  • The Prediction class has two methods:

    pred

    This method predicts on the held-out test split with the model trained in the constructor (which calls Train().model_building()) and then saves the model.

    visualize

    This method plots the training and validation loss and accuracy.

class Prediction:
    def __init__(self):
        self.model = Train().model_building()     
    def pred(self):
        try:
            # Preprocess data
            data, labels = Preprocess().preprocess("../dataset", ["with_mask", "without_mask"])

            # Doing one hot encoding
            label = Preprocess().binarizer(labels)

            # Changing to numpy array
            data, labels = Preprocess().change_to_numpy(data, label)


            # Now train_test_split
            x_train,x_test,y_train,y_test = train_test_split(data,labels,test_size=0.20,
                                                            stratify=labels,random_state=42)

            # Importing model
            models, best = self.model

            # Predicting
            predict = models.predict(x_test, batch_size=32)
            predict = np.argmax(predict, axis=1)
            print(predict)
            
            # Saving our model
            models.save("mask_detector")
        except Exception as ex:
            raise ex
        
    def visualize(self):
        try:
            # Visualizing the training loss and accuracy of our model.
            # Importing model
            models, best = self.model
            N = len(best.history["loss"])  # number of epochs actually run
            plt.style.use("ggplot")
            plt.figure()
            plt.plot(np.arange(0, N), best.history["loss"], label="train_loss")
            plt.plot(np.arange(0, N), best.history["val_loss"], label="val_loss")
            plt.plot(np.arange(0, N), best.history["accuracy"], label="train_acc")
            plt.plot(np.arange(0, N), best.history["val_accuracy"], label="val_acc")
            plt.title("Training Loss and Accuracy")
            plt.xlabel("#Epoch")
            plt.ylabel("Loss/Accuracy")
            plt.legend(loc="lower left")
            plt.savefig("plot.png")
        except Exception as ex:
            raise ex
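
  • The raw argmax printout from pred is hard to judge on its own; below is a minimal evaluation sketch using scikit-learn's classification_report (it assumes the x_test, y_test, and predict variables computed inside pred):

from sklearn.metrics import classification_report

# Compare the predicted class indices against the one-hot test labels.
print(classification_report(np.argmax(y_test, axis=1), predict,
                            target_names=["with_mask", "without_mask"]))
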
  • Creating an object of the Prediction class, performing training and prediction, and plotting the training curves.
predict = Prediction()
predict.pred()
WARNING:tensorflow:`input_shape` is undefined or non-square, or `rows` is not in [96, 128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default.
Epoch 1/20
25/25 [==============================] - 19s 590ms/step - loss: 0.4585 - accuracy: 0.8026 - val_loss: 0.2395 - val_accuracy: 0.9853
Epoch 2/20
25/25 [==============================] - 13s 518ms/step - loss: 0.2077 - accuracy: 0.9487 - val_loss: 0.1326 - val_accuracy: 0.9853
Epoch 3/20
25/25 [==============================] - 13s 515ms/step - loss: 0.1230 - accuracy: 0.9782 - val_loss: 0.0949 - val_accuracy: 0.9853
Epoch 4/20
25/25 [==============================] - 13s 530ms/step - loss: 0.0907 - accuracy: 0.9833 - val_loss: 0.0770 - val_accuracy: 0.9853
Epoch 5/20
25/25 [==============================] - 13s 509ms/step - loss: 0.0698 - accuracy: 0.9910 - val_loss: 0.0636 - val_accuracy: 0.9853
Epoch 6/20
25/25 [==============================] - 13s 512ms/step - loss: 0.0611 - accuracy: 0.9859 - val_loss: 0.0565 - val_accuracy: 0.9853
Epoch 7/20
25/25 [==============================] - 13s 505ms/step - loss: 0.0492 - accuracy: 0.9872 - val_loss: 0.0521 - val_accuracy: 0.9853
Epoch 8/20
25/25 [==============================] - 13s 530ms/step - loss: 0.0386 - accuracy: 0.9974 - val_loss: 0.0496 - val_accuracy: 0.9902
Epoch 9/20
25/25 [==============================] - 13s 536ms/step - loss: 0.0393 - accuracy: 0.9923 - val_loss: 0.0482 - val_accuracy: 0.9902
Epoch 10/20
25/25 [==============================] - 13s 549ms/step - loss: 0.0331 - accuracy: 0.9923 - val_loss: 0.0446 - val_accuracy: 0.9902
Epoch 11/20
25/25 [==============================] - 13s 514ms/step - loss: 0.0300 - accuracy: 0.9962 - val_loss: 0.0438 - val_accuracy: 0.9902
Epoch 12/20
25/25 [==============================] - 13s 538ms/step - loss: 0.0273 - accuracy: 0.9923 - val_loss: 0.0466 - val_accuracy: 0.9804
Epoch 13/20
25/25 [==============================] - 13s 527ms/step - loss: 0.0261 - accuracy: 0.9949 - val_loss: 0.0414 - val_accuracy: 0.9902
Epoch 14/20
25/25 [==============================] - 14s 546ms/step - loss: 0.0218 - accuracy: 0.9974 - val_loss: 0.0397 - val_accuracy: 0.9902
Epoch 15/20
25/25 [==============================] - 13s 523ms/step - loss: 0.0205 - accuracy: 0.9962 - val_loss: 0.0391 - val_accuracy: 0.9902
Epoch 16/20
25/25 [==============================] - 14s 539ms/step - loss: 0.0156 - accuracy: 0.9962 - val_loss: 0.0390 - val_accuracy: 0.9902
Epoch 17/20
25/25 [==============================] - 13s 533ms/step - loss: 0.0189 - accuracy: 0.9936 - val_loss: 0.0403 - val_accuracy: 0.9902
Epoch 18/20
25/25 [==============================] - 13s 533ms/step - loss: 0.0216 - accuracy: 0.9936 - val_loss: 0.0445 - val_accuracy: 0.9804
Epoch 19/20
25/25 [==============================] - 13s 534ms/step - loss: 0.0185 - accuracy: 0.9974 - val_loss: 0.0446 - val_accuracy: 0.9804
Epoch 20/20
25/25 [==============================] - 14s 544ms/step - loss: 0.0168 - accuracy: 0.9974 - val_loss: 0.0453 - val_accuracy: 0.9853
7/7 [==============================] - 2s 116ms/step
[0 1 0 0 1 1 1 1 1 0 0 0 0 0 1 1 1 1 0 0 0 0 1 1 0 0 0 1 0 1 1 0 1 0 0 0 1
 0 1 0 1 0 1 1 1 0 0 0 1 1 0 0 0 1 0 1 0 0 1 0 0 1 1 1 0 0 0 1 0 0 1 1 0 1
 0 0 0 1 0 1 1 1 0 1 1 1 1 1 1 0 1 0 0 0 1 1 0 0 1 0 1 1 0 0 1 0 0 1 0 0 0
 1 1 0 1 1 0 0 1 0 0 0 0 1 1 1 0 1 0 1 1 0 0 0 0 1 1 0 1 1 0 0 0 1 1 0 0 1
 0 0 1 1 1 1 0 1 1 1 0 0 1 0 1 1 1 0 0 1 1 1 1 1 0 0 0 1 1 0 0 1 1 0 0 0 1
 1 0 0 1 0 1 0 0 1 1 0 0 0 1 0 0 0 1 1]
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op while saving (showing 5 of 52). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: mask_detector\assets
predict.visualize()

Mask Detection Pipeline

  • The Mask class has one method:

    detect_and_predict_mask

    • This method takes three inputs, a frame extracted from the video stream, the face detector (faceNet), and the mask classifier (maskNet), and returns a 2-tuple of the face locations and their corresponding mask predictions (a single-image usage sketch follows the class below).
class Mask:
    def __init__(self):
        pass
    def detect_and_predict_mask(self,frame, faceNet, maskNet):
        # grab the dimensions of the frame and then construct a blob
        # from it
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224),
                                    (104.0, 177.0, 123.0))

        # pass the blob through the network and obtain the face detections
        faceNet.setInput(blob)
        detections = faceNet.forward()
        print(detections.shape)

        # initialize our list of faces, their corresponding locations,
        # and the list of predictions from our face mask network
        faces = []
        locs = []
        preds = []

        # loop over the detections
        for i in range(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the detection
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the confidence is
            # greater than the minimum confidence
            if confidence > 0.5:
                # compute the (x, y)-coordinates of the bounding box for
                # the object
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # ensure the bounding boxes fall within the dimensions of
                # the frame
                (startX, startY) = (max(0, startX), max(0, startY))
                (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

                # extract the face ROI, convert it from BGR to RGB channel
                # ordering, resize it to 224x224, and preprocess it
                face = frame[startY:endY, startX:endX]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                face = cv2.resize(face, (224, 224))
                face = img_to_array(face)
                face = preprocess_input(face)

                # add the face and bounding boxes to their respective
                # lists
                faces.append(face)
                locs.append((startX, startY, endX, endY))

        # only make a predictions if at least one face was detected
        if len(faces) > 0:
            # for faster inference we'll make batch predictions on *all*
            # faces at the same time rather than one-by-one predictions
            # in the above `for` loop
            faces = np.array(faces, dtype="float32")

        
            preds = maskNet.predict(faces, batch_size=32)
            

        # return a 2-tuple of the face locations and their corresponding
        # predictions
        return (locs, preds)
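
  • As a usage sketch, the same method works on a single still image (the model and detector paths mirror the ones used in Videopred below; example.jpg is a hypothetical input):

faceNet = cv2.dnn.readNet("./face_detector/deploy.prototxt",
                          "./face_detector/res10_300x300_ssd_iter_140000.caffemodel")
maskNet = load_model("./mask_detector")  # the SavedModel directory written by pred()
frame = cv2.imread("example.jpg")        # hypothetical input image
locs, preds = Mask().detect_and_predict_mask(frame, faceNet, maskNet)
for (startX, startY, endX, endY), (mask, withoutMask) in zip(locs, preds):
    print("Mask" if mask > withoutMask else "No Mask", (startX, startY, endX, endY))
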
  • The Videopred class has one method:

    detect_video

    • In this method we load the mask classifier trained in the previous step as maskNet, and build the face detector faceNet from its deploy.prototxt definition and the pretrained Caffe weights file.
    • This method opens the webcam stream and draws the detected face boxes and mask labels on each frame.
class Videopred:
    def __init__(self):
        pass
    def detect_video(self):
        prototxtpath = "./face_detector/deploy.prototxt"
        weightspath = "./face_detector/res10_300x300_ssd_iter_140000.caffemodel"
        faceNet = cv2.dnn.readNet(prototxtpath, weightspath)
        maskNet = load_model("./mask_detector")  # the SavedModel directory written by pred()
        vs = cv2.VideoCapture(0)
        time.sleep(2.0)

        # loop over the frames from the video stream
        while True:
            # grab the frame from the threaded video stream and resize it
            # to have a maximum width of 400 pixels
            _, frame = vs.read()
            frame = imutils.resize(frame, width=400)

            # detect faces in the frame and determine if they are wearing a
            # face mask or not
            (locs, preds) = Mask().detect_and_predict_mask(frame, faceNet, maskNet)

            # loop over the detected face locations and their corresponding
            # locations
            for (box, pred) in zip(locs, preds):
                # unpack the bounding box and predictions
                (startX, startY, endX, endY) = box
                (mask, withoutMask) = pred

                # determine the class label and color we'll use to draw
                # the bounding box and text
                label = "Mask" if mask > withoutMask else "No Mask"
                color = (0, 255, 0) if label == "Mask" else (0, 0, 255)

                # include the probability in the label
                label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)

                # display the label and bounding box rectangle on the output
                # frame
                cv2.putText(frame, label, (startX, startY - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)

            # show the output frame
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

        # do a bit of cleanup
        vs.release()
        cv2.destroyAllWindows()
  • Creating an object of the Videopred class and running live detection.
pred = Videopred()
pred.detect_video()
(1, 1, 200, 7)
1/1 [==============================] - 1s 1s/step
(1, 1, 200, 7)
1/1 [==============================] - 0s 65ms/step
(1, 1, 200, 7)
1/1 [==============================] - 0s 44ms/step
...
(the detection-shape and prediction lines repeat for every subsequent frame until `q` is pressed)