This is the inference code for the emotion-detection model I trained earlier.

import tensorflow as tf
from tensorflow.keras.models import load_model
import numpy as np
import imutils
import time
import cv2
  • Load the required models: haarcascade_frontalface_default.xml for detecting faces, and epoch_75.hdf5, the emotion classifier I trained earlier.
  • Also define the labels for the different emotions.
  • You can download a pretrained model from here.
detector = cv2.CascadeClassifier("./haarcascade_frontalface_default.xml")
model = load_model("./epoch_75.hdf5")
EMOTIONS = ["Angry", "Scared", "Happy", "Sad", "Surprised", "Neutral"]
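
cv2.CascadeClassifier does not raise an error when the XML file is missing; it just returns an empty detector. A quick sanity check after loading saves confusing failures later (a minimal sketch, assuming the paths above):

# fail fast if the cascade could not be loaded
if detector.empty():
    raise IOError("Could not load haarcascade_frontalface_default.xml")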

# If a video path was not supplied, grab a reference to the webcam.
camera = cv2.VideoCapture(0)
time.sleep(2.0)
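
The comment above mentions an optional video path. If you want to support one, here is a minimal sketch using argparse (the --video flag is my own naming, not part of the original script):

import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to an optional video file")
args = vars(ap.parse_args())

# use the video file if one was supplied, otherwise fall back to the webcam
camera = cv2.VideoCapture(args["video"] if args["video"] else 0)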

Next, we use OpenCV to read frames from the camera and classify each one with the pre-trained model.

while True:
    # grab the current frame; stop if nothing could be read
    grabbed, frame = camera.read()
    if not grabbed:
        break

    # resize the frame and convert it to grayscale
    frame = imutils.resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # initialize the canvas for the visualization, then clone
    # the frame so we can draw on it.
    canvas = np.zeros((220, 300, 3), dtype="uint8")
    frameClone = frame.copy()

    # detect faces in the grayscale frame
    rects = detector.detectMultiScale(gray, scaleFactor=1.1,
                                      minNeighbors=5, minSize=(30, 30),
                                      flags=cv2.CASCADE_SCALE_IMAGE)
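    # scaleFactor=1.1 shrinks the image pyramid by 10% per level,
    # minNeighbors=5 requires overlapping detections before a region is
    # accepted as a face, and minSize discards boxes smaller than 30x30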

    # Ensure at least one face was found before continuing.
    if len(rects) > 0:
        # determine the largest face region; detectMultiScale returns
        # (x, y, w, h) boxes, so the area is simply w * h
        rect = sorted(rects, reverse=True,
                      key=lambda x: x[2] * x[3])[0]
        (fX, fY, fW, fH) = rect

        # extract the face ROI from the image, then preprocess
        # it for the network
        roi = gray[fY:fY + fH, fX:fX + fW]
        roi = cv2.resize(roi, (48, 48))
        roi = roi.astype("float") / 255.0
        roi = tf.keras.preprocessing.image.img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
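        # roi now has shape (1, 48, 48, 1): a batch of one 48x48
        # grayscale image, the input shape the network expects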

        # make a prediction on the ROI, then look up the class label
        preds = model.predict(roi)[0]
        label = EMOTIONS[preds.argmax()]
        print(label)
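        # preds holds one score per entry in EMOTIONS; the percentages
        # drawn below assume the model ends in a softmax layer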

        # Loop over the labels + probabilities and draw them
        for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):
            # Construct the label text
            text = "{}: {:.2f}%".format(emotion, prob * 100)

            # Draw the label + probability bar on the canvas
            w = int(prob * 300)
            cv2.rectangle(canvas, (5, (i * 35) + 5),
                          (w, (i * 35) + 35), (0, 0, 255), -1)
            cv2.putText(canvas, text, (10, (i * 35) + 23),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45,
                        (2, 180, 48), 2)

        # draw the predicted label and bounding box on the frame
        # (outside the probability loop, so it is drawn only once)
        cv2.putText(frameClone, label, (fX, fY - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (252, 247, 48), 2)
        cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),
                      (0, 0, 255), 2)

    # show our classifications + probabilities
    cv2.imshow("Face", frameClone)
    cv2.imshow("Probabilities", canvas)

    # if the 'q' key is pressed, stop the loop
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
# clean up the camera and close any open windows
camera.release()
cv2.destroyAllWindows()