Error while deploying machine learning Flask project


I am trying to build a sign language recognition model using LSTM. I am new to Flask and could not find what went wrong. When I run the file, it opens the camera but does not detect any action. Also, once the camera turns on, the app gets stuck. How can I find the error?

The code is given below:

from flask import Flask, render_template, Response
import cv2
import pickle
import pyttsx3 
import numpy as np
import mediapipe as mp
import threading

app = Flask(__name__)

from tensorflow.keras.models import load_model
model = load_model('action.h5')

mp_holistic = mp.solutions.holistic
mp_drawing = mp.solutions.drawing_utils

actions = np.array(['Hello','I am','Affan','Thanks', 'i love you','Fever','See you', 'God'])

def mediapipe_detection(image, model):
    # Convert BGR -> RGB for MediaPipe, run the model, then convert back for OpenCV drawing
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image.flags.writeable = False
    results = model.process(image)
    image.flags.writeable = True
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    return image, results

def extract_keypoints(results):
    # Flatten the left/right hand landmarks into one feature vector; zeros when a hand is not detected
    lh = np.array([[res.x, res.y, res.z] for res in results.left_hand_landmarks.landmark]).flatten() if results.left_hand_landmarks else np.zeros(21*3)
    rh = np.array([[res.x, res.y, res.z] for res in results.right_hand_landmarks.landmark]).flatten() if results.right_hand_landmarks else np.zeros(21*3)
    return np.concatenate([lh, rh])

def draw_styled_landmarks(image, results):
    # Draw the left and right hand landmarks in two shades of grey
    mp_drawing.draw_landmarks(image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
                             mp_drawing.DrawingSpec(color=(100, 100, 100), thickness=2, circle_radius=4),
                             mp_drawing.DrawingSpec(color=(100, 100, 100), thickness=2, circle_radius=2)
                             )
    mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
                             mp_drawing.DrawingSpec(color=(200, 200,200), thickness=2, circle_radius=4),
                             mp_drawing.DrawingSpec(color=(200, 200, 200), thickness=2, circle_radius=2)
                             )

# Global prediction state shared by the stream
sequence = []
sentence = []
predictions = []
threshold = 0.5

# The webcam is opened once, at import time
cap = cv2.VideoCapture(0)

def generate_frames():
    sequence = []  # per-stream keypoint buffer
    sentence = []  # per-stream recognised words
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Run MediaPipe, draw the landmarks and collect the keypoints for this frame
        image, results = mediapipe_detection(frame, holistic)
        draw_styled_landmarks(image, results)
        keypoints = extract_keypoints(results)
        sequence.append(keypoints)
        sequence = sequence[-30:]  # keep only the last 30 frames as the LSTM input window

        if len(sequence) == 30:
            # Predict the action for the last 30 frames
            res = model.predict(np.expand_dims(sequence, axis=0))[0]
            predictions.append(np.argmax(res))

            # Rough stability check over the last 10 predictions before accepting the word
            if np.unique(predictions[-10:])[0] == np.argmax(res):
                if res[np.argmax(res)] > threshold:
                    if len(sentence) > 0:
                        if actions[np.argmax(res)] != sentence[-1]:
                            sentence.append(actions[np.argmax(res)])
                            new_word = actions[np.argmax(res)]
                            t2s.say(new_word)
                            t2s.runAndWait()
                    else:
                        sentence.append(actions[np.argmax(res)])
                        new_word = actions[np.argmax(res)]
                        t2s.say(new_word)
                        t2s.runAndWait()

            # Keep only the last 5 recognised words
            if len(sentence) > 5:
                sentence = sentence[-5:]

        # Encode the annotated frame as JPEG and yield it as one part of the MJPEG stream
        ret, buffer = cv2.imencode('.jpg', image)
        frame = buffer.tobytes()
        yield (b'--frame\r\n'
                b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

    cap.release()


@app.route('/')
def index():
    return render_template('index.html')

@app.route('/video_feed')
def video_feed():
    return Response(generate_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == "__main__":
    t2s = pyttsx3.init()
    holistic = mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5)
    app.run(debug=True)
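
To check whether the detection itself works outside Flask, I also put together a quick standalone test (a minimal sketch reusing the same model file and keypoint extraction; the OpenCV window is only there so I can watch the feed). If this prints the expected actions, I assume the problem is somewhere in the Flask/streaming part rather than in the model:

import cv2
import numpy as np
import mediapipe as mp
from tensorflow.keras.models import load_model

model = load_model('action.h5')
actions = np.array(['Hello','I am','Affan','Thanks', 'i love you','Fever','See you', 'God'])
mp_holistic = mp.solutions.holistic

def extract_keypoints(results):
    lh = np.array([[res.x, res.y, res.z] for res in results.left_hand_landmarks.landmark]).flatten() if results.left_hand_landmarks else np.zeros(21*3)
    rh = np.array([[res.x, res.y, res.z] for res in results.right_hand_landmarks.landmark]).flatten() if results.right_hand_landmarks else np.zeros(21*3)
    return np.concatenate([lh, rh])

sequence = []
cap = cv2.VideoCapture(0)
with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # MediaPipe expects RGB input
        results = holistic.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        sequence.append(extract_keypoints(results))
        sequence = sequence[-30:]
        if len(sequence) == 30:
            res = model.predict(np.expand_dims(sequence, axis=0))[0]
            # Print the predicted action and its probability
            print(actions[np.argmax(res)], res[np.argmax(res)])
        cv2.imshow('test', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
cap.release()
cv2.destroyAllWindows()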
  

I tried changing the model and modifying the code, but it is not working. Initially the camera feed was not displayed, but now it is working.
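
Since the page never shows a traceback, my next idea is to wrap the body of generate_frames in try/except and print the full traceback to the console, so that an exception inside the streaming generator does not just stop the feed silently (only a sketch of what I mean, I am not sure this is the right way to debug it):

import traceback

def generate_frames():
    sequence = []
    sentence = []
    while True:
        try:
            ret, frame = cap.read()
            if not ret:
                break
            image, results = mediapipe_detection(frame, holistic)
            # ... same prediction / text-to-speech code as above ...
            ret, buffer = cv2.imencode('.jpg', image)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')
        except Exception:
            # Print the full error to the Flask console instead of silently ending the stream
            traceback.print_exc()
            break

I also wonder whether the t2s.runAndWait() call blocking inside the generator could be part of why the app gets stuck, but I have not confirmed that.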
