GStreamer: Failed to create OutputStream

I am using a Jetson Nano with two CSI cameras. Each camera has a viewing angle of about 140 degrees. For camera calibration I use this open-source code from GitHub.

The code from camera.py can be seen below

# This class uses two CSI cameras (IMX219-83 Stereo camera) connected to a
# NVIDIA Jetson Nano Developer Kit (Rev B01) using OpenCV
# Drivers for the camera and OpenCV are included in the base image in JetPack 4.5+
# This script is a modification of the code from the repository:
# https://github.com/JetsonHacksNano/CSI-Camera

# This script will open a window and place the camera stream from each camera in a window
# arranged horizontally.
# The camera streams are each read in their own thread, as when done sequentially there
# is a noticeable lag

import cv2
import threading
import numpy as np

# The open() method below builds a GStreamer pipeline string for capturing from the CSI camera
# Flip the image by setting the flip_method (most common values: 0 and 2)
# display_width and display_height determine the size of each camera pane in the window on the screen

left_camera = None
right_camera = None


class Camera:

    def __init__(self):
        # Initialize instance variables
        # OpenCV video capture element
        self.video_capture = None
        # The last captured image from the camera
        self.frame = None
        self.grabbed = False
        # The thread where the video capture runs
        self.read_thread = None
        self.read_lock = threading.Lock()
        self.running = False

    def open(self, sensor_id=0):
        # On the Nano, the frame rate and resolution of the CSI camera are set
        # through the GStreamer caps below.
        # sensor_mode 0 is the full-resolution mode of my camera (3264x2464 px).
        sensor_mode = 0
        capture_width = 918
        capture_height = 693
        display_width = 918     #918=9*102
        display_height = 693    #693=9*77
        framerate = 20
        flip_method = 0
        gstreamer_pipeline_string = (
            "nvarguscamerasrc sensor-id=%d sensor-mode=%d ! "
            "video/x-raw(memory:NVMM), "
            "width=(int)%d, height=(int)%d, "
            "format=(string)NV12, framerate=(fraction)%d/1 ! "
            "nvvidconv flip-method=%d ! "
            "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
            "videoconvert ! "
            "video/x-raw, format=(string)BGR ! appsink"
            % (
                sensor_id,
                sensor_mode,
                capture_width,
                capture_height,
                framerate,
                flip_method,
                display_width,
                display_height,
            )
        )

        try:
            self.video_capture = cv2.VideoCapture(
                gstreamer_pipeline_string, cv2.CAP_GSTREAMER
            )

        except RuntimeError:
            self.video_capture = None
            print("Unable to open camera")
            print("Pipeline: " + gstreamer_pipeline_string)
            return
        # Grab the first frame to start the video capturing
        self.grabbed, self.frame = self.video_capture.read()

    def start(self):
        if self.running:
            print('Video capturing is already running')
            return None
        # create a thread to read the camera image
        if self.video_capture is not None:
            self.running = True
            self.read_thread = threading.Thread(target=self.updateCamera)
            self.read_thread.start()
        return self

    def stop(self):
        self.running = False
        # Only join the reader thread if it was actually started
        if self.read_thread is not None:
            self.read_thread.join()

    def updateCamera(self):
        # This is the thread to read images from the camera
        while self.running:
            try:
                grabbed, frame = self.video_capture.read()
                with self.read_lock:
                    self.grabbed = grabbed
                    self.frame = frame
            except RuntimeError:
                print("Could not read image from camera")
        # FIX ME - stop and cleanup thread
        # Something bad happened

    def read(self):  # the image is captured here
        with self.read_lock:
            frame = self.frame.copy()
            grabbed = self.grabbed
        return grabbed, frame

    def release(self):
        if self.video_capture is not None:
            self.video_capture.release()
            self.video_capture = None
        # Now kill the thread
        if self.read_thread is not None:
            self.read_thread.join()

def start_cameras():
    left_camera = Camera()
    left_camera.open(0)
    left_camera.start()

    right_camera = Camera()
    right_camera.open(1)
    right_camera.start()

    cv2.namedWindow("CSI Cameras", cv2.WINDOW_AUTOSIZE)

    if (
            not left_camera.video_capture.isOpened()
            or not right_camera.video_capture.isOpened()
    ):
        # Cameras did not open, or no camera attached

        print("Unable to open any cameras")
        raise SystemExit(0)

    mypiccounter = 1

    while cv2.getWindowProperty("CSI Cameras", 0) >= 0:

        _, left_image = left_camera.read()
        _, right_image = right_camera.read()

        camera_images = np.hstack((left_image, right_image))
        cv2.imshow("CSI Cameras", camera_images)

        # This also acts as a frame limiter (waitKey blocks for up to 30 ms)
        keycode = cv2.waitKey(30) & 0xFF
        # Stop the program on the ESC key
        if keycode == 27:
            break

        # Optionally, images can be saved by pressing 'p'
        if keycode == 112:  # 112 == ord('p')
            cv2.imwrite("myTest/Test"+str(mypiccounter)+".0.jpg", left_image, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
            cv2.imwrite("myTest/Test"+str(mypiccounter)+".1.jpg", right_image, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
            print("Picture "+str(mypiccounter)+" saved successfully")
            mypiccounter+=1


    left_camera.stop()
    left_camera.release()
    right_camera.stop()
    right_camera.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    start_cameras()

I have Ubuntu 20.04 installed on my Jetson Nano and I'm running the code using Visual Studio. Unfortunately, I get the following error message in the terminal:

GST_ARGUS: Creating output stream
Error generated. /dvs/git/dirty/git-master_linux/multimedia/nvgstreamer/gst-nvarguscamera/gstnvarguscamerasrc.cpp, execute:776 Failed to create OutputStream
[ WARN:...] global /home/jetson/opencv/modules/videoio/src/cap_gstreamer.cpp (1405) open OpenCV | GStreamer warning: Cannot query video position: status=0, value=-1, duration=-1
GST_ARGUS: Creating output stream
Error generated. /dvs/git/dirty/git-master_linux/multimedia/nvgstreamer/gst-nvarguscamera/gstnvarguscamerasrc.cpp, execute:776 Failed to create OutputStream
[ WARN:...] global /home/jetson/opencv/modules/videoio/src/cap_gstreamer.cpp (1405) open OpenCV | GStreamer warning: Cannot query video position: status=0, value=-1, duration=-1
Traceback (most recent call last):
  File "/home/jetson/MyProject/01-Jetson-Nano-CSI-Stereo-Vision-main/camera/camera.py", line 177, in <module>
    start_cameras()
  File "/home/jetson/MyProject/01-Jetson-Nano-CSI-Stereo-Vision-main/camera/camera.py", line 150, in start_cameras
    _, left_image = left_camera.read()
  File "/home/jetson/MyProject/01-Jetson-Nano-CSI-Stereo-Vision-main/camera/camera.py", line 113, in read
    frame = self.frame.copy()
AttributeError: 'NoneType' object has no attribute 'copy'

I should add that the program has worked before. Furthermore, it cannot be a hardware problem, since I can open and display the video of both cameras via the same pipeline in another program or from the terminal.
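For reference, this is roughly the kind of standalone check I mean; it is only a minimal sketch that opens one camera directly with the same pipeline string that camera.py builds (values filled in for sensor 0):

# Minimal sketch: open one CSI camera with the pipeline string from camera.py
# and try to grab a single frame, bypassing the Camera class and its thread.
import cv2

pipeline = (
    "nvarguscamerasrc sensor-id=0 sensor-mode=0 ! "
    "video/x-raw(memory:NVMM), width=(int)918, height=(int)693, "
    "format=(string)NV12, framerate=(fraction)20/1 ! "
    "nvvidconv flip-method=0 ! "
    "video/x-raw, width=(int)918, height=(int)693, format=(string)BGRx ! "
    "videoconvert ! video/x-raw, format=(string)BGR ! appsink"
)

cap = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
print("opened:", cap.isOpened())
if cap.isOpened():
    grabbed, frame = cap.read()
    print("frame grabbed:", grabbed, None if frame is None else frame.shape)
    cap.release()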

When the program was still working, the calibration could be carried out with start.py. The code from start.py can be seen below:

# This script calibrate IMX219-83 Stereo camera and show results (disparity map)
# Video starts from two Cameras connected to Jetson Nano B01
# When you see the recognized chessboard on the screen,
# press 'c' to add points into Calibration info
# You need to save about 30 shots of the chessboard, then press 's' to save calibration results.
# To view the calibration results press 'v'.
# Tap Esc to exit program.
# All Calibration data will be saved into /camera directory using pickle format

import cv2
import numpy as np
import pickle
from camera.camera import Camera

# Size of chessboard to Calibration
nx, ny = 9, 6

# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)

# Prepare object points, like (0,0,0), (1,0,0), (2,0,0), ..., (8,5,0)
# We use a chessboard with a 25 mm square size
objp = np.zeros((ny * nx, 3), np.float32)
objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
objp = objp * 0.025
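# For example, objp[0] = (0, 0, 0), objp[1] = (0.025, 0, 0), ...,
# objp[-1] = (0.2, 0.125, 0): the chessboard corner coordinates in metres.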

# Arrays to store object points and image points from all captured frames.
objpoints = []   # 3d point in real world space
imgpointsl = []  # 2d points in image plane for left camera
imgpointsr = []  # 2d points in image plane for right camera


def main():
    # Start Left Camera
    left_camera = Camera()
    left_camera.open(0)
    left_camera.start()

    # Start Right Camera
    right_camera = Camera()
    right_camera.open(1)
    right_camera.start()

    # Iteration counter
    i = 1
    # Undistortion
    und = False

    cv2.namedWindow("IMX219-83 Stereo camera", cv2.WINDOW_NORMAL)
    #if necessary, adjust the window size
    #cv2.resizeWindow("IMX219-83 Stereo camera", 1836, 693)

    while cv2.getWindowProperty("IMX219-83 Stereo camera", 0) >= 0:

        # Read Images
        _, left_image = left_camera.read()
        _, right_image = right_camera.read()

        # Find the chessboard corners
        retl, cornersl = cv2.findChessboardCorners(left_image, (nx, ny), None)
        retr, cornersr = cv2.findChessboardCorners(right_image, (nx, ny), None)

        # This also acts as a frame limiter: wait up to 30 ms for a key press
        keycode = cv2.waitKey(30) & 0xFF  # keep only the lowest 8 bits
        # Stop the program on the ESC key
        if keycode == 27:
            break

        # If found corners, draw corners and check to add Calibration info
        if retl and retr:
            # Draw the corners
            cv2.drawChessboardCorners(left_image, (nx, ny), cornersl, retl)
            cv2.drawChessboardCorners(right_image, (nx, ny), cornersr, retr)

            # Add points into Calibration info when user press 'c'
            if keycode == ord('c'):
                print("Add points from video capture (%d)" % i)

                # Convert to grayscale (the frames from the pipeline are BGR)
                grayl = cv2.cvtColor(left_image, cv2.COLOR_BGR2GRAY)
                grayr = cv2.cvtColor(right_image, cv2.COLOR_BGR2GRAY)

                # Add left points
                objpoints.append(objp)
                cv2.cornerSubPix(grayl, cornersl, (11, 11), (-1, -1), criteria)
                imgpointsl.append(cornersl)

                # Add right points
                cv2.cornerSubPix(grayr, cornersr, (11, 11), (-1, -1), criteria)
                imgpointsr.append(cornersr)

                i += 1

        # Save all calibration data into pickle format
        if keycode == ord('s') and i > 1:
            height, width, channel = left_image.shape
            retval, cm1, dc1, cm2, dc2, r, t, e, f = cv2.stereoCalibrate(
                objpoints, imgpointsl, imgpointsr,
                (width, height), None, None, None, None, criteria=criteria
            )
            print("Stereo calibration rms: ", retval)
            r1, r2, p1, p2, q, roi_left, roi_right = cv2.stereoRectify(
                cm1, dc1, cm2, dc2, (width, height), r, t,
                flags=0, alpha=-1
            )
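            # r1/r2 are the rectification rotations, p1/p2 the rectified
            # projection matrices, q the disparity-to-depth mapping matrix and
            # roi_left/roi_right the valid pixel regions from cv2.stereoRectify.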

            # Save the camera calibration results.
            calib_result_pickle = {
                "retval": retval,
                "cm1": cm1, "dc1": dc1,
                "cm2": cm2, "dc2": dc2,
                "r": r, "t": t, "e": e, "f": f,
                "r1": r1, "r2": r2, "p1": p1, "p2": p2, "q": q
            }

            try:
                pickle.dump(calib_result_pickle, open("./camera/stereo_calib.p", "wb"))
                print("Calibration results saved\n")
            except (OSError, pickle.PickleError):
                print("Unable to save Calibration results\n")

        # View calibration result
        if keycode == ord('v'):
            try:
                calib_result_pickle = pickle.load(open("./camera/stereo_calib.p", "rb"))
                cm1 = calib_result_pickle["cm1"]
                cm2 = calib_result_pickle["cm2"]
                dc1 = calib_result_pickle["dc1"]
                dc2 = calib_result_pickle["dc2"]
                r = calib_result_pickle["r"]
                t = calib_result_pickle["t"]
                r1 = calib_result_pickle["r1"]
                p1 = calib_result_pickle["p1"]
                r2 = calib_result_pickle["r2"]
                p2 = calib_result_pickle["p2"]
                q = calib_result_pickle["q"]
                und = not und
            except (OSError, pickle.UnpicklingError):
                print("Unable to load Calibration coefficients")
                break

            # We use the shape for remap
            height, width, channel = left_image.shape

        # Undistortion and Rectification part!
        if und is True:
            leftMapx, leftMapy = cv2.initUndistortRectifyMap(cm1, dc1, r1, p1, (width, height), cv2.CV_32FC1)
            left_image = cv2.remap(left_image, leftMapx, leftMapy, cv2.INTER_LINEAR, cv2.BORDER_CONSTANT)
            rightMapx, rightMapy = cv2.initUndistortRectifyMap(cm2, dc2, r2, p2, (width, height), cv2.CV_32FC1)
            right_image = cv2.remap(right_image, rightMapx, rightMapy, cv2.INTER_LINEAR, cv2.BORDER_CONSTANT)

        camera_images = np.hstack((left_image, right_image))
        cv2.imshow("IMX219-83 Stereo camera", camera_images)

    # Stop Cameras and close all windows
    left_camera.stop()
    left_camera.release()
    right_camera.stop()
    right_camera.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()

The result looked like this.

What do I have to change in the code so that the program works again? What adjustments do I have to make so that the result of the calibration looks like this?
