HTC Vive Pro camera image deformation

32 views Asked by At

I'm working on a project using an HTC Vive Pro in which I want to apply some object segmentation to what the external front cameras are seeing. I'm using pyopenvr python bindings for the Valve's openvr C++ library. I have already been able to access the camera images but I'm having some issues when sending them back to the Vive's displays. The first picture is how the displays look when activating the cameras from the vive itself, the second picture is what is being sent to the screens with my script, they look really stretched out which naturally causes a lot of nausea when wearing the headset.

I'm feeling a little stuck right now because I don't really know how to fix this. Also if anyone knows a better way to do this, maybe other tools or libraries I would love to hear them.

P.S: I am using pyopenvr instead of the native OpenVR in C++ because even though I have been able to get the camera images on the native SDK, I have not been able at all to send the images to the displays after trying it for a long time.

This is how the images look with my script:

enter image description here

And this is how they look when activating the camera passthrough from the headset itself:

enter image description here

import openvr
import numpy as np
import pygame as pg
from OpenGL.GL import *
import glfw
import threading
import time

vr_app_scene = openvr.init(openvr.VRApplication_Scene)

camera = openvr.IVRTrackedCamera()
camera_handle = camera.acquireVideoStreamingService(0)
frame_type = openvr.VRTrackedCameraFrameType_Undistorted
frame_size = camera.getCameraFrameSize(0, frame_type)
print(frame_size)
frame_buffer = (ctypes.c_uint8 * frame_size[2])()
frame_header = openvr.VRTextureBounds_t()

IMG_WIDTH = 1224
IMG_HEIGHT = 920

pg.display.set_mode((0, 0), pg.OPENGL | pg.DOUBLEBUF | pg.NOFRAME)
glfw.init()
glfw.window_hint(glfw.SAMPLES, 4)
window = glfw.create_window(1, 1, 'hello_vr', None, None)
vr_sys = openvr.VRSystem()
left_eye_texture = None
right_eye_texture = None

poses = []
render_lock = threading.Lock()

# Generate textures outside the rendering loop


def render_loop():
    global left_eye_texture, right_eye_texture, window

    glfw.make_context_current(window)
    
    textures = glGenTextures(2)

    while not glfw.window_should_close(window):
        with render_lock:
            # Rendering code goes here
            # ...

            # Left eye texture
            glBindTexture(GL_TEXTURE_2D, textures[0])
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
            glGenerateMipmap(GL_TEXTURE_2D)
            glDepthFunc(GL_LEQUAL)
                glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,IMG_WIDTH,IMG_HEIGHT,0,GL_RGBA,GL_UNSIGNED_BYTE,img_data_izq)
            
            # ... (texture parameters and data setup)

            left_eye_texture = openvr.Texture_t(
                handle=int(textures[0]),
                eType=openvr.TextureType_OpenGL,
                eColorSpace=openvr.ColorSpace_Gamma,
            )
            
            glClearColor( 0.0, 0.0, 0.0, 1.0 )
            glEnable( GL_MULTISAMPLE )
            
            glViewport(0, 0, IMG_WIDTH, IMG_HEIGHT)
            
            glDisable( GL_MULTISAMPLE )
            glBlitFramebuffer( 0, 0, IMG_WIDTH, IMG_HEIGHT, 0, 0, IMG_WIDTH, IMG_HEIGHT, 
                GL_COLOR_BUFFER_BIT,
                GL_LINEAR )
            
            glEnable( GL_MULTISAMPLE )

            # Right eye texture
            glBindTexture(GL_TEXTURE_2D, textures[1])
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
            glGenerateMipmap(GL_TEXTURE_2D)
            glDepthFunc(GL_LEQUAL)
                  glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,IMG_WIDTH,IMG_HEIGHT,0,GL_RGBA,GL_UNSIGNED_BYTE,img_data_der)
            
            # ... (texture parameters and data setup)

            right_eye_texture = openvr.Texture_t(
                handle=int(textures[1]),
                eType=openvr.TextureType_OpenGL,
                eColorSpace=openvr.ColorSpace_Gamma,
            )

            # Submit textures to VR compositor
            if right_eye_texture is not None and left_eye_texture is not None:
                try:
                    openvr.VRCompositor().submit(openvr.Eye_Left, left_eye_texture)
                    openvr.VRCompositor().submit(openvr.Eye_Right, right_eye_texture)
                    glFlush()
                except openvr.error_code.CompositorError_AlreadySubmitted:
                    pass  # First frame fails because waitGetPoses has not been called yet

            # Ensure a constant frame rate
            time.sleep(0.0016)  # Adjust the sleep duration based on your desired frame rate

# Start the rendering thread
render_thread = threading.Thread(target=render_loop)
render_thread.start()

# Main loop
while not glfw.window_should_close(window):
    try:
        with render_lock:
            # Update headset pose
            poses, _ = openvr.VRCompositor().waitGetPoses(poses, None)

            # Get camera frame
            result = camera.getVideoStreamFrameBuffer(camera_handle, frame_type, frame_buffer, frame_size[2])
            pyimage_ambos = pg.image.frombuffer(frame_buffer, frame_size[:2], "RGBA")

            if pyimage_ambos:
                img_data = pg.image.tostring(pyimage_ambos, "RGBA", 1)
                img_data_izq, img_data_der = img_data[:len(img_data) // 2], img_data[len(img_data) // 2:]

    except openvr.error_code.TrackedCameraError as e:
        print(f"TrackedCameraError: {e}")

0

There are 0 answers