Simply get RGB and depth image streams with pykinect and Python 3


I want to get the depth and RGB video streams from a Kinect (version 1). I'm using the Python 3 version of pykinect, not CPython.

I have found some examples, but the pykinect documentation is nearly nonexistent and I don't want to use pygame.

On Linux with freenect I did:

import freenect
import numpy as np
import cv2 as cv

# grab one RGB frame and one depth frame synchronously
rgb_stream = freenect.sync_get_video()[0]        # RGB frame, shape (480, 640, 3)
rgb_stream = rgb_stream[:, :, ::-1]              # reverse channel order (RGB -> BGR)
rgb_image = cv.cvtColor(rgb_stream, cv.COLOR_BGR2RGB)

depth_stream = freenect.sync_get_depth()[0]      # 11-bit depth, shape (480, 640)
depth_stream = np.uint8(depth_stream)            # truncate to 8 bits
depth_image = cv.cvtColor(depth_stream, cv.COLOR_GRAY2RGB)

However, I'm using pykinect on Windows, and I want to get the depth and RGB streams in a similar fashion, then process them with OpenCV and display them with Qt.
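
For the Qt side, one common approach is to convert the frame OpenCV gives you (a numpy array in BGR order) into a QImage and hand it to a QLabel. This is only a minimal sketch assuming PyQt5; the helper name to_pixmap is just for illustration:

import cv2
import numpy as np
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QLabel

def to_pixmap(bgr_frame):
    # convert an OpenCV BGR frame (numpy array) into a QPixmap for a QLabel
    rgb = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB)
    h, w, _ = rgb.shape
    # .copy() detaches the QImage from the numpy buffer so it stays valid
    qimg = QImage(rgb.data, w, h, 3 * w, QImage.Format_RGB888).copy()
    return QPixmap.fromImage(qimg)

app = QApplication([])
label = QLabel()
label.setPixmap(to_pixmap(np.zeros((480, 640, 3), np.uint8)))  # replace with a Kinect frame
label.show()
app.exec_()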

Here is an example code I found:

from pykinect import nui
import numpy
import cv2

def video_handler_function(frame):
    # called by the runtime every time a new colour frame is ready
    video = numpy.empty((480, 640, 4), numpy.uint8)   # BGRA buffer
    frame.image.copy_bits(video.ctypes.data)          # copy the raw frame bits into the array
    cv2.imshow('KINECT Video Stream', video)

kinect = nui.Runtime()
kinect.video_frame_ready += video_handler_function    # subscribe the handler to the colour-frame event
kinect.video_stream.open(nui.ImageStreamType.Video, 2, nui.ImageResolution.Resolution640x480, nui.ImageType.Color)

cv2.namedWindow('KINECT Video Stream', cv2.WINDOW_AUTOSIZE)

while True:
    key = cv2.waitKey(1)
    if key == 27:       # Esc quits
        break

kinect.close()
cv2.destroyAllWindows()

What is video_handler_function? What is the purpose of kinect.video_frame_ready += video_handler_function?

I tried kinect.depth_stream.open(nui.ImageStreamType.Depth, 2, nui.ImageResolution.Resolution320x240, nui.ImageType.Depth) to get the depth image, with some modifications to the handler function, but I couldn't make it work.
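
(For reference: the depth frame on a Kinect v1 is 16 bits per pixel, so a handler that copies the bits into an 8-bit buffer, or processes the buffer before copying, will not work. A minimal sketch of what a depth handler might look like, assuming the same pykinect nui API as above; depth_handler_function is just an illustrative name:)

import numpy
import cv2

def depth_handler_function(frame):
    # the 320x240 depth frame is 16 bits per pixel, so the buffer must be uint16
    depth = numpy.empty((240, 320), numpy.uint16)
    frame.image.copy_bits(depth.ctypes.data)          # copy the bits *before* processing
    depth_mm = depth >> 3                             # low 3 bits hold the player index
    depth_8bit = (depth_mm >> 4).astype(numpy.uint8)  # rough rescale for display
    cv2.imshow('KINECT Depth Stream', depth_8bit)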

There is 1 answer below.

from pykinect import nui
import numpy
import cv2


kinect = nui.Runtime()
kinect.skeleton_engine.enabled = True

def getColorImage(frame):
    height, width = frame.image.height, frame.image.width   # get width and height of the image
    rgb = numpy.empty((height, width, 4), numpy.uint8)       # BGRA buffer
    frame.image.copy_bits(rgb.ctypes.data)                   # copy the bits of the image into the array

    cv2.imshow('KINECT Video Stream', rgb)                   # display the image

def getDepthImage(frame):
    height, width = frame.image.height, frame.image.width    # get frame height and width
    depth = numpy.empty((height, width), numpy.uint16)       # depth frames are 16 bits per pixel
    frame.image.copy_bits(depth.ctypes.data)                 # copy the bits first, then process
    depth = (depth >> 3) & 4095                              # drop the 3-bit player index, keep depth in mm
    depth = (depth >> 4).astype(numpy.uint8)                 # rescale 12-bit depth to 8 bits for display

    cv2.imshow('KINECT depth Stream', depth)

def frame_ready(frame):
    for skeleton in frame.SkeletonData:
        if skeleton.eTrackingState == nui.SkeletonTrackingState.TRACKED:
            print(skeleton.Position.x, skeleton.Position.y, skeleton.Position.z, skeleton.Position.w)

def main():
    # register the handlers and open the streams once, before the display loop
    kinect.video_frame_ready += getColorImage
    kinect.video_stream.open(nui.ImageStreamType.Video, 2, nui.ImageResolution.Resolution640x480, nui.ImageType.Color)
    cv2.namedWindow('KINECT Video Stream', cv2.WINDOW_AUTOSIZE)

    kinect.depth_frame_ready += getDepthImage
    kinect.depth_stream.open(nui.ImageStreamType.Depth, 2, nui.ImageResolution.Resolution320x240, nui.ImageType.Depth)
    cv2.namedWindow('KINECT depth Stream', cv2.WINDOW_AUTOSIZE)

    kinect.skeleton_frame_ready += frame_ready

    while True:
        # the handlers run on the Kinect's worker thread; the main thread just pumps the GUI
        if cv2.waitKey(1) == 27:    # Esc quits
            break

    kinect.close()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
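
To answer the question about video_handler_function: video_frame_ready (like depth_frame_ready and skeleton_frame_ready) is an event on the nui.Runtime, and += subscribes a callback that the runtime invokes on its own worker thread whenever a new frame arrives. If you would rather do the OpenCV processing (or the Qt display) on the main thread, one option is to have the callback only store the latest frame and let the main loop pick it up. A rough sketch of that pattern, with hypothetical names (store_color_frame, latest_rgb):

import threading
import numpy
import cv2
from pykinect import nui

latest_rgb = None
lock = threading.Lock()

def store_color_frame(frame):
    # runs on the Kinect runtime's thread: just stash the frame, no GUI calls here
    global latest_rgb
    bgra = numpy.empty((480, 640, 4), numpy.uint8)
    frame.image.copy_bits(bgra.ctypes.data)
    with lock:
        latest_rgb = bgra

kinect = nui.Runtime()
kinect.video_frame_ready += store_color_frame
kinect.video_stream.open(nui.ImageStreamType.Video, 2, nui.ImageResolution.Resolution640x480, nui.ImageType.Color)

while True:
    with lock:
        frame = None if latest_rgb is None else latest_rgb.copy()
    if frame is not None:
        # process with OpenCV here, or convert to a QImage for Qt
        cv2.imshow('processed', cv2.cvtColor(frame, cv2.COLOR_BGRA2GRAY))
    if cv2.waitKey(1) == 27:    # Esc quits
        break

kinect.close()
cv2.destroyAllWindows()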
