Kinect のスケルトン情報を取得&表示
スケルトン情報の取得に関するサンプルは沢山あるのだけれど、表示については(pygame の例はあれど)OpenCV のみで対処したサンプルが見つからず。
こんなのでよいのかしら?
import cv2 import numpy as np import pykinect from pykinect import nui import thread import sys # Video def video_frame_ready( frame ): if videoDisplay == False: return with screenLock: video = np.empty( ( 480, 640, 4 ), np.uint8 ) frame.image.copy_bits( video.ctypes.data ) if skeletons is not None: for index, data in enumerate(skeletons): if data.eTrackingState != nui.SkeletonTrackingState.TRACKED: continue #get right hand position handRightPosition = data.SkeletonPositions[nui.JointId.HandRight] hr = nui.SkeletonEngine.skeleton_to_depth_image(handRightPosition, 640, 480) #get left hand position handLeftPosition = data.SkeletonPositions[nui.JointId.HandLeft] hl = nui.SkeletonEngine.skeleton_to_depth_image(handLeftPosition, 640, 480) print "(%d, %d)" % (int(hr[0]), int(hr[1])) cv2.circle(video, (int(hr[0]), int(hr[1])), 20, (255, 0, 0), thickness=10) cv2.circle(video, (int(hl[0]), int(hl[1])), 20, (0, 0, 255), thickness=10) cv2.imshow( 'frame', video ) # Depth def depth_frame_ready( frame ): if videoDisplay == True: return depth = np.empty( ( 240, 320, 1 ), np.uint16 ) frame.image.copy_bits( depth.ctypes.data ) print skeletons if skeletons is not None: for index, data in enumerate(skeletons): if data.eTrackingState != nui.SkeletonTrackingState.TRACKED: continue #get head position headPosition = data.SkeletonPositions[nui.JointId.Head] hp = nui.SkeletonEngine.skeleton_to_depth_image(headPosition, 320, 240) cv2.circle(depth, (int(hp[0]), int(hp[1])), 20, (255, 255, 255), thickness=10) cv2.imshow( 'frame', depth ) def skeleton_frame_ready(frame): global skeletons skeletons = frame.SkeletonData if __name__ == '__main__': screenLock = thread.allocate() videoDisplay = False kinect = nui.Runtime() skeletons = None #skeleton frame ready event handling #Reference : http://www.slideshare.net/pycontw/pykinect kinect.skeleton_engine.enabled = True kinect.skeleton_frame_ready += skeleton_frame_ready kinect.video_frame_ready += video_frame_ready kinect.depth_frame_ready += 
depth_frame_ready kinect.video_stream.open( nui.ImageStreamType.Video, 2, nui.ImageResolution.Resolution640x480, nui.ImageType.Color ) kinect.depth_stream.open( nui.ImageStreamType.Depth, 2, nui.ImageResolution.Resolution320x240, nui.ImageType.Depth ) cv2.namedWindow( 'frame', cv2.WINDOW_AUTOSIZE ) while True: #waitKey() returns ASCII code of the pressed key key = cv2.waitKey(33) if key == 27: # ESC break elif key == 118: # 'v' print >> sys.stderr, "Video stream activated" videoDisplay = True elif key == 100: # 'd' print >> sys.stderr, "Depth stream activated" videoDisplay = False cv2.destroyAllWindows() kinect.close()