Reposted from: http://www.cnblogs.com/yangecnu/archive/2012/04/04/KinectSDK_Depth_Image_Processing_Part1.html
Depth data is at the heart of the Kinect sensor.
DepthImageStream is used in much the same way as ColorImageStream; both inherit from ImageStream. You can generate a depth image from a DepthImageStream just as you generate an image from a ColorImageStream.
The routine for displaying depth data is the same.
Initialization
private void InitializeKinectSensor(KinectSensor kinectSensor)
{
    if (kinectSensor != null)
    {
        // Enable the depth stream and create a Gray16 bitmap matching the frame size
        DepthImageStream depthStream = kinectSensor.DepthStream;
        depthStream.Enable();

        depthImageBitMap = new WriteableBitmap(depthStream.FrameWidth, depthStream.FrameHeight,
                                               96, 96, PixelFormats.Gray16, null);
        depthImageBitmapRect = new Int32Rect(0, 0, depthStream.FrameWidth, depthStream.FrameHeight);
        depthImageStride = depthStream.FrameWidth * depthStream.FrameBytesPerPixel;

        DepthImage.Source = depthImageBitMap;
        kinectSensor.DepthFrameReady += new EventHandler<DepthImageFrameReadyEventArgs>(kinectSensor_DepthFrameReady);
        kinectSensor.Start();
    }
}
Event handling
void kinectSensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
{
    // Dispose of the previous frame before holding on to the new one
    if (lastDepthFrame != null)
    {
        lastDepthFrame.Dispose();
        lastDepthFrame = null;
    }

    lastDepthFrame = e.OpenDepthImageFrame();
    if (lastDepthFrame != null)
    {
        depthPixelDate = new short[lastDepthFrame.PixelDataLength];
        lastDepthFrame.CopyPixelDataTo(depthPixelDate);
        depthImageBitMap.WritePixels(depthImageBitmapRect, depthPixelDate, depthImageStride, 0);
        CreateColorDepthImage(this.lastDepthFrame, depthPixelDate);
    }
}
Getting the depth of a single point
Getting the distance of each pixel is easy, but using it directly takes a bit of bit manipulation, which you may rarely run into in everyday programming. The depth value is stored in bits 3 through 15, so to get a depth value you can use directly you have to shift right to strip off the player index bits; the importance of the player index will be covered later. The code below briefly shows how to read a pixel's depth value. The pixelData variable is the short array copied from the depth frame, and pixelIndex is computed from the position of the pixel being queried. The SDK defines a constant on DepthImageFrame, PlayerIndexBitmaskWidth, which is the number of bits to shift right to obtain the depth value.
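Before the full mouse handler, here is a minimal sketch (added for illustration, not from the original post) of just the two bit operations on one raw depth pixel: the right shift that yields the distance in millimeters, and, for completeness, the mask that yields the player index. PlayerIndexBitmask is the SDK's companion constant to PlayerIndexBitmaskWidth.

// pixelData: the short[] copied from the depth frame; pixelIndex: the pixel to inspect
short rawPixel = pixelData[pixelIndex];

// Bits 3-15 hold the distance in millimeters: shift the player index bits away
int depthInMillimeters = rawPixel >> DepthImageFrame.PlayerIndexBitmaskWidth;

// Bits 0-2 hold the player index (0 = no player); shown here only for illustration
int playerIndex = rawPixel & DepthImageFrame.PlayerIndexBitmask;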
One thing worth noting: in the UI, the Image control's width and height are hardcoded. If they are not set, the control scales with its parent container (the Form/window), and when the control's dimensions no longer match the depth frame's dimensions, the code returns wrong values on a mouse click and in some cases even throws an exception, because the pixel array has a fixed size determined by the DepthImageFormat passed to the DepthImageStream's Enable method. If the image control's size is not fixed, it scales with the window, and an extra computation is then needed to map the mouse position in the window onto the dimensions of the depth frame. This kind of scaling and coordinate conversion is very common and will be discussed in a later article; for now, to keep things simple, the image control's size is hardcoded.
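If you would rather not hardcode the control size, that extra mapping might look something like the following sketch. This helper is an illustration added here, not part of the original post; the name GetPixelIndex is hypothetical, and it assumes the Image control is stretched to fill its layout slot (Stretch="Fill"), so a simple linear scale is enough.

// Hypothetical helper: maps a mouse position on the (scaled) Image control
// back to pixel coordinates in the depth frame. Assumes Stretch="Fill".
private int GetPixelIndex(Point p, DepthImageFrame depthFrame)
{
    int frameX = (int)(p.X * depthFrame.Width / DepthImage.ActualWidth);
    int frameY = (int)(p.Y * depthFrame.Height / DepthImage.ActualHeight);
    return frameX + frameY * depthFrame.Width;
}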
private void DepthImage_MouseLeftButtonUp(object sender, MouseButtonEventArgs e)
{
    // Read the depth value at the mouse position
    Point p = e.GetPosition(DepthImage);
    if (depthPixelDate != null && depthPixelDate.Length > 0)
    {
        Int32 pixelIndex = (Int32)(p.X + ((Int32)p.Y * this.lastDepthFrame.Width));
        Int32 depth = this.depthPixelDate[pixelIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth;  // depth in millimeters

        Int32 depthInches = (Int32)(depth * 0.0393700787);  // inches
        Int32 depthFt = depthInches / 12;                    // feet
        depthInches = depthInches % 12;

        //PixelDepth.Text = String.Format("{0}mm ~ {1}'{2}\"", depth, depthFt, depthInches);
    }
}
Depth image enhancement
The code filters out points that are too close, since points that are too close or too far are both inaccurate. Anything greater than 3.5 meters or less than 0 meters is filtered out and rendered as white.
private void CreateLighterShadesOfGray(DepthImageFrame depthFrame, short[] pixelData)
{
    // Simple enhancement of the depth image: clamp with low/high thresholds
    Int32 depth;
    Int32 loThreshold = 0;
    Int32 hiThreshold = 3500;
    short[] enhPixelData = new short[depthFrame.Width * depthFrame.Height];

    for (int i = 0; i < pixelData.Length; i++)
    {
        depth = pixelData[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;
        if (depth < loThreshold || depth > hiThreshold)
        {
            enhPixelData[i] = 0xFF;
        }
        else
        {
            enhPixelData[i] = (short)~pixelData[i];
        }
    }

    EnhancedDepthImage.Source = BitmapSource.Create(depthFrame.Width, depthFrame.Height, 96, 96,
                                                    PixelFormats.Gray16, null, enhPixelData,
                                                    depthFrame.Width * depthFrame.BytesPerPixel);
}
The result is better if the 16-bit grayscale is rendered as 32-bit color. When the R, G, and B values are equal, the pixel appears gray; gray values range from 0 to 255, with 0 being black, 255 being white, and everything in between a shade of gray. So now we render the gray value in RGB.
The image format is changed to Bgr32, which means each pixel occupies 32 bits (4 bytes): 8 bits each for R, G, and B, with the remaining 8 bits left unused. That limits each channel to 0-255, so the depth value has to be mapped into that range. As before, minimum and maximum detection thresholds are also set, and anything outside the range is rendered as white. Dividing the depth value by 4095 (0xFFF, the maximum detectable depth) and multiplying by 255 maps the depth into 0-255; for example, a depth of 2000 mm becomes 255 × 2000 / 4095 ≈ 124, a medium gray.
private void CreateBetterShadesOfGray(DepthImageFrame depthFrame, short[] pixelData)
{
    Int32 depth;
    Int32 gray;
    Int32 loThreshold = 0;
    Int32 bytePerPixel = 4;
    Int32 hiThreshold = 3500;
    byte[] enhPixelData = new byte[depthFrame.Width * depthFrame.Height * bytePerPixel];

    for (int i = 0, j = 0; i < pixelData.Length; i++, j += bytePerPixel)
    {
        depth = pixelData[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;
        if (depth < loThreshold || depth > hiThreshold)
        {
            gray = 0xFF;
        }
        else
        {
            gray = (255 * depth / 0xFFF);
        }

        enhPixelData[j] = (byte)gray;       // Blue
        enhPixelData[j + 1] = (byte)gray;   // Green
        enhPixelData[j + 2] = (byte)gray;   // Red
    }

    EnhancedDepthImage.Source = BitmapSource.Create(depthFrame.Width, depthFrame.Height, 96, 96,
                                                    PixelFormats.Bgr32, null, enhPixelData,
                                                    depthFrame.Width * bytePerPixel);
}
Color rendering of depth data: pseudo-color images
Mapping depth values into 0-255 and displaying them in RGB already enhances the image and makes more depth detail visible at a glance. Another simple approach that also works well is to convert the depth value into a hue and saturation and display that.
private void CreateColorDepthImage(DepthImageFrame depthFrame, short[] pixelData)
{
    Int32 depth;
    Double hue;
    Int32 loThreshold = 1200;
    Int32 hiThreshold = 3500;
    Int32 bytesPerPixel = 4;
    byte[] rgb = new byte[3];
    byte[] enhPixelData = new byte[depthFrame.Width * depthFrame.Height * bytesPerPixel];

    for (int i = 0, j = 0; i < pixelData.Length; i++, j += bytesPerPixel)
    {
        depth = pixelData[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;
        if (depth < loThreshold || depth > hiThreshold)
        {
            enhPixelData[j] = 0x00;
            enhPixelData[j + 1] = 0x00;
            enhPixelData[j + 2] = 0x00;
        }
        else
        {
            hue = ((360 * depth / 0xFFF) + loThreshold);
            ConvertHslToRgb(hue, 100, 100, rgb);

            enhPixelData[j] = rgb[2];       // Blue
            enhPixelData[j + 1] = rgb[1];   // Green
            enhPixelData[j + 2] = rgb[0];   // Red
        }
    }

    EnhancedDepthImage.Source = BitmapSource.Create(depthFrame.Width, depthFrame.Height, 96, 96,
                                                    PixelFormats.Bgr32, null, enhPixelData,
                                                    depthFrame.Width * bytesPerPixel);
}
The code above uses a function called ConvertHslToRgb, which converts between two color spaces: from HSL (Hue, Saturation, Lightness) to RGB.
public void ConvertHslToRgb(double hue, double saturation, double lightness, byte[] rgb)
{
    double red = 0.0;
    double green = 0.0;
    double blue = 0.0;

    hue = hue % 360.0;
    saturation = saturation / 100.0;
    lightness = lightness / 100.0;

    if (saturation == 0.0)
    {
        red = lightness;
        green = lightness;
        blue = lightness;
    }
    else
    {
        double huePrime = hue / 60.0;
        int x = (int)huePrime;
        double xPrime = huePrime - (double)x;
        double L0 = lightness * (1.0 - saturation);
        double L1 = lightness * (1.0 - (saturation * xPrime));
        double L2 = lightness * (1.0 - (saturation * (1.0 - xPrime)));

        switch (x)
        {
            case 0: red = lightness; green = L2; blue = L0; break;
            case 1: red = L1; green = lightness; blue = L0; break;
            case 2: red = L0; green = lightness; blue = L2; break;
            case 3: red = L0; green = L1; blue = lightness; break;
            case 4: red = L2; green = L0; blue = lightness; break;
            case 5: red = lightness; green = L0; blue = L1; break;
        }
    }

    rgb[0] = (byte)(255.0 * red);
    rgb[1] = (byte)(255.0 * green);
    rgb[2] = (byte)(255.0 * blue);
}
That color space conversion looks a bit fiddly ~~ OK, I more or less get it now, so let's implement it ourselves:
using System;
using System.Linq;
using System.Windows;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using Microsoft.Kinect;

namespace TestDepthProcess
{
    /// <summary>
    /// Interaction logic for MainWindow.xaml
    /// </summary>
    public partial class MainWindow : Window
    {
        private KinectSensor kinect;
        private WriteableBitmap depthImageBitMap;
        private Int32Rect depthImageBitmapRect;
        private Int32 depthImageStride;
        private DepthImageFrame lastDepthFrame;
        private short[] depthPixelDate;

        public KinectSensor Kinect
        {
            // C# property getters/setters take a little getting used to
            get { return kinect; }
            set
            {
                if (kinect != null)
                {
                    UninitializeKinectSensor(this.kinect);
                    kinect = null;
                }

                if (value != null && value.Status == KinectStatus.Connected)
                {
                    kinect = value;
                    InitializeKinectSensor(this.kinect);
                }
            }
        }

        public MainWindow()
        {
            InitializeComponent();
            this.Loaded += (s, e) => DiscoverKinectSensor();
            this.Unloaded += (s, e) => this.Kinect = null;   // go through the setter so the sensor is uninitialized
        }

        private void UninitializeKinectSensor(KinectSensor kinect)
        {
            if (kinect != null)
            {
                kinect.Stop();
                kinect.DepthFrameReady -= new EventHandler<DepthImageFrameReadyEventArgs>(kinectSensor_DepthFrameReady);
            }
        }

        private void InitializeKinectSensor(KinectSensor kinect)
        {
            if (kinect != null)
            {
                DepthImageStream depthStream = kinect.DepthStream;
                depthStream.Enable();

                depthImageBitMap = new WriteableBitmap(depthStream.FrameWidth, depthStream.FrameHeight,
                                                       96, 96, PixelFormats.Gray16, null);
                depthImageBitmapRect = new Int32Rect(0, 0, depthStream.FrameWidth, depthStream.FrameHeight);
                depthImageStride = depthStream.FrameWidth * depthStream.FrameBytesPerPixel;

                DepthImage.Source = depthImageBitMap;
                kinect.DepthFrameReady += new EventHandler<DepthImageFrameReadyEventArgs>(kinectSensor_DepthFrameReady);
                kinect.Start();
            }
        }

        private void DiscoverKinectSensor()
        {
            KinectSensor.KinectSensors.StatusChanged += new EventHandler<StatusChangedEventArgs>(KinectSensors_StatusChanged);
            // This goes through the Kinect setter -- note this.Kinect, not this.kinect
            this.Kinect = KinectSensor.KinectSensors.FirstOrDefault(sensor => sensor.Status == KinectStatus.Connected);
        }

        private void KinectSensors_StatusChanged(object sender, StatusChangedEventArgs e)
        {
            switch (e.Status)
            {
                case KinectStatus.Connected:
                    if (this.kinect == null)
                        this.Kinect = e.Sensor;   // use the property so the setter initializes the sensor
                    break;
                case KinectStatus.Disconnected:
                    if (this.kinect == e.Sensor)
                    {
                        this.Kinect = null;
                        this.Kinect = KinectSensor.KinectSensors.FirstOrDefault(x => x.Status == KinectStatus.Connected);
                        if (this.kinect == null)
                        {
                            // TODO: notify the user that the Kinect has been unplugged
                        }
                    }
                    break;
                // TODO: handle the remaining status values
            }
        }

        private void kinectSensor_DepthFrameReady(Object sender, DepthImageFrameReadyEventArgs e)
        {
            if (lastDepthFrame != null)
            {
                lastDepthFrame.Dispose();
                lastDepthFrame = null;
            }

            lastDepthFrame = e.OpenDepthImageFrame();
            if (lastDepthFrame != null)
            {
                depthPixelDate = new short[lastDepthFrame.PixelDataLength];
                lastDepthFrame.CopyPixelDataTo(depthPixelDate);
                depthImageBitMap.WritePixels(depthImageBitmapRect, depthPixelDate, depthImageStride, 0);
                CreateColorDepthImage(this.lastDepthFrame, depthPixelDate);
            }
        }

        private void CreateColorDepthImage(DepthImageFrame depthFrame, short[] pixelData)
        {
            Int32 depth;
            Double hue;
            Int32 loThreshold = 1200;
            Int32 hiThreshold = 3500;
            Int32 bytesPerPixel = 4;
            byte[] rgb = new byte[3];
            byte[] enhPixelData = new byte[depthFrame.Width * depthFrame.Height * bytesPerPixel];

            for (int i = 0, j = 0; i < pixelData.Length; i++, j += bytesPerPixel)
            {
                depth = pixelData[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;
                if (depth < loThreshold || depth > hiThreshold)
                {
                    enhPixelData[j] = 0x00;
                    enhPixelData[j + 1] = 0x00;
                    enhPixelData[j + 2] = 0x00;
                }
                else
                {
                    hue = ((360 * depth / 0xFFF) + loThreshold);
                    ConvertHslToRgb(hue, 100, 100, rgb);

                    enhPixelData[j] = rgb[2];       // blue
                    enhPixelData[j + 1] = rgb[1];   // green
                    enhPixelData[j + 2] = rgb[0];   // red
                }
            }

            EnhancedDepthImage.Source = BitmapSource.Create(depthFrame.Width, depthFrame.Height, 96, 96,
                                                            PixelFormats.Bgr32, null, enhPixelData,
                                                            depthFrame.Width * bytesPerPixel);
        }

        private void ConvertHslToRgb(double hue, double saturation, double lightness, byte[] rgb)
        {
            double red = 0.0;
            double green = 0.0;
            double blue = 0.0;

            hue = hue % 360;
            saturation = saturation / 100.0;
            lightness = lightness / 100.0;

            if (saturation == 0.0)
            {
                red = lightness;
                green = lightness;
                blue = lightness;
            }
            else
            {
                double huePrime = hue / 60.0;
                int x = (int)huePrime;
                double xPrime = huePrime - (double)x;
                double L0 = lightness * (1.0 - saturation);
                double L1 = lightness * (1.0 - (saturation * xPrime));
                double L2 = lightness * (1.0 - (saturation * (1.0 - xPrime)));

                switch (x)
                {
                    case 0: red = lightness; green = L2; blue = L0; break;
                    case 1: red = L1; green = lightness; blue = L0; break;
                    case 2: red = L0; green = lightness; blue = L2; break;
                    case 3: red = L0; green = L1; blue = lightness; break;
                    case 4: red = L2; green = L0; blue = lightness; break;
                    case 5: red = lightness; green = L0; blue = L1; break;
                }
            }

            rgb[0] = (byte)(255.0 * red);
            rgb[1] = (byte)(255.0 * green);
            rgb[2] = (byte)(255.0 * blue);
        }

        private void DepthImage_MouseLeftButtonUp(Object sender, MouseButtonEventArgs e)
        {
            Point p = e.GetPosition(DepthImage);
            if (depthPixelDate != null && depthPixelDate.Length > 0)
            {
                Int32 pixelIndex = (Int32)(p.X + ((Int32)p.Y * this.lastDepthFrame.Width));
                Int32 depth = this.depthPixelDate[pixelIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth;

                Int32 depthInches = (Int32)(depth * 0.0393700787);
                Int32 depthFt = depthInches / 12;
                depthInches = depthInches % 12;

                PixelDepth.Text = string.Format("{0}mm ~ {1}'{2}\"", depth, depthFt, depthInches);
            }
        }

        private void CreateLighterShadesOfGray(DepthImageFrame depthFrame, short[] pixelData)
        {
            Int32 depth;
            Int32 loThreshold = 0;
            Int32 hiThreshold = 3500;
            short[] enhPixelData = new short[depthFrame.Width * depthFrame.Height];

            for (int i = 0; i < pixelData.Length; i++)
            {
                depth = pixelData[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;
                if (depth < loThreshold || depth > hiThreshold)
                {
                    enhPixelData[i] = 0xFF;
                }
                else
                {
                    enhPixelData[i] = (short)~pixelData[i];
                }
            }

            EnhancedDepthImage.Source = BitmapSource.Create(depthFrame.Width, depthFrame.Height, 96, 96,
                                                            PixelFormats.Gray16, null, enhPixelData,
                                                            depthFrame.Width * depthFrame.BytesPerPixel);
        }

        private void CreateBetterShadesOfGray(DepthImageFrame depthFrame, short[] pixelData)
        {
            Int32 depth;
            Int32 gray;
            Int32 loThreshold = 0;
            Int32 bytePerPixel = 4;   // 4 bytes per pixel; only the first three (B, G, R) are used
            Int32 hiThreshold = 3500;
            byte[] enhPixelData = new byte[depthFrame.Width * depthFrame.Height * bytePerPixel];

            for (int i = 0, j = 0; i < pixelData.Length; i++, j += bytePerPixel)
            {
                depth = pixelData[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;
                if (depth < loThreshold || depth > hiThreshold)
                {
                    gray = 0xFF;
                }
                else
                {
                    gray = (255 * depth / 0xFFF);
                }

                enhPixelData[j] = (byte)gray;
                enhPixelData[j + 1] = (byte)gray;
                enhPixelData[j + 2] = (byte)gray;
            }

            EnhancedDepthImage.Source = BitmapSource.Create(depthFrame.Width, depthFrame.Height, 96, 96,
                                                            PixelFormats.Bgr32, null, enhPixelData,
                                                            depthFrame.Width * bytePerPixel);
        }
    }
}
C# property setters ~~ that bug took me forever to track down ~~
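For the record (an illustration added here, not part of the original post), the gotcha is that assigning to the backing field bypasses the setter, so InitializeKinectSensor never runs; assigning through the property does the right thing:

// Hypothetical fragment illustrating the pitfall
this.kinect = e.Sensor;   // assigns the backing field directly -- the setter never runs,
                          // so InitializeKinectSensor is never called and no frames arrive

this.Kinect = e.Sensor;   // goes through the property setter, which uninitializes the old
                          // sensor (if any) and calls InitializeKinectSensor on the new one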