Finding the mapping between an image and its transformed version
std::vector<cv::Point2f> ptarr1, ptarr2;
for (...) { cv::Point2f tmppt; ... ptarr1.push_back(tmppt); }
for (...) { cv::Point2f tmppt; ... ptarr2.push_back(tmppt); }
cv::Mat homo = cv::findHomography(ptarr1, ptarr2, CV_RANSAC);
Mapping the points of ptarr1 through homo yields the points of ptarr2; homo is the homography matrix that encodes this mapping.
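For illustration, a minimal sketch of the whole round trip with made-up corner coordinates (the real loops above would fill the arrays from feature matches); cv::perspectiveTransform is the point-level counterpart of warpPerspective and is handy for verifying homo:

#include <opencv2/opencv.hpp>
#include <vector>

void homographyDemo()
{
    // Hypothetical corresponding corners (made-up values; at least 4 pairs are required).
    std::vector<cv::Point2f> ptarr1 = { {0, 0}, {639, 0}, {639, 479}, {0, 479} };
    std::vector<cv::Point2f> ptarr2 = { {12, 8}, {620, 15}, {600, 470}, {5, 460} };

    cv::Mat homo = cv::findHomography(ptarr1, ptarr2, CV_RANSAC); // cv::RANSAC in newer OpenCV

    // Sanity check: mapping ptarr1 through homo should reproduce ptarr2 (up to noise).
    std::vector<cv::Point2f> mapped;
    cv::perspectiveTransform(ptarr1, mapped, homo);
}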
Applying the mapping
cv::warpPerspective(srcImageMat, imageTransform1, homo, readBackImageMat.size());
srcImageMat is warped through homo, at the size of readBackImageMat, to produce imageTransform1.
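The same call with the optional parameters spelled out; these are OpenCV's defaults, shown only to document what the short form does:

cv::Mat imageTransform1;
cv::warpPerspective(srcImageMat, imageTransform1, homo,
                    readBackImageMat.size(),
                    cv::INTER_LINEAR,       // default interpolation
                    cv::BORDER_CONSTANT,    // pixels with no source coverage...
                    cv::Scalar());          // ...are filled with black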
Extracting a region of an image
cv::Rect roi_rect = cv::Rect(nleft, ntop, width, height);
cv::Mat roi  = imageTransform1(roi_rect);   // extract the background region
cv::Mat roi2 = readBackImageMat(roi_rect);
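Note that Mat(roi_rect) creates a view, not a copy: roi and roi2 share pixel memory with their parent images, so writing through them modifies the originals. A sketch of the difference, with .clone() for when an independent buffer is needed:

cv::Mat roiView = imageTransform1(roi_rect);          // shares memory with imageTransform1
cv::Mat roiCopy = imageTransform1(roi_rect).clone();  // independent deep copy
roiView.setTo(cv::Scalar(0, 0, 255));                 // also paints imageTransform1 red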
Copying one image into the corresponding region of another
outpt.copyTo(readBackImageMat(roi_rect));
outpt is copied into the roi_rect region of readBackImageMat.
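copyTo also takes an optional mask when only part of the source should be written; a sketch with an illustrative circular mask (the shape is arbitrary, any non-zero mask pixel is copied):

cv::Mat copyMask(outpt.size(), CV_8UC1, cv::Scalar(0));
cv::circle(copyMask, cv::Point(outpt.cols / 2, outpt.rows / 2),
           std::min(outpt.cols, outpt.rows) / 2,
           cv::Scalar(255), -1);                      // thickness -1 = filled circle
outpt.copyTo(readBackImageMat(roi_rect), copyMask);   // only mask != 0 pixels land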
Poisson cloning (seamlessClone)
cv::Mat roi  = imageTransform1(roi_rect);            // background region from the warped image
cv::Mat roi2 = readBackImageMat(roi_rect);
cv::Mat mask(roi.size(), CV_8UC1, cv::Scalar(255));  // all-white mask: clone the whole patch
cv::Point center(roi.cols / 2, roi.rows / 2);        // seamlessClone expects a cv::Point, not cv::Size
cv::Mat outpt;
cv::seamlessClone(roi, roi2, mask, center, outpt, cv::NORMAL_CLONE);
https://blog.csdn.net/hjimce/article/details/45716603
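seamlessClone lives in the photo module (opencv2/photo.hpp). Besides NORMAL_CLONE there are two other modes worth knowing; a sketch of swapping the flag, everything else unchanged:

cv::Mat mixedOut;
cv::seamlessClone(roi, roi2, mask, center, mixedOut, cv::MIXED_CLONE);
// MIXED_CLONE keeps the stronger of the source/destination gradients per pixel,
// which tends to survive textured backgrounds better than NORMAL_CLONE.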
Pixel blending (alpha feathering)
cv::Mat outpt;
cv::seamlessClone(roi, roi2, mask, center, outpt, cv::NORMAL_CLONE);

// Rebuild the mask as a feathered alpha map: white in the middle, blurred toward the edges.
mask.setTo(0);
mask(cv::Rect(15, 30, mask.cols - 30, mask.rows - 60)).setTo(255);
cv::blur(mask, mask, cv::Size(15, 30));

// Alpha-blend the cloned patch back into the destination, pixel by pixel.
cv::Mat rcMat = readBackImageMat(roi_rect);
for (int i = 0; i < rcMat.rows; i++)
{
    uchar* cc = rcMat.ptr<uchar>(i);   // destination row (3 channels)
    uchar* ss = outpt.ptr<uchar>(i);   // cloned-patch row (3 channels)
    uchar* mm = mask.ptr<uchar>(i);    // alpha row (1 channel)
    for (int j = 0; j < rcMat.cols; j++)
    {
        float falpha = mm[j] / 255.0f;
        cc[j * 3]     = ss[j * 3]     * falpha + cc[j * 3]     * (1 - falpha);
        cc[j * 3 + 1] = ss[j * 3 + 1] * falpha + cc[j * 3 + 1] * (1 - falpha);
        cc[j * 3 + 2] = ss[j * 3 + 2] * falpha + cc[j * 3 + 2] * (1 - falpha);
    }
}
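Since OpenCV 3.0 the hand-written loop can be replaced with cv::blendLinear (imgproc module), which takes per-pixel CV_32FC1 weight maps; a loop-free sketch of the same feathered blend:

cv::Mat w1, w2;
mask.convertTo(w1, CV_32FC1, 1.0 / 255.0);      // weight of the cloned patch
cv::subtract(cv::Scalar::all(1.0), w1, w2);     // weight of the original pixels
cv::Mat blended;
cv::blendLinear(outpt, readBackImageMat(roi_rect), w1, w2, blended);
blended.copyTo(readBackImageMat(roi_rect));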
Converting a cv::Mat to a GDI+ Bitmap
Bitmap* CPublicFunction::CopyMatToBmp(Mat& i_Mat)
{
    //assert(mb_InitDone);
    PixelFormat e_Format;
    switch (i_Mat.channels())
    {
        case 1: e_Format = PixelFormat8bppIndexed; break;
        case 3: e_Format = PixelFormat24bppRGB;    break;
        case 4: e_Format = PixelFormat32bppARGB;   break;
        default: throw L"Image format not supported.";
    }

    // Create Bitmap with own memory
    Bitmap* pi_Bmp = new Bitmap(i_Mat.cols, i_Mat.rows, e_Format);

    BitmapData i_Data;
    Gdiplus::Rect k_Rect(0, 0, i_Mat.cols, i_Mat.rows);
    if (Ok != pi_Bmp->LockBits(&k_Rect, ImageLockModeWrite, e_Format, &i_Data))
    {
        delete pi_Bmp;
        throw L"Error locking Bitmap.";
    }

    if (i_Mat.elemSize1() == 1) // 1 byte per channel (8 bit gray scale palette)
    {
        BYTE* u8_Src = i_Mat.data;
        BYTE* u8_Dst = (BYTE*)i_Data.Scan0;
        int s32_RowLen = i_Mat.cols * i_Mat.channels(); // != i_Mat.step !!

        // The Windows Bitmap format requires all rows to be DWORD aligned (always!)
        // while OpenCV by default stores bitmap data sequentially.
        for (int R = 0; R < i_Mat.rows; R++)
        {
            memcpy(u8_Dst, u8_Src, s32_RowLen);
            u8_Src += i_Mat.step;     // step may be e.g. 3729
            u8_Dst += i_Data.Stride;  // while Stride is 3732
        }
    }
    else // i_Mat may contain e.g. float data (CV_32F -> 4 bytes per pixel grayscale)
    {
        int s32_Type;
        switch (i_Mat.channels())
        {
            case 1: s32_Type = CV_8UC1; break;
            case 3: s32_Type = CV_8UC3; break;
            default: throw L"Image format not supported.";
        }
        CvMat i_Dst;
        cvInitMatHeader(&i_Dst, i_Mat.rows, i_Mat.cols, s32_Type, i_Data.Scan0, i_Data.Stride);

        CvMat i_Img = i_Mat;
        cvConvertImage(&i_Img, &i_Dst, 0);
    }

    pi_Bmp->UnlockBits(&i_Data);

    // Add the grayscale palette if required.
    if (e_Format == PixelFormat8bppIndexed)
    {
        CByteArray i_Arr;
        i_Arr.SetSize(sizeof(ColorPalette) + 256 * sizeof(ARGB));
        ColorPalette* pk_Palette = (ColorPalette*)i_Arr.GetData();
        pk_Palette->Count = 256;
        pk_Palette->Flags = PaletteFlagsGrayScale;

        ARGB* pk_Color = &pk_Palette->Entries[0];
        for (int i = 0; i < 256; i++)
        {
            pk_Color[i] = Color::MakeARGB(255, i, i, i);
        }

        if (Ok != pi_Bmp->SetPalette(pk_Palette))
        {
            delete pi_Bmp;
            throw L"Error setting grayscale palette.";
        }
    }
    return pi_Bmp;
}
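A minimal usage sketch, assuming GDI+ has already been started with GdiplusStartup; the function allocates the Bitmap with new and signals failure by throwing wide-string literals, so the caller is responsible for both:

try
{
    Bitmap* pBmp = CPublicFunction::CopyMatToBmp(frameMat);  // frameMat: e.g. a CV_8UC3 frame (hypothetical name)
    // ... draw pBmp via Gdiplus::Graphics ...
    delete pBmp;   // the caller owns the Bitmap
}
catch (const wchar_t* msg)
{
    OutputDebugStringW(msg);
}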
Controlling window properties when displaying an image
cv::namedWindow("head", CV_WINDOW_NORMAL);
cv::resizeWindow("head", 150, 120);   // set the window size
cv::moveWindow("head", 1000, 100);    // set the window's top-left position
cv::imshow("head", imageROI);
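Note that cv::imshow only queues the repaint; the HighGUI event loop must run for the window to actually update, which is what cv::waitKey does:

cv::imshow("head", imageROI);
cv::waitKey(1);   // pump GUI events; waitKey(0) blocks until a key is pressed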
Extracting the image inside a rectangular region
cv::Mat mMat = m_findfacethread->GetImage();
cv::Mat imageROI = mMat(cv::Rect(x1, y, x2 - x1, y2 - y));
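If the coordinates come from a detector they can fall outside the frame, and mMat(rect) then asserts at runtime; intersecting the rect with the image bounds first is a cheap guard. A sketch:

cv::Rect rect(x1, y, x2 - x1, y2 - y);
rect &= cv::Rect(0, 0, mMat.cols, mMat.rows);                  // clamp to the image
cv::Mat imageROI = (rect.area() > 0) ? mMat(rect) : cv::Mat(); // empty Mat if no overlap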