革博士程序V1仓库
Вы не можете выбрать более 25 тем Темы должны начинаться с буквы или цифры, могут содержать дефисы(-) и должны содержать не более 35 символов.

1398 строки
63 KiB

  1. using System;
  2. using System.Collections.Generic;
  3. using System.Linq;
  4. using System.Runtime.InteropServices;
  5. using System.Text;
  6. using System.Threading.Tasks;
  7. using System.Xaml;
  8. using DocumentFormat.OpenXml.Vml;
  9. using HalconDotNet;
  10. using OpenCvSharp;
  11. using OpenCvSharp.XImgProc;
  12. namespace LeatherApp.Utils
  13. {
  14. public class OpenCVUtil
  15. {
  16. #region 模型寻边
  17. private static StructuredEdgeDetection _edgeDetect;
  18. public static void LoadEdgeMode()
  19. {
  20. if (_edgeDetect == null)
  21. _edgeDetect = OpenCvSharp.XImgProc.CvXImgProc.CreateStructuredEdgeDetection("model.yml");
  22. }
/// <summary>
/// Model-based edge finding: runs the pretrained structured-edge detector on a
/// downscaled copy of <paramref name="Roi"/>, binarizes the edge map with Otsu,
/// then samples several single-pixel scan lines and averages the detected
/// edge positions.
/// </summary>
/// <param name="image">Full source image (BGR).</param>
/// <param name="FindType">Direction selector supplied by callers; not read in this method.</param>
/// <param name="Roi">Region of the image to search.</param>
/// <param name="IsLeft">True: scan each row right-to-left; false: left-to-right.</param>
/// <returns>Edge x-coordinate in full-image pixels (Roi.X added back); Roi.X when no edge is found.</returns>
private static int EdgeClipping3(Mat image, int FindType, Rect Roi, bool IsLeft)
{
    Mat mat_rgb = image.Clone(Roi);
    int height = mat_rgb.Rows;
    int width = mat_rgb.Cols;
    int sf = 10; // downscale factor
    int pix = 5; // side length of mean-sampling patches (unused in this path)
    int pointNum = 15; // number of scan lines sampled for the edge
    int offsetGray = 5; // binarization tolerance (unused in this path)
    int length_t = 0;
    List<int> lines = new List<int>();
    List<int> total_t = new List<int>();
    // Downscale by sf to make model inference affordable.
    double sf_height = height / sf;
    double sf_width = width / sf;
    Cv2.Resize(mat_rgb, mat_rgb, new Size(sf_width, sf_height), 0, 0, InterpolationFlags.Linear);
    Mat himg = new Mat();
    Mat edgeimg = new Mat();
    Cv2.CvtColor(mat_rgb, edgeimg, ColorConversionCodes.BGR2RGB);
    Mat edges = new Mat();
    // The structured-edge model expects float RGB input scaled to [0, 1].
    edgeimg.ConvertTo(edgeimg, MatType.CV_32F, 1 / 255.0);
    if (_edgeDetect == null)
        LoadEdgeMode();
    //Cv2.Normalize(edgeimg, edgeimg, 1.0, 0, NormTypes.L2, -1);
    _edgeDetect.DetectEdges(edgeimg, edges);
    Mat image_Otsu = new Mat();
    int hDis = (int)sf_height / (pointNum + 2); // row spacing; skips the two border rows
    // Scale the edge-probability map to 8-bit and binarize with Otsu.
    edges.ConvertTo(image_Otsu, MatType.CV_8U, 255.0);
    Cv2.Threshold(image_Otsu, image_Otsu, 0, 255, ThresholdTypes.Otsu);
    // Per-scan-line edge positions (kept for diagnostics).
    int[] total = new int[pointNum];
    // Sample pointNum evenly spaced single-pixel rows.
    for (int i = 0; i < pointNum; i++)
    {
        // Cut out the current scan line.
        Rect roi = new Rect(0, hDis + hDis * i, (int)sf_width, 1);
        Mat current_segment = image_Otsu.Clone(roi);
        //Mat filled_image3 = current_segment.Clone();
        Mat filled_image3 = current_segment;
#if true
        // Walk the scan line: the first value change marks the leading side of
        // the (thick) binarized edge response, the second one its trailing
        // side; the midpoint of the two is taken as the edge position.
        int numX = 0;
        int tm = 0;
        byte tempVal = 0;
        bool findOne = false;
        if (!IsLeft)
        {
            tempVal = filled_image3.At<byte>(0, 0);
            //filled_image3.
            for (int j = 0; j < filled_image3.Cols; j++)
            {
                if (filled_image3.At<byte>(0, j) != tempVal)
                {
                    if (!findOne)
                    {
                        tm = j;
                        findOne = true;
                        tempVal = filled_image3.At<byte>(0, j);
                    }
                    else
                    {
                        //numX = j;
                        numX = (tm + j) / 2;
                        break;
                    }
                }
            }
        }
        else
        {
            tempVal = filled_image3.At<byte>(0, filled_image3.Cols - 1);
            for (int j = filled_image3.Cols - 1; j >= 0; j--)
            {
                if (filled_image3.At<byte>(0, j) != tempVal)
                {
                    if (!findOne)
                    {
                        tm = j;
                        findOne = true;
                        tempVal = filled_image3.At<byte>(0, j);
                    }
                    else
                    {
                        //numX = j;
                        numX = (tm + j) / 2;
                        break;
                    }
                }
            }
        }
#else
        int numX = Cv2.CountNonZero(filled_image3);
#endif
        //length_t = (numX > (sf_width / 2)) ? numX :(int)(sf_width - numX);
        length_t = numX;
        total[i] = (length_t);
        // Rows where no edge was found (numX == 0) are excluded from the average.
        if (length_t > 0)
            total_t.Add(length_t);
    }
    // Average the per-row positions; stays 0 when no row produced an edge.
    int length = 0;
    if (total_t.Count > 0)
    {
        length = (int)total_t.Average();
        // Placeholder +/- 0 offsets kept for per-side tuning.
        if (IsLeft)
            length = length - 0;
        else
            length = length + 0;
    }
    // Scale back to full resolution and shift into full-image coordinates.
    length = length * sf + Roi.X;
    return length;
}
#endregion
  145. public static Mat resize(Mat mat, int width, int height, out int xw, out int xh)
  146. {
  147. OpenCvSharp.Size dsize = new OpenCvSharp.Size(width, height);
  148. //Mat mat2 = new Mat();
  149. //Cv2.Resize(mat, mat2, dsize);
  150. //ResizeUniform(mat, dsize, out mat2, out xw, out xh);
  151. xw = (width - mat.Cols) / 2;
  152. xh = (height - mat.Rows) / 2;
  153. Mat mat2 = new Mat(height, width, MatType.CV_8UC3, new Scalar(114, 114, 114));
  154. Rect roi = new Rect((width - mat.Cols) / 2, (height - mat.Rows) / 2, mat.Cols, mat.Rows);
  155. mat.CopyTo(new Mat(mat2, roi));
  156. return mat2;
  157. }
  158. public static int ResizeUniform(Mat src, Size dst_size, out Mat dst, out int xw, out int xh)
  159. {
  160. xw = xh = 0;
  161. int w = src.Cols;
  162. int h = src.Rows;
  163. int dst_w = dst_size.Width;
  164. int dst_h = dst_size.Height;
  165. //std::cout << "src: (" << h << ", " << w << ")" << std::endl;
  166. dst = new Mat(dst_h, dst_w, MatType.CV_8UC3, new Scalar(114, 114, 114));
  167. float[] ratio = new float[2];
  168. float ratio_src = w * 1.0f / h;
  169. float ratio_dst = dst_w * 1.0f / dst_h;
  170. int tmp_w = 0;
  171. int tmp_h = 0;
  172. if (ratio_src > ratio_dst)
  173. {
  174. tmp_w = dst_w;
  175. tmp_h = (int)(dst_w * 1.0f / w) * h;
  176. ratio[0] = (float)w / (float)tmp_w;
  177. ratio[1] = (float)h / (float)tmp_h;
  178. }
  179. else if (ratio_src < ratio_dst)
  180. {
  181. tmp_h = dst_h;
  182. tmp_w = (int)((dst_h * 1.0f / h) * w);
  183. ratio[0] = (float)w / (float)tmp_w;
  184. ratio[1] = (float)h / (float)tmp_h;
  185. }
  186. else
  187. {
  188. Cv2.Resize(src, dst, dst_size);
  189. ratio[0] = (float)w / (float)tmp_w;
  190. ratio[1] = (float)h / (float)tmp_h;
  191. return 0;
  192. }
  193. //std::cout << "tmp: (" << tmp_h << ", " << tmp_w << ")" << std::endl;
  194. Mat tmp = new Mat();
  195. Cv2.Resize(src, tmp, new Size(tmp_w, tmp_h));
  196. unsafe
  197. {
  198. if (tmp_w != dst_w)
  199. { //高对齐,宽没对齐
  200. int index_w = (int)((dst_w - tmp_w) / 2.0);
  201. xw = index_w;
  202. //std::cout << "index_w: " << index_w << std::endl;
  203. for (int i = 0; i < dst_h; i++)
  204. {
  205. Buffer.MemoryCopy(IntPtr.Add(tmp.Data, i * tmp_w * 3).ToPointer(), IntPtr.Add(dst.Data, i * dst_w * 3 + index_w * 3).ToPointer(), tmp_w * 3, tmp_w * 3);
  206. }
  207. }
  208. else if (tmp_h != dst_h)
  209. { //宽对齐, 高没有对齐
  210. int index_h = (int)((dst_h - tmp_h) / 2.0);
  211. xh = index_h;
  212. //std::cout << "index_h: " << index_h << std::endl;
  213. Buffer.MemoryCopy(tmp.Data.ToPointer(), IntPtr.Add(dst.Data, index_h * dst_w * 3).ToPointer(), tmp_w * tmp_h * 3, tmp_w * tmp_h * 3);
  214. }
  215. else
  216. {
  217. }
  218. }
  219. return 0;
  220. }
/// <summary>
/// YOLO-style letterbox: scales <paramref name="mat"/> to fit inside
/// <paramref name="sz"/> preserving aspect ratio, then pads the borders with
/// <paramref name="color"/>. NOTE: the input <paramref name="mat"/> is resized
/// and padded IN PLACE and then returned.
/// </summary>
/// <param name="mat">Image to letterbox (mutated in place).</param>
/// <param name="sz">Target canvas size.</param>
/// <param name="color">Border fill color.</param>
/// <param name="ratio">Out: scale factor actually applied.</param>
/// <param name="diff">Out: left/top padding (before the odd-pixel remainder).</param>
/// <param name="diff2">Out: odd-pixel remainders added to the right/top padding.</param>
/// <param name="auto">When true and the aspect ratios differ, the computed padding is kept unchanged (the branch is deliberately empty).</param>
/// <param name="scaleFill">When true (and the auto branch is not taken), stretches to fill the canvas with no padding.</param>
/// <param name="scaleup">When false, never scales above 1:1 (only shrinks).</param>
public static Mat CreateLetterbox(Mat mat, OpenCvSharp.Size sz, Scalar color, out float ratio, out OpenCvSharp.Point diff, out OpenCvSharp.Point diff2, bool auto = true, bool scaleFill = false, bool scaleup = true)
{
    //Mat mat = new Mat();
    //Cv2.CvtColor(mat, mat, ColorConversionCodes.BGR2RGB);
    // Scale so the image fits inside sz on both axes.
    ratio = Math.Min((float)sz.Width / (float)mat.Width, (float)sz.Height / (float)mat.Height);
    if (!scaleup)
    {
        ratio = Math.Min(ratio, 1f);
    }
    OpenCvSharp.Size dsize = new OpenCvSharp.Size((int)Math.Round((float)mat.Width * ratio), (int)Math.Round((float)mat.Height * ratio));
    // Total padding required on each axis.
    int num = sz.Width - dsize.Width;
    int num2 = sz.Height - dsize.Height;
    float num3 = (float)sz.Height / (float)sz.Width;
    float num4 = (float)mat.Height / (float)mat.Width;
    if (auto && num3 != num4)
    {
        // Intentionally empty (decompiler artifact): padding stays as computed.
        bool flag = false;
    }
    else if (scaleFill)
    {
        // Stretch-to-fill: no padding at all, distort to the exact canvas size.
        num = 0;
        num2 = 0;
        dsize = sz;
    }
    // Split padding evenly; num7/num8 carry any odd-pixel remainder.
    int num5 = (int)Math.Round((float)num / 2f);
    int num6 = (int)Math.Round((float)num2 / 2f);
    int num7 = 0;
    int num8 = 0;
    if (num5 * 2 != num)
    {
        num7 = num - num5 * 2;
    }
    if (num6 * 2 != num2)
    {
        num8 = num2 - num6 * 2;
    }
    if (mat.Width != dsize.Width || mat.Height != dsize.Height)
    {
        Cv2.Resize(mat, mat, dsize);
    }
    // Pad: the height remainder goes to the top edge, the width remainder to the right edge.
    Cv2.CopyMakeBorder(mat, mat, num6 + num8, num6, num5, num5 + num7, BorderTypes.Constant, color);
    diff = new OpenCvSharp.Point(num5, num6);
    diff2 = new OpenCvSharp.Point(num7, num8);
    return mat;
}
  266. /// <summary>
  267. /// 裁切指定区域
  268. /// </summary>
  269. /// <param name="mat"></param>
  270. /// <param name="x"></param>
  271. /// <param name="y"></param>
  272. /// <param name="width"></param>
  273. /// <param name="height"></param>
  274. /// <returns></returns>
  275. public static Mat cutImage(Mat mat, int x, int y, int width, int height)
  276. {
  277. Rect roi = new Rect(x, y, width, height);
  278. return new Mat(mat, roi).Clone();
  279. }
  280. /// <summary>
  281. /// 合并MAT(宽高必需一致)
  282. /// </summary>
  283. /// <param name="mats"></param>
  284. /// <param name="isHorizontal"></param>
  285. /// <returns></returns>
  286. public static Mat mergeImage_sameSize(Mat[] mats, bool isHorizontal = true)
  287. {
  288. Mat matOut = new Mat();
  289. if (isHorizontal)
  290. Cv2.HConcat(mats, matOut);//横向拼接
  291. else
  292. Cv2.VConcat(mats, matOut);//纵向拼接
  293. return matOut;
  294. }
  295. /// <summary>
  296. /// 合并MAT-纵向
  297. /// </summary>
  298. /// <param name="mat1"></param>
  299. /// <param name="mat2"></param>
  300. /// <returns></returns>
  301. public static Mat mergeImageV(Mat mat1,Mat mat2 )
  302. {
  303. Mat matOut = new Mat();
  304. //push_back 方法将图像2拷贝到图像1的最后一行
  305. Mat img_merge = new Mat();//要先设置大小吗
  306. img_merge.PushBack(mat1);
  307. img_merge.PushBack(mat2);
  308. return matOut;
  309. }
  310. /// <summary>
  311. /// 合并MAT-横向
  312. /// </summary>
  313. /// <param name="mat1"></param>
  314. /// <param name="mat2"></param>
  315. /// <returns></returns>
  316. public static Mat mergeImageH(Mat[] mats)
  317. {
  318. Stitcher stitcher = Stitcher.Create(Stitcher.Mode.Scans);
  319. Mat pano = new Mat();
  320. var status = stitcher.Stitch(mats, pano);
  321. if (status == Stitcher.Status.OK)
  322. return pano;
  323. else
  324. return null;
  325. // //1.新建一个要合并的图像
  326. // Size size = new Size(image1.Cols + image2.Cols, Math.Max(image1.Rows, image1.Rows));
  327. //Mat img_merge=new Mat();
  328. //img_merge.Create(size,new MatType( image1.Depth()));
  329. ////img_merge = Scalar.All(0);
  330. //Mat outImg_left, outImg_right;
  331. ////2.在新建合并图像中设置感兴趣区域
  332. //outImg_left = img_merge.a(Rect(0, 0, image1.cols, image1.rows));
  333. //outImg_right = img_merge(Rect(image1.cols, 0, image1.cols, image1.rows));
  334. ////3.将待拷贝图像拷贝到感性趣区域中
  335. //image1.copyTo(outImg_left);
  336. //image2.copyTo(outImg_right);
  337. //namedWindow("image1", 0);
  338. //Cv2.ImShow("image1", img_merge);
  339. }
  340. /// <summary>
  341. /// 获取最小外接矩形(正矩形)
  342. /// </summary>
  343. /// <returns></returns>
  344. public static Mat getMimOutRect(Mat srcImg)
  345. {
  346. try
  347. {
  348. //Mat srcImg = new Mat(@"E:\D\AutoCode\LeatherProject\LeatherApp\bin\Debug\testpic\2\11.bmp");
  349. Mat grayImg = new Mat();
  350. Mat binaryImg = new Mat();
  351. Cv2.CvtColor(srcImg, grayImg, ColorConversionCodes.BGR2GRAY);
  352. //Cv2.ImShow("src", srcImg);
  353. //toImg(this.pictureBox1, grayImg);
  354. //
  355. //double thresh = 30;//小于此值(超小超是所选黑色越多)转为maxval色
  356. //double maxval = 255;//上面值转为255白色
  357. double thresh = 80;//小于此值(超小超是所选黑色越多)转为maxval色
  358. double maxval = 255;//上面值转为255白色
  359. Cv2.Threshold(grayImg, binaryImg, thresh, maxval, ThresholdTypes.Binary);//转化黑白二值图 thresh:阀值
  360. //颜色反转
  361. //byte grayPixel = 0;
  362. //for (int r = 0; r < binary.Rows; r++)
  363. //{
  364. // for (int c = 0; c < binary.Cols; c++)
  365. // {
  366. // grayPixel = binary.At<byte>(r, c);
  367. // binary.Set<byte>(r, c, (byte)(255 - grayPixel));
  368. // }
  369. //}
  370. //FindContours让轮廓
  371. OpenCvSharp.Point[][] contours; //轮廓查找结果变量
  372. HierarchyIndex[] hierarchy; //轮廓拓扑结构变量
  373. //====RetrievalModes:
  374. //CV_RETR_EXTERNAL表示只检测外轮廓
  375. //CV_RETR_LIST检测的轮廓不建立等级关系
  376. //CV_RETR_CCOMP建立两个等级的轮廓,上面的一层为外边界,里面的一层为内孔的边界信息。如果内孔内还有一个连通物体,这个物体的边界也在顶层。
  377. //CV_RETR_TREE建立一个等级树结构的轮廓。具体参考contours.c这个demo
  378. //====ContourApproximationModes:
  379. //CV_CHAIN_APPROX_NONE存储所有的轮廓点,相邻的两个点的像素位置差不超过1,即max(abs(x1 - x2),abs(y2 - y1))== 1
  380. //CV_CHAIN_APPROX_SIMPLE压缩水平方向,垂直方向,对角线方向的元素,只保留该方向的终点坐标,例如一个矩形轮廓只需4个点来保存轮廓信息
  381. //CV_CHAIN_APPROX_TC89_L1,CV_CHAIN_APPROX_TC89_KCOS使用teh - Chinl chain 近似算法
  382. Cv2.FindContours(binaryImg, out contours, out hierarchy, RetrievalModes.CComp, ContourApproximationModes.ApproxSimple);
  383. //DrawContours将结果画出并返回结果
  384. Mat dst_Image = Mat.Zeros(grayImg.Size(), srcImg.Type());
  385. Random rnd = new Random();
  386. int maxIndex = 0, maxLength = 0;
  387. for (int i = 0; i < contours.Length; i++)
  388. {
  389. if (contours[i].Length > maxLength)
  390. {
  391. maxLength = contours[i].Length;
  392. maxIndex = i;
  393. }
  394. }
  395. Scalar color = new Scalar(rnd.Next(0, 0), rnd.Next(0, 255), rnd.Next(0, 255));
  396. //var rectMin = Cv2.MinAreaRect(contours[i]);
  397. //这里有三个参数 分别是中心位置,旋转角度,缩放程度
  398. //Cv2.WarpAffine(srcImg, rectMin.Center, (height, width))
  399. //Rect rect = rectMin.BoundingRect();//
  400. Rect rect = Cv2.BoundingRect(contours[maxIndex]);// 获取矩形边界框
  401. //OpenCvSharp.Point pt1 = new OpenCvSharp.Point(rect.X, rect.Y);
  402. //OpenCvSharp.Point pt2 = new OpenCvSharp.Point(rect.X + rect.Width, rect.Y + rect.Height); //定义矩形对顶点
  403. //Cv2.Rectangle(srcImg, pt1, pt2, color, 1); //绘制矩形边框
  404. //Cv2.Line(srcImg, pt1, pt2, color, 1); //矩形单个对角线相,两点
  405. //Cv2.DrawContours(srcImg, contours, maxIndex, color, 2, LineTypes.Link8, hierarchy);
  406. //toImg(this.pictureBox1, srcImg);
  407. //return cutImage(srcImg, rect.X, rect.Y, rect.Width, rect.Height);
  408. return cutImage(srcImg, rect.X, 0, rect.Width, rect.Height);
  409. //
  410. //建立轮廓接受数组
  411. //Point[][] contours;
  412. //HierarchyIndex[] hierarchy;
  413. //Cv2.FindContours(binary, out contours, out hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxNone);
  414. //最小外接矩形接收数组
  415. //RotatedRect[] rotateRect = new RotatedRect[contours.Length];
  416. //Point[][] contours_poly = new Point[contours.Length][];
  417. //int maxPointCount = 0, index = -1;
  418. //for (int x = 0; x < contours.Length; x++)
  419. //{
  420. // if (maxPointCount < contours[x].Length)
  421. // {
  422. // maxPointCount = contours[x].Length;
  423. // index = x;
  424. // }
  425. //}
  426. }
  427. catch (Exception ex)
  428. {
  429. return null;
  430. }
  431. }
  432. #region 获取最大内接矩形
  433. /// <summary>
  434. /// 获取最大内接矩形(高度使用原图值未裁剪)
  435. /// </summary>
  436. /// <param name="srcImg"></param>
  437. /// <returns></returns>
  438. public static Mat getMaxInsetRect(Mat srcImg, double thresh = 45, double maxval = 255)
  439. {
  440. API.OutputDebugString("--------start:"+DateTime.Now.ToString("mm:ss fff"));
  441. var dst = new Mat();
  442. //转灰度
  443. Cv2.CvtColor(srcImg, dst, ColorConversionCodes.RGB2GRAY);
  444. API.OutputDebugString("--------转灰度:" + DateTime.Now.ToString("mm:ss fff"));
  445. //转化黑白二值图 thresh:阀值
  446. //double thresh = 50;//小于此值(超小超是所选黑色越多)转为maxval色
  447. //double maxval = 255;//上面值转为255白色
  448. Cv2.Threshold(dst, dst, thresh, maxval, ThresholdTypes.Binary);
  449. API.OutputDebugString("--------黑白二值图:" + DateTime.Now.ToString("mm:ss fff"));
  450. //取轮廓
  451. Cv2.FindContours(dst, out var contours, out var hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxSimple);
  452. int maxIndex = 0, maxLength = 0;
  453. for (int i = 0; i < contours.Length; i++)
  454. {
  455. if (contours[i].Length > maxLength)
  456. {
  457. maxLength = contours[i].Length;
  458. maxIndex = i;
  459. }
  460. }
  461. API.OutputDebugString("--------取全部轮廓:" + DateTime.Now.ToString("mm:ss fff"));
  462. List<List<Point>> approxContours = new List<List<Point>>();
  463. //先求出多边形的近似轮廓,减少轮廓数量,方便后面计算
  464. var approxContour = Cv2.ApproxPolyDP(contours[maxIndex], 20, true);
  465. API.OutputDebugString("--------减少轮廓数量:" + DateTime.Now.ToString("mm:ss fff"));
  466. approxContours.Add(approxContour.ToList());
  467. //绘制边缘
  468. //DrawContour(srcImg, approxContour, Scalar.Red, 20);
  469. //return srcImg;
  470. Rect rect = GetMaxInscribedRect(srcImg, approxContour.ToList());
  471. API.OutputDebugString("--------取最大内切矩形:" + DateTime.Now.ToString("mm:ss fff"));
  472. var result= cutImage(srcImg, rect.X, 0, rect.Width, srcImg.Height);
  473. API.OutputDebugString("--------裁剪完成:" + DateTime.Now.ToString("mm:ss fff"));
  474. return result;
  475. }
  476. public static Mat getMaxInsetRect2(Mat mat_rgb,bool isLeft,int marginHoleWidth,out int marginWidth)
  477. {
  478. int bian = 3500;
  479. Rect Roi;
  480. if (!isLeft)
  481. Roi = new Rect(mat_rgb.Width - bian, 0, bian, mat_rgb.Height);
  482. else
  483. Roi = new Rect(0, 0, bian, mat_rgb.Height);
  484. int type = isLeft ? 1 : 0;
  485. int len = EdgeClipping3(mat_rgb, type, Roi, isLeft);
  486. #if false
  487. //Mat mat_rgb = new Mat("E:\\CPL\\测试代码\\边缘检测\\test\\test\\test\\img\\19.bmp");
  488. Mat image_gray = new Mat();
  489. Cv2.CvtColor(mat_rgb, image_gray, ColorConversionCodes.BGR2GRAY);
  490. //cvtColor(image_RGB, image, COLOR_RGB2GRAY);
  491. int height = image_gray.Rows;
  492. int width = image_gray.Cols;
  493. // 算法定义:取均分5段图片的五条横线,经过一系列处理之后,二值化,找到沿边位置,然后取均值作为直边,在缩进一段有针眼的位置
  494. // 定义每段的行数
  495. int num_rows = 5;
  496. int segment_height = height / num_rows - 1;
  497. // 定义空数组保存结果
  498. int[] total = new int[num_rows];
  499. // 平均截取5行数据并处理图像
  500. for (int i = 0; i < num_rows; i++)
  501. {
  502. // 截取当前行的图像
  503. int start_row = i * segment_height;
  504. Rect roi = new Rect(0, start_row, width, 1);
  505. Mat current_segment = image_gray.Clone(roi);
  506. // 对当前行的图像进行平滑处理
  507. Mat smoothed_image = new Mat();
  508. Cv2.GaussianBlur(current_segment, smoothed_image, new Size(5, 1), 0);
  509. // 计算当前行的灰度直方图
  510. Mat absolute_histo = new Mat();
  511. Cv2.CalcHist(new Mat[] { smoothed_image }, new int[] { 0 }, new Mat(), absolute_histo, 1, new int[] { 256 }, new Rangef[] { new Rangef(0, 256) });
  512. Cv2.GaussianBlur(current_segment, smoothed_image, new Size(19, 1), 0);
  513. // 对图片进行分割i+1
  514. //double otsu_threshold;
  515. //threshold(smoothed_image, smoothed_image, 0, 255, THRESH_BINARY + THRESH_OTSU, &otsu_threshold);
  516. Cv2.Threshold(smoothed_image, smoothed_image, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
  517. // 使用形态学操作进行孔洞填充
  518. Mat kernel = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(25, 1));
  519. Mat filled_image = new Mat();
  520. Cv2.MorphologyEx(smoothed_image, filled_image, MorphTypes.Close, kernel);
  521. // 取较长的一个值作为皮革的宽度
  522. int num_255 = Cv2.CountNonZero(filled_image);
  523. int length_t = (num_255 > width / 2) ? num_255 : width - num_255;
  524. total[i] = (length_t);
  525. API.OutputDebugString($"getMaxInsetRect2: 【{i + 1}】{length_t}={num_255}|{width}");
  526. }
  527. // 取平均值作为宽度
  528. int length = (int)total.Average();
  529. marginWidth = width-length;
  530. #endif
  531. int length = (len > mat_rgb.Width / 2) ? len : mat_rgb.Width - len;
  532. marginWidth = mat_rgb.Width - length;
  533. // 判断数据是否异常,判断当前线段的宽度是否大于设定像素的偏差
  534. //int abnormal_pxl = 200;
  535. //for (int i = 0; i < num_rows; i++)
  536. //{
  537. // if (Math.Abs(total[i] - length) > abnormal_pxl)
  538. // throw new Exception("数据异常,当段图片的宽度有问题!");
  539. //}
  540. //右侧相机,拍摄产品,边缘位于右侧判断,缩进100像素,去点针眼
  541. //Cv2.Line(mat_rgb, new Point(length - 100, 0), new Point(length - 100, height), new Scalar(255, 0, 0), 20);
  542. ////左侧相机,拍摄产品,边缘位于左侧判断,缩进100像素,去点针眼
  543. //Cv2.Line(mat_rgb, new Point(width - length + 100, 0), new Point(width - length + 100, height), new Scalar(0, 255, 0), 20);
  544. //int decWidth = width - length + marginHoleWidth;
  545. //if (isLeft)
  546. // return cutImage(mat_rgb, decWidth, 0, width- decWidth, height);
  547. //else
  548. // return cutImage(mat_rgb, 0, 0, width - decWidth, height);
  549. API.OutputDebugString($"getMaxInsetRect2:margin={marginWidth},length={length}({marginHoleWidth}),isLeft={isLeft},mat_rgb={mat_rgb.Width}*{mat_rgb.Height},w={length - marginHoleWidth},h={mat_rgb.Height}");
  550. if (isLeft)
  551. return cutImage(mat_rgb, mat_rgb.Width - length + marginHoleWidth, 0, length - marginHoleWidth, mat_rgb.Height);
  552. else
  553. return cutImage(mat_rgb, 0, 0, length - marginHoleWidth, mat_rgb.Height);
  554. //if (isLeft)
  555. // return cutImage(mat_rgb, length + marginHoleWidth, 0, length - marginHoleWidth, mat_rgb.Height);
  556. //else
  557. // return cutImage(mat_rgb, 0, 0, length - marginHoleWidth, mat_rgb.Height);
  558. }
  559. /// <summary>
  560. ///
  561. /// </summary>
  562. /// <param name="image">图片</param>
  563. /// <param name="FindType">0:从左往右找边,1:从右往左找边</param>
  564. /// <param name="Roi">寻找区域</param>
  565. /// <returns></returns>
  566. public static int EdgeClipping(Mat image, int FindType, Rect Roi)
  567. {
  568. DateTimeOffset startTime = DateTimeOffset.Now;
  569. Mat mat_rgb = image.Clone(Roi);
  570. int height = mat_rgb.Rows;
  571. int width = mat_rgb.Cols;
  572. int sf = 10; //缩放比例
  573. int pix = 5; //获取均值区域长宽像素
  574. int pointNum = 15; //获取找遍点数
  575. //按比例缩放
  576. int sf_height = height / sf;
  577. int sf_width = width / sf;
  578. Cv2.Resize(mat_rgb, mat_rgb, new Size(sf_width, sf_height), 0, 0, InterpolationFlags.Linear);
  579. Mat himg = new Mat();
  580. himg = mat_rgb.Clone();
  581. DateTimeOffset endTime = DateTimeOffset.Now;
  582. Console.WriteLine("图片缩小(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
  583. startTime = DateTimeOffset.Now;
  584. //滤过去除多余噪声
  585. //Cv2.EdgePreservingFilter(himg, himg, EdgePreservingMethods.RecursFilter);
  586. //Cv2.PyrMeanShiftFiltering(himg, himg, 10, 500, 3);
  587. Cv2.PyrMeanShiftFiltering(himg, himg, 1, 2, 1);
  588. //himg.ImWrite("himg.jpg");
  589. endTime = DateTimeOffset.Now;
  590. Console.WriteLine("滤过去除多余噪声(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
  591. startTime = DateTimeOffset.Now;
  592. //转灰度图
  593. Mat image_gray = new Mat();
  594. Cv2.CvtColor(himg, image_gray, ColorConversionCodes.BGR2GRAY);
  595. //image_gray.ImWrite("image_gray.jpg");
  596. //二值化
  597. Mat image_Otsu = new Mat();
  598. int hDis = sf_height / (pointNum + 2); //去除边缘两点
  599. #if false
  600. List<double> LeftAvg = new List<double>();
  601. List<double> RightAvg = new List<double>();
  602. //double thb = Cv2.Threshold(image_gray, image_Otsu, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
  603. #region 多点获取二值化均值
  604. for (int i = 0; i < pointNum; i++)
  605. {
  606. Rect roiLeft = new Rect(0, hDis + hDis * i, pix, pix);
  607. Mat current_segmentL = image_gray.Clone(roiLeft);
  608. //Scalar ttr = current_segmentL.Mean();
  609. LeftAvg.Add(current_segmentL.Mean().Val0);
  610. Rect roiRight = new Rect(sf_width - pix, hDis + hDis * i, pix, pix);
  611. Mat current_segmentR = image_gray.Clone(roiRight);
  612. RightAvg.Add(current_segmentR.Mean().Val0);
  613. }
  614. double thres = (RightAvg.Average() + LeftAvg.Average())/2;
  615. #endregion
  616. #else
  617. double min, max;
  618. image_gray.MinMaxLoc(out min, out max);
  619. double thres = (min + max) / 2;
  620. #endif
  621. //Cv2.Threshold(image_gray, image_Otsu, 0, 255, ThresholdTypes.Otsu);
  622. double thb = Cv2.Threshold(image_gray, image_Otsu, thres, 255, ThresholdTypes.Binary);
  623. //image_Otsu.ImWrite("Otsu1.jpg");
  624. endTime = DateTimeOffset.Now;
  625. Console.WriteLine("灰度图二值化(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
  626. startTime = DateTimeOffset.Now;
  627. // 定义空数组保存结果
  628. int[] total = new int[pointNum];
  629. List<int> total_t = new List<int>();
  630. bool isLeft = FindType == 0 ? true : false;
  631. // 平均截取pointNum行数据并处理图像
  632. for (int i = 0; i < pointNum; i++)
  633. {
  634. // 截取当前行的图像
  635. Rect roi = new Rect(0, hDis + hDis * i, sf_width, 1);
  636. Mat current_segment = image_Otsu.Clone(roi);
  637. #if false
  638. #region 预处理
  639. // 对当前行的图像进行平滑处理
  640. Mat smoothed_image2 = new Mat();
  641. Cv2.GaussianBlur(current_segment, smoothed_image2, new Size(5, 1), 0);
  642. // 计算当前行的灰度直方图
  643. Mat absolute_histo2 = new Mat();
  644. Cv2.CalcHist(new Mat[] { smoothed_image2 }, new int[] { 0 }, new Mat(), absolute_histo2, 1, new int[] { 256 }, new Rangef[] { new Rangef(0, 256) });
  645. Cv2.GaussianBlur(current_segment, smoothed_image2, new Size(9, 1), 0);
  646. // 对图片进行分割
  647. //double otsu_threshold;
  648. //threshold(smoothed_image, smoothed_image, 0, 255, THRESH_BINARY + THRESH_OTSU, &otsu_threshold);
  649. double otsu_threshold2 = Cv2.Threshold(smoothed_image2, smoothed_image2, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
  650. // 使用形态学操作进行孔洞填充
  651. Mat kernel3 = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(5, 1));
  652. Mat filled_image3 = new Mat();
  653. Cv2.MorphologyEx(smoothed_image2, filled_image3, MorphTypes.Close, kernel3);
  654. #endregion
  655. #else
  656. Mat filled_image3 = current_segment.Clone();
  657. #endif
  658. #if true
  659. //从左到右判断边和从右到左判断边
  660. int numX = 0;
  661. byte tempVal = 0;
  662. if (isLeft)
  663. {
  664. tempVal = filled_image3.At<byte>(0, 0);
  665. for (int j = 0; j < filled_image3.Cols; j++)
  666. {
  667. if (filled_image3.At<byte>(0, j) != tempVal)
  668. {
  669. numX = j;
  670. break;
  671. }
  672. }
  673. }
  674. else
  675. {
  676. tempVal = filled_image3.At<byte>(0, filled_image3.Cols - 1);
  677. for (int j = filled_image3.Cols - 1; j >= 0; j--)
  678. {
  679. if (filled_image3.At<byte>(0, j) != tempVal)
  680. {
  681. numX = j;
  682. break;
  683. }
  684. }
  685. }
  686. #else
  687. int numX = Cv2.CountNonZero(filled_image3);
  688. #endif
  689. //int length_t = (numX > (sf_width / 2)) ? numX :sf_width - numX;
  690. int length_t = numX;
  691. total[i] = (length_t);
  692. if (length_t > 0)
  693. total_t.Add(length_t);
  694. }
  695. // 取平均值作为宽度
  696. int length = (int)total_t.Average();
  697. endTime = DateTimeOffset.Now;
  698. Console.WriteLine("计算边(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
  699. // 判断数据是否异常,判断当前线段的宽度是否大于设定像素的偏差
  700. //int abnormal_pxl = 100 / 4;
  701. //for (int i = 0; i < pointNum; i++)
  702. //{
  703. // if (Math.Abs(total[i] - length) > abnormal_pxl)
  704. // Console.WriteLine("数据异常!");
  705. // //出现数据异常,当段图片的宽度有问题
  706. //}
  707. //乘上换算系数还原
  708. length = length * sf + Roi.X;
  709. return length;
  710. }
  711. public static int EdgeClipping2(Mat image, int FindType, Rect Roi, bool IsLeft)
  712. {
  713. DateTimeOffset startTime = DateTimeOffset.Now;
  714. Mat mat_rgb = image.Clone(Roi);
  715. int height = mat_rgb.Rows;
  716. int width = mat_rgb.Cols;
  717. int sf = 10; //缩放比例
  718. int pix = 5; //获取均值区域长宽像素
  719. int pointNum = 15; //获取找遍点数
  720. int offsetGray = 5; //二值化偏差
  721. //按比例缩放
  722. int sf_height = height / sf;
  723. int sf_width = width / sf;
  724. Cv2.Resize(mat_rgb, mat_rgb, new Size(sf_width, sf_height), 0, 0, InterpolationFlags.Linear);
  725. Mat himg = new Mat();
  726. himg = mat_rgb.Clone();
  727. DateTimeOffset endTime = DateTimeOffset.Now;
  728. Console.WriteLine("图片缩小(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
  729. startTime = DateTimeOffset.Now;
  730. //滤过去除多余噪声
  731. //Cv2.EdgePreservingFilter(himg, himg, EdgePreservingMethods.NormconvFilter);
  732. //Cv2.PyrMeanShiftFiltering(himg, himg, 1, 2, 1);
  733. Cv2.PyrMeanShiftFiltering(himg, himg, 10, 17, 2);
  734. //himg.ImWrite("himg.jpg");
  735. endTime = DateTimeOffset.Now;
  736. Console.WriteLine("滤过去除多余噪声(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
  737. startTime = DateTimeOffset.Now;
  738. //转灰度图
  739. Mat image_gray = new Mat();
  740. Cv2.CvtColor(himg, image_gray, ColorConversionCodes.BGR2GRAY);
  741. //image_gray.ImWrite("image_gray.jpg");
  742. Mat image_Canny = new Mat();
  743. Cv2.Canny(image_gray, image_Canny, 32, 64);
  744. //image_Canny.ImWrite("image_Canny.jpg");
  745. //二值化
  746. Mat image_Otsu = new Mat();
  747. int hDis = sf_height / (pointNum + 2); //去除边缘两点
  748. #if false //二值算法
  749. List<double> LeftAvg = new List<double>();
  750. List<double> RightAvg = new List<double>();
  751. //double thb = Cv2.Threshold(image_gray, image_Otsu, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
  752. #region 多点获取二值化均值
  753. for (int i = 0; i < pointNum; i++)
  754. {
  755. Rect roiLeft = new Rect(0, hDis + hDis * i, pix, pix);
  756. Mat current_segmentL = image_gray.Clone(roiLeft);
  757. //Scalar ttr = current_segmentL.Mean();
  758. LeftAvg.Add(current_segmentL.Mean().Val0);
  759. Rect roiRight = new Rect(sf_width - pix, hDis + hDis * i, pix, pix);
  760. Mat current_segmentR = image_gray.Clone(roiRight);
  761. RightAvg.Add(current_segmentR.Mean().Val0);
  762. }
  763. double thres = 0;
  764. if (IsLeft)
  765. {
  766. if (LeftAvg.Average() > RightAvg.Average())
  767. thres = RightAvg.Max() + offsetGray;
  768. else
  769. thres = RightAvg.Min() - offsetGray;
  770. }
  771. else
  772. {
  773. if (LeftAvg.Average() > RightAvg.Average())
  774. thres = LeftAvg.Min() - offsetGray;
  775. else
  776. thres = LeftAvg.Max() + offsetGray;
  777. }
  778. //double thres = (RightAvg.Average() + )/2;
  779. #endregion
  780. #endif
  781. #if false
  782. double min, max;
  783. image_gray.MinMaxLoc(out min, out max);
  784. double thres = (min + max) / 2;
  785. #endif
  786. #if false //二值化图片
  787. //Cv2.Threshold(image_gray, image_Otsu, 0, 255, ThresholdTypes.Otsu);
  788. double thb = Cv2.Threshold(image_gray, image_Otsu, thres, 255, ThresholdTypes.Binary);
  789. image_Otsu.ImWrite("Otsu1.jpg");
  790. Cv2.MedianBlur(image_Otsu, image_Otsu, 21);
  791. image_Otsu.ImWrite("Otsu2.jpg");
  792. endTime = DateTimeOffset.Now;
  793. Console.WriteLine("灰度图二值化(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
  794. startTime = DateTimeOffset.Now;
  795. #else
  796. image_Otsu = image_Canny;
  797. #endif
  798. // 定义空数组保存结果
  799. int[] total = new int[pointNum];
  800. List<int> total_t = new List<int>();
  801. bool isLeft = FindType == 0 ? true : false;
  802. // 平均截取pointNum行数据并处理图像
  803. for (int i = 0; i < pointNum; i++)
  804. {
  805. // 截取当前行的图像
  806. Rect roi = new Rect(0, hDis + hDis * i, sf_width, 1);
  807. Mat current_segment = image_Otsu.Clone(roi);
  808. #if false
  809. #region 预处理
  810. // 对当前行的图像进行平滑处理
  811. Mat smoothed_image2 = new Mat();
  812. Cv2.GaussianBlur(current_segment, smoothed_image2, new Size(5, 1), 0);
  813. // 计算当前行的灰度直方图
  814. Mat absolute_histo2 = new Mat();
  815. Cv2.CalcHist(new Mat[] { smoothed_image2 }, new int[] { 0 }, new Mat(), absolute_histo2, 1, new int[] { 256 }, new Rangef[] { new Rangef(0, 256) });
  816. Cv2.GaussianBlur(current_segment, smoothed_image2, new Size(9, 1), 0);
  817. // 对图片进行分割
  818. //double otsu_threshold;
  819. //threshold(smoothed_image, smoothed_image, 0, 255, THRESH_BINARY + THRESH_OTSU, &otsu_threshold);
  820. double otsu_threshold2 = Cv2.Threshold(smoothed_image2, smoothed_image2, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
  821. // 使用形态学操作进行孔洞填充
  822. Mat kernel3 = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(5, 1));
  823. Mat filled_image3 = new Mat();
  824. Cv2.MorphologyEx(smoothed_image2, filled_image3, MorphTypes.Close, kernel3);
  825. #endregion
  826. #else
  827. //Mat filled_image3 = current_segment.Clone();
  828. Mat filled_image3 = current_segment;
  829. #endif
  830. #if true
  831. //从左到右判断边和从右到左判断边
  832. int numX = 0;
  833. byte tempVal = 0;
  834. if (isLeft)
  835. {
  836. tempVal = filled_image3.At<byte>(0, 0);
  837. for (int j = 0; j < filled_image3.Cols; j++)
  838. {
  839. if (filled_image3.At<byte>(0, j) != tempVal)
  840. {
  841. numX = j;
  842. break;
  843. }
  844. }
  845. }
  846. else
  847. {
  848. tempVal = filled_image3.At<byte>(0, filled_image3.Cols - 1);
  849. for (int j = filled_image3.Cols - 1; j >= 0; j--)
  850. {
  851. if (filled_image3.At<byte>(0, j) != tempVal)
  852. {
  853. numX = j;
  854. break;
  855. }
  856. }
  857. }
  858. #else
  859. int numX = Cv2.CountNonZero(filled_image3);
  860. #endif
  861. //int length_t = (numX > (sf_width / 2)) ? numX :sf_width - numX;
  862. int length_t = numX;
  863. total[i] = (length_t);
  864. if (length_t > 0)
  865. total_t.Add(length_t);
  866. }
  867. // 取平均值作为宽度
  868. int length = (int)total_t.Average();
  869. endTime = DateTimeOffset.Now;
  870. Console.WriteLine("计算边(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
  871. // 判断数据是否异常,判断当前线段的宽度是否大于设定像素的偏差
  872. //int abnormal_pxl = 100 / 4;
  873. //for (int i = 0; i < pointNum; i++)
  874. //{
  875. // if (Math.Abs(total[i] - length) > abnormal_pxl)
  876. // Console.WriteLine("数据异常!");
  877. // //出现数据异常,当段图片的宽度有问题
  878. //}
  879. //乘上换算系数还原
  880. length = length * sf + Roi.X;
  881. return length;
  882. }
/// <summary>
/// Brute-force search for the largest axis-aligned rectangle inscribed in <paramref name="contour"/>.
/// Strategy (translated from the original Chinese notes):
///  1. every pair of contour points spans a candidate rectangle;
///  2. each new candidate is intersected with all previous candidates, and the corners of every
///     candidate and every intersection are collected into one point pool;
///  3. after de-duplication, every pair of pooled points spans a rectangle; a rectangle qualifies
///     when all four of its corners lie inside/on the contour and no contour point lies strictly
///     inside it — the largest-area qualifier wins.
/// Falls back to the bounding rectangle when no inscribed rectangle is found.
/// NOTE(review): cost grows very fast with contour size (pairs of pooled points, each checked
/// against the whole contour) — presumably callers pass a small/simplified contour; confirm.
/// </summary>
private static Rect GetMaxInscribedRect(Mat src, List<Point> contour)
{
    Rect maxInscribedRect = new Rect();
    List<Rect> allRect = new List<Rect>();
    List<Point> allPoint = new List<Point>(contour);
    // Step 1+2: form a rectangle from every pair of contour points, intersect it with all
    // rectangles seen so far, and pool the corners of rectangles and intersections alike.
    for (int i = 0; i < contour.Count; i++)
    {
        for (int j = i + 1; j < contour.Count; j++)
        {
            var p1 = contour[i];
            var p2 = contour[j];
            // Degenerate pair (shared row or column) cannot span a rectangle.
            if (p1.Y == p2.Y || p1.X == p2.X)
                continue;
            var tempRect = FromTowPoint(p1, p2);
            allPoint.AddRange(GetAllCorner(tempRect));
            // Intersect with every earlier candidate and pool the intersection corners too.
            foreach (var rect in allRect)
            {
                var intersectR = tempRect.Intersect(rect);
                if (intersectR != Rect.Empty)
                    allPoint.AddRange(GetAllCorner(intersectR));
            }
            allRect.Add(tempRect);
        }
    }
    // Step 3: de-duplicate the pool, then test every rectangle spanned by two pooled points.
    List<Point> distinctPoints = allPoint.Distinct().ToList();
    for (int i = 0; i < distinctPoints.Count; i++)
    {
        for (int j = i + 1; j < distinctPoints.Count; j++)
        {
            var tempRect = FromTowPoint(distinctPoints[i], distinctPoints[j]);
            // Reject unless all four corners are inside/on the contour AND no contour point
            // falls strictly inside the rectangle.
            if (!ContainPoints(contour, GetAllCorner(tempRect)) || ContainsAnyPt(tempRect, contour))
                continue;
            //src.Rectangle(tempRect, Scalar.RandomColor(), 2);
            // Keep the qualifier with the largest area.
            if (tempRect.Width * tempRect.Height > maxInscribedRect.Width * maxInscribedRect.Height)
                maxInscribedRect = tempRect;
        }
    }
    //src.Rectangle(maxInscribedRect, Scalar.Yellow, 2);
    // No inscribed rectangle found: fall back to the contour's bounding rectangle.
    return maxInscribedRect == Rect.Empty ? Cv2.BoundingRect(contour) : maxInscribedRect;
}
  935. public static Point[] GetAllCorner(Rect rect)
  936. {
  937. Point[] result = new Point[4];
  938. result[0] = rect.Location;
  939. result[1] = new Point(rect.X + rect.Width, rect.Y);
  940. result[2] = rect.BottomRight;
  941. result[3] = new Point(rect.X, rect.Y + rect.Height);
  942. return result;
  943. }
  944. private static bool ContainPoint(List<Point> contour, Point p1)
  945. {
  946. return Cv2.PointPolygonTest(contour, p1, false) > 0;
  947. }
  948. private static bool ContainPoints(List<Point> contour, IEnumerable<Point> points)
  949. {
  950. foreach (var point in points)
  951. {
  952. if (Cv2.PointPolygonTest(contour, point, false) < 0)
  953. return false;
  954. }
  955. return true;
  956. }
  957. private static void DrawContour(Mat mat, Point[] contour, Scalar color, int thickness)
  958. {
  959. for (int i = 0; i < contour.Length; i++)
  960. {
  961. if (i + 1 < contour.Length)
  962. Cv2.Line(mat, contour[i], contour[i + 1], color, thickness);
  963. }
  964. }
  965. /// <summary>
  966. /// 是否有任意一个点集合中的点包含在矩形内,在矩形边界上不算包含
  967. /// </summary>
  968. /// <param name="rect"></param>
  969. /// <param name="points"></param>
  970. /// <returns></returns>
  971. public static bool ContainsAnyPt(Rect rect, IEnumerable<Point> points)
  972. {
  973. foreach (var point in points)
  974. {
  975. if (point.X > rect.X && point.X < rect.X + rect.Width && point.Y < rect.BottomRight.Y && point.Y > rect.Y)
  976. return true;
  977. }
  978. return false;
  979. }
  980. /// <summary>
  981. /// 用任意两点组成一个矩形
  982. /// </summary>
  983. /// <param name="p1"></param>
  984. /// <param name="p2"></param>
  985. /// <returns></returns>
  986. public static Rect FromTowPoint(Point p1, Point p2)
  987. {
  988. if (p1.X == p2.X || p1.Y == p2.Y)
  989. return Rect.Empty;
  990. if (p1.X > p2.X && p1.Y < p2.Y)
  991. {
  992. (p1, p2) = (p2, p1);
  993. }
  994. else if (p1.X > p2.X && p1.Y > p2.Y)
  995. {
  996. (p1.X, p2.X) = (p2.X, p1.X);
  997. }
  998. else if (p1.X < p2.X && p1.Y < p2.Y)
  999. {
  1000. (p1.Y, p2.Y) = (p2.Y, p1.Y);
  1001. }
  1002. return Rect.FromLTRB(p1.X, p2.Y, p2.X, p1.Y);
  1003. }
  1004. #endregion
  1005. public static Mat CannyOperator(Mat srcImg, double threshold1 = 100, double threshold2 = 200)
  1006. {
  1007. var dst = new Mat();// srcImg.Rows, srcImg.Cols,MatType.CV_8UC1);
  1008. //转灰度
  1009. Cv2.CvtColor(srcImg, dst, ColorConversionCodes.RGB2GRAY);
  1010. //滤波
  1011. Cv2.Blur(dst, dst, new OpenCvSharp.Size(2, 2));
  1012. //double threshold1 = 255, threshold2 = 0;
  1013. Cv2.Canny(srcImg, dst, threshold1, threshold2);
  1014. //Cv2.ImShow("dst", dst);
  1015. return dst;
  1016. }
  1017. public static Mat LaplacianOperator(Mat srcImg, double threshold1 = 10, double threshold2 = 255)
  1018. {
  1019. Mat LaplacianImg = new Mat();
  1020. Mat gussImage = new Mat();
  1021. //高斯滤波: 每个像素点的值都由本身与和邻近区域的其他像素值经过加权平均后得到,加权系数越靠近中心越大,越远离中心越小
  1022. /* src:输入图像
  1023. dst:输出图像
  1024. ksize:高斯核的大小。ksize。宽度和高度可以不同,但它们都必须是正的和奇数的。或者,它们可以是0然后用sigma来计算
  1025. sigmaX:表示高斯核在X轴方向的标准偏差
  1026. sigmaY :表示高斯核在Y轴方向的标准偏差值,如果sigmaY 为0,则sigmaY =sigmaX,如果两个sigma都为零,则用ksize计算
  1027. borderType :一般用默认值
  1028. */
  1029. Cv2.GaussianBlur(srcImg, gussImage, new OpenCvSharp.Size(3, 3), 0, 0, BorderTypes.Default);
  1030. Mat grayImage = new Mat();
  1031. Cv2.CvtColor(gussImage, grayImage, ColorConversionCodes.RGB2GRAY); //灰度图
  1032. //Laplacian运算, 计算二阶导数
  1033. /*src 源图像
  1034. dst 输出图像,将具有与src相同的大小和相同数量的通道
  1035. ddepth 目标图像的所需深度 默认填 -1,与源图一致
  1036. ksize 用于计算二阶导数滤波器的孔径大小,卷积核大小,奇数
  1037. scale 计算的拉普拉斯值的可选缩放因子(默认情况下不应用缩放)
  1038. delta 可选的增量值,在将结果存储到dst之前添加到结果中
  1039. borderType 边缘处理方法
  1040. */
  1041. Cv2.Laplacian(grayImage, LaplacianImg, -1, 3); //参数:1,源图像;2,输出图像;3,目标图像的所需深度 默认填 -1,与源图一致;4,用于计算二阶导数滤波器的卷积核大小,需奇数。
  1042. //阈值操作:可根据灰度的差异来分割图像
  1043. /* src:输入图像
  1044. dst:输出图像
  1045. thresh:阈值
  1046. maxval:阈值最大
  1047. type:阈值类型,详解见下
  1048. Binary:阈值二值化(大于阈值的让它等于最大值,小于的等于最小值)
  1049. BinaryInv:阈值反二值化(二值化阈值相反,大于阈值为最小值,小于阈值为最大值)
  1050. Trunc:截断(大于阈值的就等于阈值,小的不变)
  1051. ToZero:阈值归零(当大于阈值的不变,小于阈值的归零)
  1052. ToZeroIv:阈值归零取反(与阈值取零相反,大于时为最小值,小于时保持不变)
  1053. */
  1054. Mat dst = new Mat();
  1055. Cv2.Threshold(LaplacianImg, dst, threshold1, threshold2, ThresholdTypes.Binary);
  1056. return dst;
  1057. }
  1058. //Sobel算子主要用来检测离散微分边缘算子,Sobel算子对噪声灰常敏感,一般需要先把图片进行高斯降噪
  1059. public static Mat SobelOperator(Mat src_img, double threshold1 = 10, double threshold2 = 250)
  1060. {
  1061. Mat dst = new Mat();
  1062. //高斯滤波
  1063. Cv2.GaussianBlur(src_img, dst, new OpenCvSharp.Size(3, 3), 0, 0, BorderTypes.Default);
  1064. Mat grayImage = new Mat();
  1065. Cv2.CvtColor(dst, grayImage, ColorConversionCodes.BGR2GRAY); //转换为灰度图
  1066. Mat X = new Mat();
  1067. Mat Y = new Mat();
  1068. /*src:输入图像
  1069. dst:输出图像
  1070. ddepth:输出图像深度
  1071. xorder:X方向的差分阶数
  1072. yorder:Y方向的差分阶数
  1073. ksize :表示Sobel核大小,只能为奇数
  1074. scale: 计算导数值时候的缩放因子,默认为1
  1075. delta :表示存入目标图前可选的delta值
  1076. borderType :边界模式,一般为默认
  1077. */
  1078. Cv2.Sobel(grayImage, X, MatType.CV_16S, 1, 0, 3); //Sobel边缘查找,参数:1,输入;2,输出X方向梯度图像;3,输出图像的深度;4,X方向几阶导数;5,Y方向几阶导数;6,卷积核大小,必须为奇数。
  1079. Cv2.Sobel(grayImage, Y, MatType.CV_16S, 0, 1, 3); //输出Y方向梯度图像
  1080. #region 方式1:像素操作进行相加
  1081. int width = X.Cols;
  1082. int hight = Y.Rows;
  1083. Mat output = new Mat(X.Size(), X.Type());
  1084. for (int x = 0; x < hight; x++) //合并X和Y,G= (Gx*Gx +Gy*Gy)的开平方根
  1085. {
  1086. for (int y = 0; y < width; y++)
  1087. {
  1088. int xg = X.At<byte>(x, y); //获取像素点的值
  1089. int yg = Y.At<byte>(x, y);
  1090. double v1 = Math.Pow(xg, 2); //平方
  1091. double v2 = Math.Pow(yg, 2);
  1092. int val = (int)Math.Sqrt(v1 + v2); //开平方根
  1093. if (val > 255) //确保像素值在 0至255之间
  1094. {
  1095. val = 255;
  1096. }
  1097. if (val < 0)
  1098. {
  1099. val = 0;
  1100. }
  1101. byte xy = (byte)val;
  1102. output.Set<byte>(x, y, xy); //为图像设置像素值
  1103. }
  1104. }
  1105. Mat tmp = new Mat(output.Size(), MatType.CV_8UC1);
  1106. #endregion
  1107. #region 方式2:利用现有API实现(X梯度+Y梯度)
  1108. //Mat Abs_X = new Mat();
  1109. //Mat Abs_Y = new Mat();
  1110. //Mat Result = new Mat();
  1111. //Cv2.ConvertScaleAbs(X, Abs_X, 1.0);//缩放,计算绝对值并将结果转换为8位。
  1112. //Cv2.ConvertScaleAbs(Y, Abs_Y, 1.0);//缩放,计算绝对值并将结果转换为8位。
  1113. //Cv2.AddWeighted(Abs_X, 0.5, Abs_Y, 0.5, 0, Result);//以不同的权重将两幅图片叠加
  1114. #endregion
  1115. //阈值
  1116. Mat result = new Mat();
  1117. Cv2.Threshold(tmp, result, threshold1, threshold2, ThresholdTypes.Binary);
  1118. return result;
  1119. }
  1120. //Scharr算子是对Sobel算子的优化,特别在核为3*3时
  1121. public static Mat ScharrOperator(Mat srcImg, double threshold1 = 10, double threshold2 = 250)
  1122. {
  1123. Mat dst = new Mat();
  1124. Cv2.GaussianBlur(srcImg, dst, new OpenCvSharp.Size(3, 3), 0, 0, BorderTypes.Default);
  1125. Mat grayImage = new Mat();
  1126. Cv2.CvtColor(dst, grayImage, ColorConversionCodes.BGR2GRAY); //转换为灰度图
  1127. Mat grad_x = new Mat();
  1128. Mat grad_x2 = new Mat();
  1129. Mat grad_y = new Mat();
  1130. Mat grad_y2 = new Mat();
  1131. Cv2.Scharr(grayImage, grad_x, MatType.CV_16S, 1, 0);
  1132. Cv2.Scharr(grayImage, grad_y, MatType.CV_16S, 0, 1);
  1133. Cv2.ConvertScaleAbs(grad_x, grad_x2);
  1134. Cv2.ConvertScaleAbs(grad_y, grad_y2);
  1135. Mat result = new Mat();
  1136. Cv2.AddWeighted(grad_x2, 0.5, grad_y2, 0.5, 0, result);
  1137. //阈值
  1138. Cv2.Threshold(result, result, threshold1, threshold2, ThresholdTypes.Binary);
  1139. //Cv2.ImShow("Scharr", result);
  1140. return result;
  1141. }
  1142. #region MatToHalcon
  1143. public static void MatToHObject(Mat imgMat, out HObject imgHOject)
  1144. {
  1145. int ImageWidth = imgMat.Width;
  1146. int ImageHeight = imgMat.Height;
  1147. int channel = imgMat.Channels();
  1148. long size = ImageWidth * ImageHeight * channel;
  1149. int col_byte_num = ImageWidth * channel;
  1150. byte[] rgbValues = new byte[size];
  1151. unsafe
  1152. {
  1153. for (int i = 0; i < ImageHeight; i++)
  1154. {
  1155. IntPtr c = imgMat.Ptr(i);
  1156. // 一行一行将mat 像素复制到byte[]
  1157. Marshal.Copy(c, rgbValues, i * col_byte_num, col_byte_num);
  1158. }
  1159. void* p;
  1160. IntPtr ptr;
  1161. fixed (byte* pc = rgbValues)
  1162. {
  1163. p = (void*)pc;
  1164. ptr = new IntPtr(p);
  1165. }
  1166. if (channel == 1)
  1167. {
  1168. HOperatorSet.GenImage1(out imgHOject, "byte", ImageWidth, ImageHeight, ptr);
  1169. }
  1170. else
  1171. {
  1172. HOperatorSet.GenImageInterleaved(out imgHOject, ptr, "bgr", ImageWidth, ImageHeight, 0, "byte", 0, 0, 0, 0, -1, 0);
  1173. }
  1174. }
  1175. }
#if false
/// <summary>
/// Converts an OpenCV image to a Halcon image.
/// (Disabled via #if false — kept for reference. Single-channel Mats are copied
/// as one gray plane; 3-channel Mats are split and recombined with GenImage3.)
/// </summary>
/// <param name="mImage">OpenCV image (Mat)</param>
/// <returns>Halcon image (HObject), or null for unsupported channel counts</returns>
public HObject MatToHImage(Mat mImage)
{
    try
    {
        HObject hImage;
        int matChannels = 0; // channel count
        Type matType = null;
        int width, height; // width, height
        width = height = 0; // initialize width and height
        // Get the channel count.
        matChannels = mImage.Channels();
        if (matChannels == 0)
        {
            return null;
        }
        if (matChannels == 1) // single channel
        {
            IntPtr ptr; // gray-plane data pointer
            Mat[] mats = mImage.Split();
            // Adapted from: Mat.GetImagePointer1(mImage, out ptr, out matType, out width, out height); // ptr=2157902018096 cType=byte width=830 height=822
            ptr = mats[0].Data; // take the gray-plane data
            matType = mImage.GetType(); // byte
            height = mImage.Rows; // height
            width = mImage.Cols; // width
            // Adapted from: hImage = new HObject(new OpenCvSharp.Size(width, height), MatType.CV_8UC1, new Scalar(0));
            byte[] dataGrayScaleImage = new byte[width * height]; //Mat dataGrayScaleImage = new Mat(new OpenCvSharp.Size(width, height), MatType.CV_8UC1);
            unsafe
            {
                fixed (byte* ptrdata = dataGrayScaleImage)
                {
                    #region 按行复制
                    //for (int i = 0; i < height; i++)
                    //{
                    //    CopyMemory((IntPtr)(ptrdata + width * i), new IntPtr((long)ptr + width * i), width);
                    //}
                    #endregion
                    // Bulk copy of the whole plane, then hand the pinned buffer to Halcon.
                    CopyMemory((IntPtr)ptrdata, new IntPtr((long)ptr), width * height);
                    HOperatorSet.GenImage1(out hImage, "byte", width, height, (IntPtr) ptrdata);
                }
            }
            return hImage;
        }
        else if (matChannels == 3) // three channels
        {
            IntPtr ptrRed; // R plane
            IntPtr ptrGreen; // G plane
            IntPtr ptrBlue; // B plane
            Mat[] mats = mImage.Split();
            ptrRed = mats[0].Data; // take the R plane data
            ptrGreen = mats[1].Data; // take the G plane data
            ptrBlue = mats[2].Data; // take the B plane data
            matType = mImage.GetType(); // type
            height = mImage.Rows; // height
            width = mImage.Cols; // width
            // Adapted from: hImage = new HObject(new OpenCvSharp.Size(width, height), MatType.CV_8UC1, new Scalar(0));
            byte[] dataRed = new byte[width * height]; //Mat dataGrayScaleImage = new Mat(new OpenCvSharp.Size(width, height), MatType.CV_8UC1);
            byte[] dataGreen = new byte[width * height];
            byte[] dataBlue = new byte[width * height];
            unsafe
            {
                fixed (byte* ptrdataRed = dataRed, ptrdataGreen = dataGreen, ptrdataBlue = dataBlue)
                {
                    #region 按行复制
                    //HImage himg = new HImage("byte", width, height, (IntPtr)ptrdataRed);
                    //for (int i = 0; i < height; i++)
                    //{
                    //    CopyMemory((IntPtr)(ptrdataRed + width * i), new IntPtr((long)ptrRed + width * i), width);
                    //    CopyMemory((IntPtr)(ptrdataGreen + width * i), new IntPtr((long)ptrGreen + width * i), width);
                    //    CopyMemory((IntPtr)(ptrdataBlue + width * i), new IntPtr((long)ptrBlue + width * i), width);
                    //}
                    #endregion
                    CopyMemory((IntPtr)ptrdataRed, new IntPtr((long)ptrRed), width * height); // copy the R plane
                    CopyMemory((IntPtr)ptrdataGreen, new IntPtr((long)ptrGreen), width * height); // copy the G plane
                    CopyMemory((IntPtr)ptrdataBlue, new IntPtr((long)ptrBlue), width * height); // copy the B plane
                    HOperatorSet.GenImage3(out hImage, "byte", width, height, (IntPtr)ptrdataRed, (IntPtr)ptrdataGreen, (IntPtr)ptrdataBlue); // recombine the planes
                }
            }
            return hImage;
        }
        else
        {
            // Unsupported channel count (e.g. 2 or 4 channels).
            return null;
        }
    }
    catch (Exception ex)
    {
        throw ex;
    }
}
#endif
  1272. #endregion
  1273. }
  1274. }