Usage of the org.opencv.imgproc.Imgproc.rectangle() method, with code examples

This article collects a number of Java code examples for the org.opencv.imgproc.Imgproc.rectangle() method and shows how Imgproc.rectangle() is used in practice. The examples are taken from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and are intended as practical references. Details of the Imgproc.rectangle() method are as follows:
Package: org.opencv.imgproc
Class: Imgproc
Method: rectangle

About Imgproc.rectangle

Imgproc.rectangle draws a rectangle on a Mat between two opposite corner Points, using the given Scalar color. Optional parameters control the line thickness (a negative value such as -1 produces a filled rectangle), the line type, and the coordinate shift.
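
Before the collected snippets, here is a minimal, self-contained sketch of the basic call (not taken from any of the projects below; the output file name is just a placeholder):

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

public class RectangleDemo {
  public static void main(String[] args) {
    // Load the native OpenCV library before calling into the bindings.
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

    // A black 400x400, 3-channel canvas.
    Mat canvas = Mat.zeros(400, 400, CvType.CV_8UC3);

    // Outlined green rectangle, 2 px thick, defined by two opposite corners.
    Imgproc.rectangle(canvas, new Point(50, 50), new Point(200, 150),
        new Scalar(0, 255, 0), 2);

    // Filled red rectangle: a negative thickness means "filled".
    Imgproc.rectangle(canvas, new Point(220, 200), new Point(350, 350),
        new Scalar(0, 0, 255), -1);

    Imgcodecs.imwrite("rectangle_demo.png", canvas);
  }
}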

Code examples

Code example source: origin: raulh82vlc/Image-Detection-Samples

public static void drawMatchedEye(Point matchLocTx, Point matchLocTy, Mat matrixRgba) {
    Imgproc.rectangle(matrixRgba, matchLocTx, matchLocTy, new Scalar(255, 255, 0,
        255));
  }
}

Code example source: origin: raulh82vlc/Image-Detection-Samples

public static void drawEyeRectangle(Rect eyeArea, Mat matrixRgba) {
  Imgproc.rectangle(matrixRgba, eyeArea.tl(), eyeArea.br(),
      new Scalar(255, 0, 0, 255), 2);
}

Code example source: origin: JavaOpenCVBook/code

private void drawBoundingBox(MatOfPoint currentContour) {
  Rect rectangle = Imgproc.boundingRect(currentContour);
  Imgproc.rectangle(image, rectangle.tl(), rectangle.br(), new Scalar(255,0,0),1);
}

Code example source: origin: openpnp/openpnp

private void locateTemplateMatchesDebug(Mat roiImage, Mat templateImage,
      org.opencv.core.Point matchLoc) {
    if (LogUtils.isDebugEnabled()) {
      try {
        Imgproc.rectangle(roiImage, matchLoc,
            new org.opencv.core.Point(matchLoc.x + templateImage.cols(),
                matchLoc.y + templateImage.rows()),
            new Scalar(0, 255, 0));

        OpenCvUtils.saveDebugImage(OpenCvVisionProvider.class, "locateTemplateMatches", "debug", roiImage);
      }
      catch (Exception e) {
        e.printStackTrace();
      }
    }
  }
}
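
The matchLoc argument passed to this debug helper normally comes from an earlier template-matching step. A minimal sketch of that step (assuming the normalized correlation method; the variable names are only illustrative):

Mat result = new Mat();
Imgproc.matchTemplate(roiImage, templateImage, result, Imgproc.TM_CCOEFF_NORMED);
// For TM_CCOEFF_NORMED the best match is at the global maximum.
Core.MinMaxLocResult mmr = Core.minMaxLoc(result);
org.opencv.core.Point matchLoc = mmr.maxLoc;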

Code example source: origin: raulh82vlc/Image-Detection-Samples

public static void drawFaceShapes(Rect face, Mat matrixRGBA) {
  Point start = face.tl();
  int h = (int) start.y + (face.height / 2);
  int w = (int) start.x + (face.width / 2);
  Imgproc.rectangle(matrixRGBA, start, face.br(),
      FACE_RECT_COLOR, 3);
  Point center = new Point(w, h);
  Imgproc.circle(matrixRGBA, center, 10, new Scalar(255, 0, 0, 255), 3);
}

Code example source: origin: kongqw/OpenCVForAndroid

@Override
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
  // Runs on a worker thread (not the UI thread)
  mRgba = inputFrame.rgba();
  mGray = inputFrame.gray();
  for (ObjectDetector detector : mObjectDetects) {
    // Detect objects
    Rect[] object = detector.detectObject(mGray, mObject);
    for (Rect rect : object) {
      Imgproc.rectangle(mRgba, rect.tl(), rect.br(), detector.getRectColor(), 3);
    }
  }
  return mRgba;
}

Code example source: origin: openpnp/openpnp

@Override
  public Result process(CvPipeline pipeline) throws Exception {
    if (templateMatchesStageName == null) {
      return null;
    }
    Result result = pipeline.getResult(templateMatchesStageName);
    if (result == null || result.model == null) {
      return null;
    }
    Mat mat = pipeline.getWorkingImage();

    List<TemplateMatch> matches = (List<TemplateMatch>) result.model;
    for (int i = 0; i < matches.size(); i++) {
      TemplateMatch match = matches.get(i);
      double x = match.x;
      double y = match.y;
      double score = match.score;
      double width = match.width;
      double height = match.height;
      Color color_ = this.color == null ? FluentCv.indexedColor(i) : this.color;
      Scalar color = FluentCv.colorToScalar(color_);
      Imgproc.rectangle(mat, new org.opencv.core.Point(x, y),
          new org.opencv.core.Point(x + width, y + height), color);
      Imgproc.putText(mat, "" + score, new org.opencv.core.Point(x + width, y + height),
          Core.FONT_HERSHEY_PLAIN, 1.0, color);
    }

    return null;
  }
}

Code example source: origin: jdye64/nifi-addons

@Override
  public void process(OutputStream outputStream) throws IOException {
    Mat croppedImage = null;
    //Should the image be cropped? If so there is no need to draw bounds because that would be the same as the cropping
    if (dd.getBoolean("crop")) {
      Rect rectCrop = new Rect(rect.x, rect.y, rect.width, rect.height);
      croppedImage = new Mat(image, rectCrop);
      MatOfByte updatedImage = new MatOfByte();
      Imgcodecs.imencode(".jpg", croppedImage, updatedImage);
      croppedImageReference.set(croppedImage);
      outputStream.write(updatedImage.toArray());
    } else {
      //Should the image have a border drawn around it?
      if (dd.getBoolean("drawBounds")) {
        Mat imageWithBorder = image.clone();
        Imgproc.rectangle(imageWithBorder, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(255, 255, 255));
        MatOfByte updatedImage = new MatOfByte();
        Imgcodecs.imencode(".jpg", imageWithBorder, updatedImage);
        outputStream.write(updatedImage.toArray());
      } else {
        MatOfByte updatedImage = new MatOfByte();
        Imgcodecs.imencode(".jpg", image, updatedImage);
        outputStream.write(updatedImage.toArray());
      }
    }
  }
});

Code example source: origin: hschott/Camdroid

protected void execute() {
    out = gray();
    Imgproc.equalizeHist(out, out);
    synchronized (mog) {
      mog.apply(out, this.mask, (double) (-10 + learning_rate) / 10);
    }
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_DILATE,
        new Size(3, 3));
    Imgproc.dilate(mask, mask, kernel);
    ArrayList<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(this.mask, contours, new Mat(),
        Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    double maxheight = object_max_size * this.in.height() / 100;
    double minheight = object_min_size * this.in.height() / 100;
    Iterator<MatOfPoint> each = contours.iterator();
    while (each.hasNext()) {
      MatOfPoint contour = each.next();
      Rect rect = Imgproc.boundingRect(contour);
      if (rect.height > minheight && rect.height < maxheight) {
        Imgproc.rectangle(out, rect.tl(), rect.br(), new Scalar(255,
            0, 0), 1);
      }
    }
  }
}

Code example source: origin: SOFTPOWER1991/OpenCVCheck

public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
  mRgba = inputFrame.rgba();
  mGray = inputFrame.gray();
  if (mAbsoluteFaceSize == 0) {
    int height = mGray.rows();
    if (Math.round(height * mRelativeFaceSize) > 0) {
      mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
    }
    mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
  }
  MatOfRect faces = new MatOfRect();
  if (mDetectorType == JAVA_DETECTOR) {
    if (mJavaDetector != null)
      mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
          new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
  } else if (mDetectorType == NATIVE_DETECTOR) {
    if (mNativeDetector != null)
      mNativeDetector.detect(mGray, faces);
  } else {
    Log.e(TAG, "Detection method is not selected!");
  }
  Rect[] facesArray = faces.toArray();
  for (int i = 0; i < facesArray.length; i++)
    Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
  return mRgba;
}

Code example source: origin: JavaOpenCVBook/code

private void detectAndDrawFace(Mat image) {
    MatOfRect faceDetections = new MatOfRect();
    faceDetector.detectMultiScale(image, faceDetections, 1.1, 7, 0, new Size(250, 40), new Size());
    // Draw a bounding box around each face.
    for (Rect rect : faceDetections.toArray()) {
      Imgproc.rectangle(image, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(0, 255, 0));
    }
  }
}
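
The faceDetector field in this snippet is an org.opencv.objdetect.CascadeClassifier loaded elsewhere in the class. A short sketch of how it is typically created (the cascade file path is a placeholder):

CascadeClassifier faceDetector = new CascadeClassifier("haarcascade_frontalface_alt.xml");
if (faceDetector.empty()) {
  throw new IllegalStateException("Could not load the cascade file");
}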

Code example source: origin: SOFTPOWER1991/OpenCVCheck

public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
  mRgba = inputFrame.rgba();
  mGray = inputFrame.gray();
  if (mAbsoluteFaceSize == 0) {
    int height = mGray.rows();
    if (Math.round(height * mRelativeFaceSize) > 0) {
      mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
    }
    mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
  }
  MatOfRect faces = new MatOfRect();
  if (mDetectorType == JAVA_DETECTOR) {
    if (mJavaDetector != null)
      mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
          new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
  } else if (mDetectorType == NATIVE_DETECTOR) {
    if (mNativeDetector != null)
      mNativeDetector.detect(mGray, faces);
  } else {
    Log.e(TAG, "Detection method is not selected!");
  }
  Rect[] facesArray = faces.toArray();
  for (int i = 0; i < facesArray.length; i++)
    Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
  return mRgba;
}

Code example source: origin: SOFTPOWER1991/OpenCVCheck

private void checkFace() {
  Bitmap imgtemp = BitmapFactory.decodeResource(getResources(), R.mipmap.twop);
  Utils.bitmapToMat(imgtemp, mRgba);
  Mat mat1 = new Mat();
  Utils.bitmapToMat(imgtemp, mat1);
  Imgproc.cvtColor(mat1, mGray, Imgproc.COLOR_BGR2GRAY);
  if (mAbsoluteFaceSize == 0) {
    int height = mGray.rows();
    if (Math.round(height * mRelativeFaceSize) > 0) {
      mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
    }
    mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
  }
  MatOfRect faces = new MatOfRect();
  if (mDetectorType == JAVA_DETECTOR) {
    if (mJavaDetector != null)
      mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
          new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
  } else if (mDetectorType == NATIVE_DETECTOR) {
    if (mNativeDetector != null)
      mNativeDetector.detect(mGray, faces);
  } else {
    Log.e(TAG, "Detection method is not selected!");
  }
  Rect[] facesArray = faces.toArray();
  for (int i = 0; i < facesArray.length; i++)
    Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
  Utils.matToBitmap(mRgba, imgtemp, true);
  imageView.setImageBitmap(imgtemp);
}

Code example source: origin: SouvDc/face-detection

public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
  mRgba = inputFrame.rgba();
  mGray = inputFrame.gray();
  if (mAbsoluteFaceSize == 0) {
    int height = mGray.rows();
    if (Math.round(height * mRelativeFaceSize) > 0) {
      mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
    }
    mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
  }
  MatOfRect faces = new MatOfRect();
  if (mDetectorType == JAVA_DETECTOR) {
    if (mJavaDetector != null)
      mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
          new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
  }
  else if (mDetectorType == NATIVE_DETECTOR) {
    if (mNativeDetector != null)
      mNativeDetector.detect(mGray, faces);
  }
  else {
    Log.e(TAG, "Detection method is not selected!");
  }
  Rect[] facesArray = faces.toArray();
  for (int i = 0; i < facesArray.length; i++)
    Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
  return mRgba;
}

Code example source: origin: nroduit/Weasis

public static ImageCV applyCropMask(Mat source, Rectangle b, double alpha) {
  Mat srcImg = Objects.requireNonNull(source);
  ImageCV dstImg = new ImageCV();        
  source.copyTo(dstImg);
  if(b.getY() > 0) {
    Imgproc.rectangle(dstImg, new Point(0.0, 0.0), new Point(dstImg.width(), b.getMinY() ), new Scalar(0), -1);
  }
  if(b.getX() > 0) {
    Imgproc.rectangle(dstImg, new Point(0.0, b.getMinY()), new Point(b.getMinX(), b.getMaxY() ), new Scalar(0), -1);
  }
  if(b.getX() < dstImg.width()) {
    Imgproc.rectangle(dstImg, new Point(b.getMaxX(), b.getMinY()), new Point(dstImg.width(), b.getMaxY() ), new Scalar(0), -1);
  }
  if(b.getY() < dstImg.height()) {
    Imgproc.rectangle(dstImg, new Point(0.0, b.getMaxY()), new Point(dstImg.width(), dstImg.height() ), new Scalar(0), -1);
  }
  Core.addWeighted(dstImg, alpha, srcImg, 1- alpha, 0.0, dstImg);
  return dstImg;
}

Code example source: origin: kongqw/OpenCVForAndroid

public RotatedRect objectTracking(Mat mRgba) {
  rgba2Hsv(mRgba);
  updateHueImage();
  // Compute the back projection of the histogram.
  // Imgproc.calcBackProject(hueList, new MatOfInt(0), hist, prob, ranges, 255);
  Imgproc.calcBackProject(hueList, new MatOfInt(0), hist, prob, ranges, 1.0);
  // Element-wise bitwise AND of the two arrays (dst = src1 & src2); also works between an array and a scalar.
  Core.bitwise_and(prob, mask, prob, new Mat());
  // Track the target
  rotatedRect = Video.CamShift(prob, trackRect, new TermCriteria(TermCriteria.EPS, 10, 1));
  if (null != mOnCalcBackProjectListener) {
    mOnCalcBackProjectListener.onCalcBackProject(prob);
  }
  // Use the region found in this frame as the search window for the next frame.
  trackRect = rotatedRect.boundingRect();
  Imgproc.rectangle(prob, trackRect.tl(), trackRect.br(), new Scalar(255, 255, 0, 255), 6);
  Log.i(TAG, "objectTracking: width : " + trackRect.width + "  height : " + trackRect.height + "  angle : " + rotatedRect.angle);
  return rotatedRect;
}
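
The fields used above (hueList, hist, prob, ranges, trackRect) are initialized elsewhere in the class. As a rough, generic sketch of the usual one-time setup for this kind of CamShift tracking (not the project's actual code), a hue histogram of the initially selected region is built once and reused for the per-frame back projection:

// Sketch only, with assumed names: hueList holds the hue channel of the region
// selected for tracking; trackRect is that region's bounding box.
MatOfFloat ranges = new MatOfFloat(0f, 180f);      // hue range in OpenCV is [0, 180)
Mat hist = new Mat();
Imgproc.calcHist(hueList, new MatOfInt(0), new Mat(), hist, new MatOfInt(16), ranges);
Core.normalize(hist, hist, 0, 255, Core.NORM_MINMAX);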

Code example source: origin: hschott/Camdroid

protected void execute() {
  if (!classifierPath.equals(lastUsedClassifierPath)) {
    detector.load(classifierPath);
    lastUsedClassifierPath = classifierPath;
  }
  if (detector.empty()) {
    return;
  }
  out = gray();
  double min = (double) this.in.height() * object_min_size / 100;
  double max = (double) this.in.height() * object_max_size / 100;
  MatOfRect rects = new MatOfRect();
  detector.detectMultiScale(out, rects,
      1 + (double) scale_factor / 100, min_neighbors, 0,
      new Size(min, min), new Size(max, max));
  for (Rect rect : rects.toArray()) {
    Imgproc.rectangle(out, rect.tl(), rect.br(),
        new Scalar(255, 0, 0), 1);
  }
}

Code example source: origin: kongqw/OpenCVForAndroid

Imgproc.rectangle(mRgba, rect.tl(), rect.br(), TRACKING_RECT_COLOR, 3);
if (null != mOnObjectTrackingListener) {
  Point center = rotatedRect.center;

Code example source: origin: openpnp/openpnp

@Override
  public Result process(CvPipeline pipeline) throws Exception {
    Mat mat = pipeline.getWorkingImage();
    Mat mask = mat.clone();
    Mat masked = mat.clone();
    Scalar color = FluentCv.colorToScalar(Color.black);
    mask.setTo(color);
    masked.setTo(color);
    Point low = new Point(mat.cols() / 2 - getWidth() / 2, mat.rows() / 2 - getHeight() / 2);
    Point high = new Point(mat.cols() / 2 + getWidth() / 2, mat.rows() / 2 + getHeight() / 2);
    Imgproc.rectangle(mask, low, high, new Scalar(255, 255, 255), -1);
    if (getWidth() * getHeight() < 0) {
      Core.bitwise_not(mask, mask);
    }
    mat.copyTo(masked, mask);
    mask.release();
    return new Result(masked);
  }
}
