diff --git a/addons/.gitignore b/addons/.gitignore index 6000646f925..f6960b5362d 100644 --- a/addons/.gitignore +++ b/addons/.gitignore @@ -24,6 +24,7 @@ ofxAndroid/ofAndroidLib/gen !ofxEmscripten !ofxUnitTests !ofxProjectGenerator +!ofxCv # don't ignore the .gitignore file !.gitignore diff --git a/addons/ofxCv/addon_config.mk b/addons/ofxCv/addon_config.mk new file mode 100644 index 00000000000..a13d2577d12 --- /dev/null +++ b/addons/ofxCv/addon_config.mk @@ -0,0 +1,63 @@ +# All variables and this file are optional, if they are not present the PG and the +# makefiles will try to parse the correct values from the file system. +# +# Variables that specify exclusions can use % as a wildcard to specify that anything in +# that position will match. A partial path can also be specified to, for example, exclude +# a whole folder from the parsed paths from the file system +# +# Variables can be specified using = or += +# = will clear the contents of that variable both specified from the file or the ones parsed +# from the file system +# += will add the values to the previous ones in the file or the ones parsed from the file +# system +# +# The PG can be used to detect errors in this file, just create a new project with this addon +# and the PG will write to the console the kind of error and in which line it is + +meta: + ADDON_NAME = ofxCv + ADDON_DESCRIPTION = Addon for computer vision using the open source library openCv + ADDON_AUTHOR = Kyle McDonald + ADDON_TAGS = "computer vision" "opencv" "image processing" + ADDON_URL = https://github.com/kylemcdonald/ofxcv + +common: + # dependencies with other addons, a list of them separated by spaces + # or use += in several lines + ADDON_DEPENDENCIES = ofxOpenCv + + # include search paths, this will be usually parsed from the file system + # but if the addon or addon libraries need special search paths they can be + # specified here separated by spaces or one per line using += + ADDON_INCLUDES = libs/ofxCv/include + 
ADDON_INCLUDES += libs/CLD/include/CLD + ADDON_INCLUDES += src + + # any special flag that should be passed to the compiler when using this + # addon + # ADDON_CFLAGS = + + # any special flag that should be passed to the linker when using this + # addon, also used for system libraries with -lname + # ADDON_LDFLAGS = + + # linux only, any library that should be included in the project using + # pkg-config + # ADDON_PKG_CONFIG_LIBRARIES = + + # osx/iOS only, any framework that should be included in the project + # ADDON_FRAMEWORKS = + + # source files, these will be usually parsed from the file system looking + # in the src folders in libs and the root of the addon. if your addon needs + # to include files in different places or a different set of files per platform + # they can be specified here + # ADDON_SOURCES = + + # some addons need resources to be copied to the bin/data folder of the project + # specify here any files that need to be copied, you can use wildcards like * and ? + # ADDON_DATA = + + # when parsing the file system looking for libraries exclude this for all or + # a specific platform + # ADDON_LIBS_EXCLUDE = diff --git a/addons/ofxCv/example-ar/addons.make b/addons/ofxCv/example-ar/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-ar/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-ar/bin/data/mbp-2011-isight.yml b/addons/ofxCv/example-ar/bin/data/mbp-2011-isight.yml new file mode 100644 index 00000000000..5b55839ab2c --- /dev/null +++ b/addons/ofxCv/example-ar/bin/data/mbp-2011-isight.yml @@ -0,0 +1,19 @@ +%YAML:1.0 +cameraMatrix: !!opencv-matrix + rows: 3 + cols: 3 + dt: d + data: [ 6.6278599887122368e+02, 0., 3.1244256016006659e+02, 0., + 6.6129276875199082e+02, 2.2747179767124251e+02, 0., 0., 1. 
] +imageSize_width: 640 +imageSize_height: 480 +sensorSize_width: 0 +sensorSize_height: 0 +distCoeffs: !!opencv-matrix + rows: 5 + cols: 1 + dt: d + data: [ -1.8848338341464690e-01, 1.0721890419183855e+00, + -3.5244467228016116e-03, -7.0195032848241403e-04, + -2.0412827999027101e+00 ] +reprojectionError: 2.1723265945911407e-01 diff --git a/addons/ofxCv/example-ar/src/main.cpp b/addons/ofxCv/example-ar/src/main.cpp new file mode 100644 index 00000000000..6250422053f --- /dev/null +++ b/addons/ofxCv/example-ar/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-ar/src/ofApp.cpp b/addons/ofxCv/example-ar/src/ofApp.cpp new file mode 100644 index 00000000000..4fa84b66c8d --- /dev/null +++ b/addons/ofxCv/example-ar/src/ofApp.cpp @@ -0,0 +1,62 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + cam.setup(640, 480); + + calibration.load("mbp-2011-isight.yml"); + patternSize = calibration.getPatternSize(); + objectPoints = Calibration::createObjectPoints(patternSize, 1., CHESSBOARD); + found = false; + + light.enable(); + light.setPosition(500, 0, 0); +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + found = calibration.findBoard(toCv(cam), imagePoints); + if(found) { + Mat cameraMatrix = calibration.getDistortedIntrinsics().getCameraMatrix(); + Mat rvec, tvec; + solvePnP(Mat(objectPoints), Mat(imagePoints), cameraMatrix, calibration.getDistCoeffs(), rvec, tvec); + modelMatrix = makeMatrix(rvec, tvec); + } + } +} + +void ofApp::draw() { + ofSetColor(255); + cam.draw(0, 0); + if(found) { + calibration.getDistortedIntrinsics().loadProjectionMatrix(); + applyMatrix(modelMatrix); + + ofMesh mesh; + mesh.setMode(OF_PRIMITIVE_POINTS); + for(int i = 0; i < objectPoints.size(); i++) { + mesh.addVertex(toOf(objectPoints[i])); + } + glPointSize(3); + ofSetColor(magentaPrint); + 
mesh.drawVertices(); + + ofEnableLighting(); + ofSetColor(255); + glEnable(GL_DEPTH_TEST); + ofTranslate(.5, .5, -.5); + for(int i = 0; i < patternSize.width / 2; i++) { + for(int j = 0; j < patternSize.height / 2; j++) { + for(int k = 0; k < 3; k++) { + ofBox(2 * i, 2 * j, -2 * k, 1); + } + } + } + glDisable(GL_DEPTH_TEST); + ofDisableLighting(); + } +} diff --git a/addons/ofxCv/example-ar/src/ofApp.h b/addons/ofxCv/example-ar/src/ofApp.h new file mode 100644 index 00000000000..a0c8ce6407f --- /dev/null +++ b/addons/ofxCv/example-ar/src/ofApp.h @@ -0,0 +1,20 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofVideoGrabber cam; + ofxCv::Calibration calibration; + vector objectPoints; + vector imagePoints; + ofMatrix4x4 modelMatrix; + bool found; + cv::Size patternSize; + ofLight light; +}; diff --git a/addons/ofxCv/example-background/addons.make b/addons/ofxCv/example-background/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-background/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-background/src/main.cpp b/addons/ofxCv/example-background/src/main.cpp new file mode 100644 index 00000000000..d0c88615fd8 --- /dev/null +++ b/addons/ofxCv/example-background/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640 * 2, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-background/src/ofApp.cpp b/addons/ofxCv/example-background/src/ofApp.cpp new file mode 100644 index 00000000000..67e70c7c26b --- /dev/null +++ b/addons/ofxCv/example-background/src/ofApp.cpp @@ -0,0 +1,29 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + cam.setup(640, 480); + background.setLearningTime(900); + background.setThresholdValue(10); +} + +void ofApp::update() { + 
cam.update(); + if(cam.isFrameNew()) { + background.update(cam, thresholded); + thresholded.update(); + } +} + +void ofApp::draw() { + cam.draw(0, 0); + thresholded.draw(640, 0); +} + +void ofApp::keyPressed(int key) { + if(key == ' ') { + background.reset(); + } +} \ No newline at end of file diff --git a/addons/ofxCv/example-background/src/ofApp.h b/addons/ofxCv/example-background/src/ofApp.h new file mode 100644 index 00000000000..e82b8243a79 --- /dev/null +++ b/addons/ofxCv/example-background/src/ofApp.h @@ -0,0 +1,17 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + void keyPressed(int key); + + ofVideoGrabber cam; + ofxCv::RunningBackground background; + ofImage thresholded; +}; diff --git a/addons/ofxCv/example-bayer/addons.make b/addons/ofxCv/example-bayer/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-bayer/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-bayer/bin/data/bayer.png b/addons/ofxCv/example-bayer/bin/data/bayer.png new file mode 100644 index 00000000000..9c808d2ddb5 Binary files /dev/null and b/addons/ofxCv/example-bayer/bin/data/bayer.png differ diff --git a/addons/ofxCv/example-bayer/bin/data/rgb.png b/addons/ofxCv/example-bayer/bin/data/rgb.png new file mode 100644 index 00000000000..cbb7363a4b7 Binary files /dev/null and b/addons/ofxCv/example-bayer/bin/data/rgb.png differ diff --git a/addons/ofxCv/example-bayer/src/main.cpp b/addons/ofxCv/example-bayer/src/main.cpp new file mode 100644 index 00000000000..d0c88615fd8 --- /dev/null +++ b/addons/ofxCv/example-bayer/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640 * 2, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-bayer/src/ofApp.cpp b/addons/ofxCv/example-bayer/src/ofApp.cpp new file mode 100644 
index 00000000000..3c271d9df91 --- /dev/null +++ b/addons/ofxCv/example-bayer/src/ofApp.cpp @@ -0,0 +1,47 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + + bayer.load("bayer.png"); + rgb.load("rgb.png"); + + bayerType = 0; +} + +void ofApp::update() { + int code; + switch(bayerType) { + case 0: code = CV_BayerBG2RGB; break; + case 1: code = CV_BayerGB2RGB; break; + case 2: code = CV_BayerRG2RGB; break; + case 3: code = CV_BayerGR2RGB; break; + } + + convertColor(bayer, rgb, code); + rgb.update(); +} + +void ofApp::draw() { + ofPushMatrix(); + bayer.draw(0, 0); + ofTranslate(rgb.getWidth(), 0); + rgb.draw(0, 0); + ofTranslate(bayer.getWidth(), 0); + ofPopMatrix(); + + drawHighlightString("use the up/down keys: " + ofToString(bayerType), 10, 10); +} + +void ofApp::keyPressed(int key) { + if(key == OF_KEY_UP) { + bayerType--; + } + if(key == OF_KEY_DOWN) { + bayerType++; + } + bayerType = ofClamp(bayerType, 0, 3); +} \ No newline at end of file diff --git a/addons/ofxCv/example-bayer/src/ofApp.h b/addons/ofxCv/example-bayer/src/ofApp.h new file mode 100644 index 00000000000..0f3503a5d59 --- /dev/null +++ b/addons/ofxCv/example-bayer/src/ofApp.h @@ -0,0 +1,15 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + void keyPressed(int key); + + ofImage bayer, rgb; + int bayerType; +}; diff --git a/addons/ofxCv/example-blur/addons.make b/addons/ofxCv/example-blur/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-blur/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-blur/src/main.cpp b/addons/ofxCv/example-blur/src/main.cpp new file mode 100644 index 00000000000..6250422053f --- /dev/null +++ b/addons/ofxCv/example-blur/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int 
main() { + ofSetupOpenGL(640, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-blur/src/ofApp.cpp b/addons/ofxCv/example-blur/src/ofApp.cpp new file mode 100644 index 00000000000..a945ba1eb9a --- /dev/null +++ b/addons/ofxCv/example-blur/src/ofApp.cpp @@ -0,0 +1,30 @@ +#include "ofApp.h" + +void ofApp::setup() { + useGaussian = false; + cam.setup(640, 480); +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + ofxCv::copy(cam, img); + if(useGaussian) { + ofxCv::GaussianBlur(img, 50); + } else { + ofxCv::blur(img, 50); + } + img.update(); + } +} + +void ofApp::draw() { + if(img.isAllocated()) { + img.draw(0, 0); + } + ofDrawBitmapStringHighlight(useGaussian ? "GaussianBlur()" : "blur()", 10, 20); +} + +void ofApp::keyPressed(int key) { + useGaussian = !useGaussian; +} \ No newline at end of file diff --git a/addons/ofxCv/example-blur/src/ofApp.h b/addons/ofxCv/example-blur/src/ofApp.h new file mode 100644 index 00000000000..9ea03f285a8 --- /dev/null +++ b/addons/ofxCv/example-blur/src/ofApp.h @@ -0,0 +1,16 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + void keyPressed(int key); + + ofVideoGrabber cam; + ofImage img; + bool useGaussian; +}; diff --git a/addons/ofxCv/example-calibration-lcp/addons.make b/addons/ofxCv/example-calibration-lcp/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-calibration-lcp/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-calibration-lcp/src/main.cpp b/addons/ofxCv/example-calibration-lcp/src/main.cpp new file mode 100644 index 00000000000..3a026762848 --- /dev/null +++ b/addons/ofxCv/example-calibration-lcp/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640 * 2, 480, OF_FULLSCREEN); + ofRunApp(new ofApp()); +} diff --git 
a/addons/ofxCv/example-calibration-lcp/src/ofApp.cpp b/addons/ofxCv/example-calibration-lcp/src/ofApp.cpp new file mode 100644 index 00000000000..00ef37f8a64 --- /dev/null +++ b/addons/ofxCv/example-calibration-lcp/src/ofApp.cpp @@ -0,0 +1,51 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + distorted.load("distorted.jpg"); + undistortedReference.load("undistorted.jpg"); + + float imageWidth = 5616; // ImageWidth, pixels + float imageHeight = 3744; // ImageLength, pixels + float focalLength = 28; // FocalLength, mm + float cropFactor = 0.975939; // SensorFormatFactor, "focal length multiplier", "crop factor" + float focalLengthX = 0.778962; // FocalLengthX + float focalLengthY = 0.778962; // FocalLengthY + float principalPointX = 0.500000; // ImageXCenter, ratio + float principalPointY = 0.500000; // ImageYCenter, ratio + + float k1 = -0.147131; // RadialDistortParam1 + float k2 = 0.084927; // RadialDistortParam2 + calibration.setDistortionCoefficients(k1, k2, 0, 0); + + Intrinsics intrinsics; + cv::Point2d sensorSize(35 * cropFactor, 35 * cropFactor * imageHeight / imageWidth); + cv::Size imageSize(distorted.getWidth(), distorted.getHeight()); + intrinsics.setup(focalLength, imageSize, sensorSize); + calibration.setFillFrame(false); + calibration.setIntrinsics(intrinsics); + + imitate(undistorted, distorted); + + Mat distortedMat = toCv(distorted); + Mat undistortedMat = toCv(undistorted); + calibration.undistort(distortedMat, undistortedMat); + undistorted.update(); +} + +void ofApp::update() { +} + +void ofApp::draw() { + float scale = ofGetHeight() / distorted.getHeight(); + ofScale(scale, scale); + distorted.draw(0, 0); + if(ofGetKeyPressed('r')) { + undistortedReference.draw(0, 0); + } + if(ofGetKeyPressed('u')) { + undistorted.draw(0, 0); + } +} diff --git a/addons/ofxCv/example-calibration-lcp/src/ofApp.h b/addons/ofxCv/example-calibration-lcp/src/ofApp.h new file mode 100644 index 00000000000..ba9a79fcb50 --- 
/dev/null +++ b/addons/ofxCv/example-calibration-lcp/src/ofApp.h @@ -0,0 +1,17 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofImage distorted; + ofImage undistorted; + ofImage undistortedReference; + + ofxCv::Calibration calibration; +}; diff --git a/addons/ofxCv/example-calibration/addons.make b/addons/ofxCv/example-calibration/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-calibration/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-calibration/bin/data/chessboard_a4.pdf b/addons/ofxCv/example-calibration/bin/data/chessboard_a4.pdf new file mode 100644 index 00000000000..00a42756654 Binary files /dev/null and b/addons/ofxCv/example-calibration/bin/data/chessboard_a4.pdf differ diff --git a/addons/ofxCv/example-calibration/bin/data/kinect-color.yml b/addons/ofxCv/example-calibration/bin/data/kinect-color.yml new file mode 100644 index 00000000000..c14beab2af1 --- /dev/null +++ b/addons/ofxCv/example-calibration/bin/data/kinect-color.yml @@ -0,0 +1,19 @@ +%YAML:1.0 +cameraMatrix: !!opencv-matrix + rows: 3 + cols: 3 + dt: d + data: [ 5.3893796492134459e+02, 0., 3.1511337206790159e+02, 0., + 5.3979719485143403e+02, 2.4592265267769122e+02, 0., 0., 1. 
] +imageSize_width: 640 +imageSize_height: 480 +sensorSize_width: 0 +sensorSize_height: 0 +distCoeffs: !!opencv-matrix + rows: 5 + cols: 1 + dt: d + data: [ 2.4155128224014732e-01, -8.1011724263486762e-01, + -1.6681878296497415e-03, 4.4869134116476460e-03, + 9.3018013660014132e-01 ] +reprojectionError: 3.4450665116310120e-01 diff --git a/addons/ofxCv/example-calibration/bin/data/kinect-ir.yml b/addons/ofxCv/example-calibration/bin/data/kinect-ir.yml new file mode 100644 index 00000000000..e3a1985fa17 --- /dev/null +++ b/addons/ofxCv/example-calibration/bin/data/kinect-ir.yml @@ -0,0 +1,19 @@ +%YAML:1.0 +cameraMatrix: !!opencv-matrix + rows: 3 + cols: 3 + dt: d + data: [ 5.5875867402259189e+02, 0., 3.2015962801017355e+02, 0., + 5.6313824661787146e+02, 2.0999445173364236e+02, 0., 0., 1. ] +imageSize_width: 640 +imageSize_height: 480 +sensorSize_width: 0 +sensorSize_height: 0 +distCoeffs: !!opencv-matrix + rows: 5 + cols: 1 + dt: d + data: [ -1.5785436725034177e-01, 5.3557992507168006e-01, + -1.6578843952074110e-02, -1.9955581754266837e-03, + -7.8997476053822979e-01 ] +reprojectionError: 1.3543131351470947e+00 diff --git a/addons/ofxCv/example-calibration/bin/data/mbp-2010-isight.yml b/addons/ofxCv/example-calibration/bin/data/mbp-2010-isight.yml new file mode 100644 index 00000000000..81c51cd0b0b --- /dev/null +++ b/addons/ofxCv/example-calibration/bin/data/mbp-2010-isight.yml @@ -0,0 +1,19 @@ +%YAML:1.0 +cameraMatrix: !!opencv-matrix + rows: 3 + cols: 3 + dt: d + data: [ 6.3495680777673090e+02, 0., 3.2625860497501907e+02, 0., + 6.3477863477453502e+02, 2.3520271872789803e+02, 0., 0., 1. 
] +imageSize_width: 640 +imageSize_height: 480 +sensorSize_width: 0 +sensorSize_height: 0 +distCoeffs: !!opencv-matrix + rows: 5 + cols: 1 + dt: d + data: [ 2.8451937079021987e-02, -1.9665948438007733e-01, + 6.9577327727113007e-03, 2.2564188073174744e-04, + 2.6227259396885910e-01 ] +reprojectionError: 2.4427352845668793e-01 diff --git a/addons/ofxCv/example-calibration/bin/data/mbp-2011-isight.yml b/addons/ofxCv/example-calibration/bin/data/mbp-2011-isight.yml new file mode 100644 index 00000000000..5b55839ab2c --- /dev/null +++ b/addons/ofxCv/example-calibration/bin/data/mbp-2011-isight.yml @@ -0,0 +1,19 @@ +%YAML:1.0 +cameraMatrix: !!opencv-matrix + rows: 3 + cols: 3 + dt: d + data: [ 6.6278599887122368e+02, 0., 3.1244256016006659e+02, 0., + 6.6129276875199082e+02, 2.2747179767124251e+02, 0., 0., 1. ] +imageSize_width: 640 +imageSize_height: 480 +sensorSize_width: 0 +sensorSize_height: 0 +distCoeffs: !!opencv-matrix + rows: 5 + cols: 1 + dt: d + data: [ -1.8848338341464690e-01, 1.0721890419183855e+00, + -3.5244467228016116e-03, -7.0195032848241403e-04, + -2.0412827999027101e+00 ] +reprojectionError: 2.1723265945911407e-01 diff --git a/addons/ofxCv/example-calibration/bin/data/ps3eye-zoomed.yml b/addons/ofxCv/example-calibration/bin/data/ps3eye-zoomed.yml new file mode 100644 index 00000000000..2de53136773 --- /dev/null +++ b/addons/ofxCv/example-calibration/bin/data/ps3eye-zoomed.yml @@ -0,0 +1,19 @@ +%YAML:1.0 +cameraMatrix: !!opencv-matrix + rows: 3 + cols: 3 + dt: d + data: [ 7.7264424957184565e+02, 0., 3.1294659824780257e+02, 0., + 7.7346259977483453e+02, 2.4564109827326186e+02, 0., 0., 1. 
] +imageSize_width: 640 +imageSize_height: 480 +sensorSize_width: 0 +sensorSize_height: 0 +distCoeffs: !!opencv-matrix + rows: 5 + cols: 1 + dt: d + data: [ -1.0149521517825388e-01, 1.1755924265868904e-01, + 2.3948205219152981e-03, -2.3766404831558285e-03, + 5.1112359194101821e-02 ] +reprojectionError: 2.0766365528106689e-01 diff --git a/addons/ofxCv/example-calibration/bin/data/settings.yml b/addons/ofxCv/example-calibration/bin/data/settings.yml new file mode 100644 index 00000000000..e0dc11cf871 --- /dev/null +++ b/addons/ofxCv/example-calibration/bin/data/settings.yml @@ -0,0 +1,5 @@ +%YAML:1.0 +xCount: 7 +yCount: 10 +squareSize: 2.5 +patternType: 0 diff --git a/addons/ofxCv/example-calibration/src/main.cpp b/addons/ofxCv/example-calibration/src/main.cpp new file mode 100644 index 00000000000..d0c88615fd8 --- /dev/null +++ b/addons/ofxCv/example-calibration/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640 * 2, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-calibration/src/ofApp.cpp b/addons/ofxCv/example-calibration/src/ofApp.cpp new file mode 100644 index 00000000000..d9bb28268a9 --- /dev/null +++ b/addons/ofxCv/example-calibration/src/ofApp.cpp @@ -0,0 +1,89 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +const float diffThreshold = 2.5; // maximum amount of movement +const float timeThreshold = 1; // minimum time between snapshots +const int startCleaning = 10; // start cleaning outliers after this many samples + +void ofApp::setup() { + ofSetVerticalSync(true); + cam.setup(640, 480); + + FileStorage settings(ofToDataPath("settings.yml"), FileStorage::READ); + if(settings.isOpened()) { + int xCount = settings["xCount"], yCount = settings["yCount"]; + calibration.setPatternSize(xCount, yCount); + float squareSize = settings["squareSize"]; + calibration.setSquareSize(squareSize); + CalibrationPattern patternType; + switch(settings["patternType"]) { + case 0: 
patternType = CHESSBOARD; break; + case 1: patternType = CIRCLES_GRID; break; + case 2: patternType = ASYMMETRIC_CIRCLES_GRID; break; + } + calibration.setPatternType(patternType); + } + + imitate(undistorted, cam); + imitate(previous, cam); + imitate(diff, cam); + + lastTime = 0; + + active = true; +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + Mat camMat = toCv(cam); + Mat prevMat = toCv(previous); + Mat diffMat = toCv(diff); + + absdiff(prevMat, camMat, diffMat); + camMat.copyTo(prevMat); + + diffMean = mean(Mat(mean(diffMat)))[0]; + + float curTime = ofGetElapsedTimef(); + if(active && curTime - lastTime > timeThreshold && diffMean < diffThreshold) { + if(calibration.add(camMat)) { + cout << "re-calibrating" << endl; + calibration.calibrate(); + if(calibration.size() > startCleaning) { + calibration.clean(); + } + calibration.save("calibration.yml"); + lastTime = curTime; + } + } + + if(calibration.size() > 0) { + calibration.undistort(toCv(cam), toCv(undistorted)); + undistorted.update(); + } + } +} + +void ofApp::draw() { + ofSetColor(255); + cam.draw(0, 0); + undistorted.draw(640, 0); + + stringstream intrinsics; + intrinsics << "fov: " << toOf(calibration.getDistortedIntrinsics().getFov()) << " distCoeffs: " << calibration.getDistCoeffs(); + drawHighlightString(intrinsics.str(), 10, 20, yellowPrint, ofColor(0)); + drawHighlightString("movement: " + ofToString(diffMean), 10, 40, cyanPrint); + drawHighlightString("reproj error: " + ofToString(calibration.getReprojectionError()) + " from " + ofToString(calibration.size()), 10, 60, magentaPrint); + for(int i = 0; i < calibration.size(); i++) { + drawHighlightString(ofToString(i) + ": " + ofToString(calibration.getReprojectionError(i)), 10, 80 + 16 * i, magentaPrint); + } +} + +void ofApp::keyPressed(int key) { + if(key == ' ') { + active = !active; + } +} diff --git a/addons/ofxCv/example-calibration/src/ofApp.h b/addons/ofxCv/example-calibration/src/ofApp.h new file mode 100644 index 
00000000000..6825cb5d909 --- /dev/null +++ b/addons/ofxCv/example-calibration/src/ofApp.h @@ -0,0 +1,23 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + void keyPressed(int key); + + ofVideoGrabber cam; + ofImage undistorted; + ofPixels previous; + ofPixels diff; + float diffMean; + + float lastTime; + bool active; + + ofxCv::Calibration calibration; +}; diff --git a/addons/ofxCv/example-coherent-lines/addons.make b/addons/ofxCv/example-coherent-lines/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-coherent-lines/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-coherent-lines/bin/data/0162681551.png b/addons/ofxCv/example-coherent-lines/bin/data/0162681551.png new file mode 100644 index 00000000000..7048e9e268e Binary files /dev/null and b/addons/ofxCv/example-coherent-lines/bin/data/0162681551.png differ diff --git a/addons/ofxCv/example-coherent-lines/bin/data/0168366051.png b/addons/ofxCv/example-coherent-lines/bin/data/0168366051.png new file mode 100644 index 00000000000..e7cb7d08bba Binary files /dev/null and b/addons/ofxCv/example-coherent-lines/bin/data/0168366051.png differ diff --git a/addons/ofxCv/example-coherent-lines/bin/data/0168639352.png b/addons/ofxCv/example-coherent-lines/bin/data/0168639352.png new file mode 100644 index 00000000000..3f470748302 Binary files /dev/null and b/addons/ofxCv/example-coherent-lines/bin/data/0168639352.png differ diff --git a/addons/ofxCv/example-coherent-lines/src/main.cpp b/addons/ofxCv/example-coherent-lines/src/main.cpp new file mode 100644 index 00000000000..389bd6f034b --- /dev/null +++ b/addons/ofxCv/example-coherent-lines/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(1280, 720, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git 
a/addons/ofxCv/example-coherent-lines/src/ofApp.cpp b/addons/ofxCv/example-coherent-lines/src/ofApp.cpp new file mode 100644 index 00000000000..b2bfb0595f4 --- /dev/null +++ b/addons/ofxCv/example-coherent-lines/src/ofApp.cpp @@ -0,0 +1,76 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofBackground(255); + ofDirectory dir; + dir.allowExt("png"); + dir.open("."); + dir.listDir(); + for(int i = 0; i < dir.size(); i++) { + ofImage cur; + cur.load(dir.getName(i)); + cur.setImageType(OF_IMAGE_GRAYSCALE); + input.push_back(cur); + output.push_back(cur); + canny.push_back(cur); + } + + gui.setup(); + gui.add(doFDoG.set("doFDoG", true)); + gui.add(halfw.set("halfw", 4, 1, 8)); + gui.add(smoothPasses.set("smoothPasses", 2, 1, 4)); + gui.add(sigma1.set("sigma1", 0.68, 0.01, 2.0)); + gui.add(sigma2.set("sigma2", 6.0, 0.01, 10.0)); + gui.add(tau.set("tau", 0.974, 0.8, 1.0)); + gui.add(black.set("black", -8, -255, 255)); + gui.add(doThresh.set("doThresh", true)); + gui.add(thresh.set("thresh", 150, 0, 255)); + gui.add(doThin.set("doThin", true)); + gui.add(doCanny.set("doCanny", true)); + gui.add(cannyParam1.set("cannyParam1", 400, 0, 1024)); + gui.add(cannyParam2.set("cannyParam2", 600, 0, 1024)); +} + +void ofApp::update(){ + for(int i = 0; i < input.size(); i++) { + if(doFDoG) { + CLD(input[i], output[i], halfw, smoothPasses, sigma1, sigma2, tau, black); + invert(output[i]); + if(doThresh) { + threshold(output[i], thresh); + } + if(doThin) { + thin(output[i]); + } + output[i].update(); + if(doCanny) { + Canny(input[i], canny[i], cannyParam1 * 2, cannyParam2 * 2, 5); + canny[i].update(); + } + } + } +} + +void ofApp::draw(){ + gui.draw(); + + ofTranslate(300, 0); + for(int i = 0; i < input.size(); i++) { + ofEnableBlendMode(OF_BLENDMODE_ALPHA); + input[i].draw(i * 256, 0); + ofEnableBlendMode(OF_BLENDMODE_ADD); + output[i].draw(i * 256, 0); + + ofEnableBlendMode(OF_BLENDMODE_ALPHA); + input[i].draw(i * 256, 256); + 
ofEnableBlendMode(OF_BLENDMODE_ADD); + canny[i].draw(i * 256, 256); + } + + ofEnableBlendMode(OF_BLENDMODE_ALPHA); + ofDrawBitmapStringHighlight("Coherent line drawing", 10, 20); + ofDrawBitmapStringHighlight("Canny edge detection", 10, 256 + 20); +} diff --git a/addons/ofxCv/example-coherent-lines/src/ofApp.h b/addons/ofxCv/example-coherent-lines/src/ofApp.h new file mode 100644 index 00000000000..0ab1bd5cc5e --- /dev/null +++ b/addons/ofxCv/example-coherent-lines/src/ofApp.h @@ -0,0 +1,20 @@ +#pragma once + +#include "ofMain.h" +#include "ofxGui.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp{ +public: + void setup(); + void update(); + void draw(); + + vector input, output, canny; + + ofParameter sigma1, sigma2, tau; + ofParameter halfw, smoothPasses, black, thresh, cannyParam1, cannyParam2; + ofParameter doFDoG, doThresh, doThin, doCanny; + + ofxPanel gui; +}; diff --git a/addons/ofxCv/example-contours-advanced/addons.make b/addons/ofxCv/example-contours-advanced/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-contours-advanced/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-contours-advanced/src/main.cpp b/addons/ofxCv/example-contours-advanced/src/main.cpp new file mode 100644 index 00000000000..6250422053f --- /dev/null +++ b/addons/ofxCv/example-contours-advanced/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-contours-advanced/src/ofApp.cpp b/addons/ofxCv/example-contours-advanced/src/ofApp.cpp new file mode 100644 index 00000000000..dda203f1bfb --- /dev/null +++ b/addons/ofxCv/example-contours-advanced/src/ofApp.cpp @@ -0,0 +1,119 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + cam.setup(640, 480); + contourFinder.setMinAreaRadius(10); + 
contourFinder.setMaxAreaRadius(150); + //contourFinder.setInvert(true); // find black instead of white + trackingColorMode = TRACK_COLOR_RGB; +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + threshold = ofMap(mouseX, 0, ofGetWidth(), 0, 255); + contourFinder.setThreshold(threshold); + contourFinder.findContours(cam); + } +} + +void ofApp::draw() { + ofSetColor(255); + cam.draw(0, 0); + + ofSetLineWidth(2); + contourFinder.draw(); + + ofNoFill(); + int n = contourFinder.size(); + for(int i = 0; i < n; i++) { + // smallest rectangle that fits the contour + ofSetColor(cyanPrint); + ofPolyline minAreRect = toOf(contourFinder.getMinAreaRect(i)); + minAreRect.draw(); + + // ellipse that best fits the contour + ofSetColor(magentaPrint); + cv::RotatedRect ellipse = contourFinder.getFitEllipse(i); + ofPushMatrix(); + ofVec2f ellipseCenter = toOf(ellipse.center); + ofVec2f ellipseSize = toOf(ellipse.size); + ofTranslate(ellipseCenter.x, ellipseCenter.y); + ofRotate(ellipse.angle); + ofDrawEllipse(0, 0, ellipseSize.x, ellipseSize.y); + ofPopMatrix(); + + // minimum area circle that encloses the contour + ofSetColor(cyanPrint); + float circleRadius; + ofVec2f circleCenter = toOf(contourFinder.getMinEnclosingCircle(i, circleRadius)); + ofDrawCircle(circleCenter, circleRadius); + + // convex hull of the contour + ofSetColor(yellowPrint); + ofPolyline convexHull = toOf(contourFinder.getConvexHull(i)); + convexHull.draw(); + + // defects of the convex hull + vector defects = contourFinder.getConvexityDefects(i); + for(int j = 0; j < defects.size(); j++) { + ofDrawLine(defects[j][0], defects[j][1], defects[j][2], defects[j][3]); + } + + // some different styles of contour centers + ofVec2f centroid = toOf(contourFinder.getCentroid(i)); + ofVec2f average = toOf(contourFinder.getAverage(i)); + ofVec2f center = toOf(contourFinder.getCenter(i)); + ofSetColor(cyanPrint); + ofDrawCircle(centroid, 1); + ofSetColor(magentaPrint); + ofDrawCircle(average, 1); + 
ofSetColor(yellowPrint); + ofDrawCircle(center, 1); + + // you can also get the area and perimeter using ofPolyline: + // ofPolyline::getArea() and ofPolyline::getPerimeter() + double area = contourFinder.getContourArea(i); + double length = contourFinder.getArcLength(i); + + // balance is useful for detecting when a shape has an "arm" sticking out + // if balance.length() is small, the shape is more symmetric: like I, O, X... + // if balance.length() is large, the shape is less symmetric: like L, P, F... + ofVec2f balance = toOf(contourFinder.getBalance(i)); + ofPushMatrix(); + ofTranslate(centroid.x, centroid.y); + ofScale(5, 5); + ofDrawLine(0, 0, balance.x, balance.y); + ofPopMatrix(); + } + + ofSetColor(255); + drawHighlightString(ofToString((int) ofGetFrameRate()) + " fps", 10, 10); + drawHighlightString(ofToString((int) threshold) + " threshold", 10, 30); + drawHighlightString(trackingColorMode == TRACK_COLOR_RGB ? "RGB tracking" : "hue tracking", 10, 50); + + ofTranslate(8, 75); + ofFill(); + ofSetColor(0); + ofDrawRectangle(-3, -3, 64+6, 64+6); + ofSetColor(targetColor); + ofDrawRectangle(0, 0, 64, 64); +} + +void ofApp::mousePressed(int x, int y, int button) { + targetColor = cam.getPixels().getColor(x, y); + contourFinder.setTargetColor(targetColor, trackingColorMode); +} + +void ofApp::keyPressed(int key) { + if(key == 'h') { + trackingColorMode = TRACK_COLOR_HS; + } + if(key == 'r') { + trackingColorMode = TRACK_COLOR_RGB; + } + contourFinder.setTargetColor(targetColor, trackingColorMode); +} \ No newline at end of file diff --git a/addons/ofxCv/example-contours-advanced/src/ofApp.h b/addons/ofxCv/example-contours-advanced/src/ofApp.h new file mode 100644 index 00000000000..3f6bc85d11e --- /dev/null +++ b/addons/ofxCv/example-contours-advanced/src/ofApp.h @@ -0,0 +1,19 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + void mousePressed(int x, int y, 
int button); + void keyPressed(int key); + + ofVideoGrabber cam; + ofxCv::ContourFinder contourFinder; + float threshold; + ofxCv::TrackingColorMode trackingColorMode; + ofColor targetColor; +}; diff --git a/addons/ofxCv/example-contours-basic/addons.make b/addons/ofxCv/example-contours-basic/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-contours-basic/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-contours-basic/src/main.cpp b/addons/ofxCv/example-contours-basic/src/main.cpp new file mode 100644 index 00000000000..6250422053f --- /dev/null +++ b/addons/ofxCv/example-contours-basic/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-contours-basic/src/ofApp.cpp b/addons/ofxCv/example-contours-basic/src/ofApp.cpp new file mode 100644 index 00000000000..d9e8932b00d --- /dev/null +++ b/addons/ofxCv/example-contours-basic/src/ofApp.cpp @@ -0,0 +1,24 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + cam.setup(640, 480); + contourFinder.setMinAreaRadius(10); + contourFinder.setMaxAreaRadius(200); +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + contourFinder.setThreshold(ofMap(mouseX, 0, ofGetWidth(), 0, 255)); + contourFinder.findContours(cam); + } +} + +void ofApp::draw() { + ofSetColor(255); + cam.draw(0, 0); + contourFinder.draw(); +} diff --git a/addons/ofxCv/example-contours-basic/src/ofApp.h b/addons/ofxCv/example-contours-basic/src/ofApp.h new file mode 100644 index 00000000000..bfaf524a4d2 --- /dev/null +++ b/addons/ofxCv/example-contours-basic/src/ofApp.h @@ -0,0 +1,14 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofVideoGrabber cam; + 
ofxCv::ContourFinder contourFinder; +}; diff --git a/addons/ofxCv/example-contours-color/addons.make b/addons/ofxCv/example-contours-color/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-contours-color/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-contours-color/src/main.cpp b/addons/ofxCv/example-contours-color/src/main.cpp new file mode 100644 index 00000000000..6250422053f --- /dev/null +++ b/addons/ofxCv/example-contours-color/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-contours-color/src/ofApp.cpp b/addons/ofxCv/example-contours-color/src/ofApp.cpp new file mode 100644 index 00000000000..944cca6bf64 --- /dev/null +++ b/addons/ofxCv/example-contours-color/src/ofApp.cpp @@ -0,0 +1,51 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + cam.setup(640, 480); + contourFinder.setMinAreaRadius(10); + contourFinder.setMaxAreaRadius(200); + trackingColorMode = TRACK_COLOR_RGB; +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + threshold = ofMap(mouseX, 0, ofGetWidth(), 0, 255); + contourFinder.setThreshold(threshold); + contourFinder.findContours(cam); + } +} + +void ofApp::draw() { + ofSetColor(255); + cam.draw(0, 0); + contourFinder.draw(); + drawHighlightString(ofToString((int) ofGetFrameRate()) + " fps", 10, 20); + drawHighlightString(ofToString((int) threshold) + " threshold", 10, 40); + drawHighlightString(trackingColorMode == TRACK_COLOR_HSV ? 
"HSV tracking" : "RGB tracking", 10, 60); + + ofTranslate(8, 75); + ofFill(); + ofSetColor(0); + ofDrawRectangle(-3, -3, 64+6, 64+6); + ofSetColor(targetColor); + ofDrawRectangle(0, 0, 64, 64); +} + +void ofApp::mousePressed(int x, int y, int button) { + targetColor = cam.getPixels().getColor(x, y); + contourFinder.setTargetColor(targetColor, trackingColorMode); +} + +void ofApp::keyPressed(int key) { + if(key == 'h') { + trackingColorMode = TRACK_COLOR_HSV; + } + if(key == 'r') { + trackingColorMode = TRACK_COLOR_RGB; + } + contourFinder.setTargetColor(targetColor, trackingColorMode); +} \ No newline at end of file diff --git a/addons/ofxCv/example-contours-color/src/ofApp.h b/addons/ofxCv/example-contours-color/src/ofApp.h new file mode 100644 index 00000000000..3f6bc85d11e --- /dev/null +++ b/addons/ofxCv/example-contours-color/src/ofApp.h @@ -0,0 +1,19 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + void mousePressed(int x, int y, int button); + void keyPressed(int key); + + ofVideoGrabber cam; + ofxCv::ContourFinder contourFinder; + float threshold; + ofxCv::TrackingColorMode trackingColorMode; + ofColor targetColor; +}; diff --git a/addons/ofxCv/example-contours-following/addons.make b/addons/ofxCv/example-contours-following/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-contours-following/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-contours-following/bin/data/video.mov b/addons/ofxCv/example-contours-following/bin/data/video.mov new file mode 100644 index 00000000000..30c0ea6dd06 Binary files /dev/null and b/addons/ofxCv/example-contours-following/bin/data/video.mov differ diff --git a/addons/ofxCv/example-contours-following/src/main.cpp b/addons/ofxCv/example-contours-following/src/main.cpp new file mode 100644 index 
00000000000..7de8ee5da41 --- /dev/null +++ b/addons/ofxCv/example-contours-following/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(320, 240, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-contours-following/src/ofApp.cpp b/addons/ofxCv/example-contours-following/src/ofApp.cpp new file mode 100644 index 00000000000..268f8e5614a --- /dev/null +++ b/addons/ofxCv/example-contours-following/src/ofApp.cpp @@ -0,0 +1,80 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +const float dyingTime = 1; + +void Glow::setup(const cv::Rect& track) { + color.setHsb(ofRandom(0, 255), 255, 255); + cur = toOf(track).getCenter(); + smooth = cur; +} + +void Glow::update(const cv::Rect& track) { + cur = toOf(track).getCenter(); + smooth.interpolate(cur, .5); + all.addVertex(smooth); +} + +void Glow::kill() { + float curTime = ofGetElapsedTimef(); + if(startedDying == 0) { + startedDying = curTime; + } else if(curTime - startedDying > dyingTime) { + dead = true; + } +} + +void Glow::draw() { + ofPushStyle(); + float size = 16; + ofSetColor(255); + if(startedDying) { + ofSetColor(ofColor::red); + size = ofMap(ofGetElapsedTimef() - startedDying, 0, dyingTime, size, 0, true); + } + ofNoFill(); + ofDrawCircle(cur, size); + ofSetColor(color); + all.draw(); + ofSetColor(255); + ofDrawBitmapString(ofToString(label), cur); + ofPopStyle(); +} + +void ofApp::setup() { + ofSetVerticalSync(true); + ofBackground(0); + + movie.load("video.mov"); + movie.play(); + + contourFinder.setMinAreaRadius(1); + contourFinder.setMaxAreaRadius(100); + contourFinder.setThreshold(15); + + // wait for half a frame before forgetting something + tracker.setPersistence(15); + // an object can move up to 50 pixels per frame + tracker.setMaximumDistance(50); +} + +void ofApp::update() { + movie.update(); + if(movie.isFrameNew()) { + blur(movie, 10); + contourFinder.findContours(movie); + tracker.track(contourFinder.getBoundingRects()); + } 
+} + +void ofApp::draw() { + ofSetColor(255); + movie.draw(0, 0); + contourFinder.draw(); + vector& followers = tracker.getFollowers(); + for(int i = 0; i < followers.size(); i++) { + followers[i].draw(); + } +} diff --git a/addons/ofxCv/example-contours-following/src/ofApp.h b/addons/ofxCv/example-contours-following/src/ofApp.h new file mode 100644 index 00000000000..673d5c0eee3 --- /dev/null +++ b/addons/ofxCv/example-contours-following/src/ofApp.h @@ -0,0 +1,31 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class Glow : public ofxCv::RectFollower { +protected: + ofColor color; + ofVec2f cur, smooth; + float startedDying; + ofPolyline all; +public: + Glow() + :startedDying(0) { + } + void setup(const cv::Rect& track); + void update(const cv::Rect& track); + void kill(); + void draw(); +}; + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofVideoPlayer movie; + ofxCv::ContourFinder contourFinder; + ofxCv::RectTrackerFollower tracker; +}; diff --git a/addons/ofxCv/example-contours-quad/addons.make b/addons/ofxCv/example-contours-quad/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-contours-quad/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-contours-quad/src/main.cpp b/addons/ofxCv/example-contours-quad/src/main.cpp new file mode 100644 index 00000000000..6250422053f --- /dev/null +++ b/addons/ofxCv/example-contours-quad/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-contours-quad/src/ofApp.cpp b/addons/ofxCv/example-contours-quad/src/ofApp.cpp new file mode 100644 index 00000000000..4efb7176a81 --- /dev/null +++ b/addons/ofxCv/example-contours-quad/src/ofApp.cpp @@ -0,0 +1,69 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void 
ofApp::setup() { + cam.setup(640, 480); + contourFinder.setMinAreaRadius(10); + contourFinder.setMaxAreaRadius(200); + unwarped.allocate(150, 100, OF_IMAGE_COLOR); +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + threshold = ofMap(mouseX, 0, ofGetWidth(), 0, 255); + contourFinder.setThreshold(threshold); + contourFinder.findContours(cam); + + int n = contourFinder.size(); + quads.clear(); + quads.resize(n); + for(int i = 0; i < n; i++) { + quads[i] = contourFinder.getFitQuad(i); + + // convert integer image coordinates Point2i to unwarp positions Point2f + vector warpPoints; + copy(quads[i].begin(), quads[i].end(), back_inserter(warpPoints)); + unwarpPerspective(cam, unwarped, warpPoints); + unwarped.update(); + } + } +} + +void ofApp::draw() { + ofSetColor(255); + cam.draw(0, 0); + + ofSetLineWidth(2); + contourFinder.draw(); + + ofNoFill(); + ofSetColor(magentaPrint); + for(int i = 0; i < quads.size(); i++) { + toOf(quads[i]).draw(); + } + + ofSetColor(255); + drawHighlightString(ofToString((int) ofGetFrameRate()) + " fps", 10, 20); + drawHighlightString(ofToString((int) threshold) + " threshold", 10, 40); + + ofTranslate(8, 75); + ofFill(); + ofSetColor(0); + ofDrawRectangle(-3, -3, 64+6, 64+6); + ofSetColor(targetColor); + ofDrawRectangle(0, 0, 64, 64); + + ofSetColor(255); + unwarped.draw(0, 70); +} + +void ofApp::mousePressed(int x, int y, int button) { + targetColor = cam.getPixels().getColor(x, y); + contourFinder.setTargetColor(targetColor, TRACK_COLOR_HSV); +} + +void ofApp::keyPressed(int key) { +} \ No newline at end of file diff --git a/addons/ofxCv/example-contours-quad/src/ofApp.h b/addons/ofxCv/example-contours-quad/src/ofApp.h new file mode 100644 index 00000000000..7e7d4f68da6 --- /dev/null +++ b/addons/ofxCv/example-contours-quad/src/ofApp.h @@ -0,0 +1,20 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + void 
mousePressed(int x, int y, int button); + void keyPressed(int key); + + ofVideoGrabber cam; + ofxCv::ContourFinder contourFinder; + vector< vector > quads; + float threshold; + ofColor targetColor; + ofImage unwarped; +}; diff --git a/addons/ofxCv/example-contours-tracking/addons.make b/addons/ofxCv/example-contours-tracking/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-contours-tracking/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-contours-tracking/bin/data/video.mov b/addons/ofxCv/example-contours-tracking/bin/data/video.mov new file mode 100644 index 00000000000..30c0ea6dd06 Binary files /dev/null and b/addons/ofxCv/example-contours-tracking/bin/data/video.mov differ diff --git a/addons/ofxCv/example-contours-tracking/src/main.cpp b/addons/ofxCv/example-contours-tracking/src/main.cpp new file mode 100644 index 00000000000..7de8ee5da41 --- /dev/null +++ b/addons/ofxCv/example-contours-tracking/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(320, 240, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-contours-tracking/src/ofApp.cpp b/addons/ofxCv/example-contours-tracking/src/ofApp.cpp new file mode 100644 index 00000000000..cab26d6350c --- /dev/null +++ b/addons/ofxCv/example-contours-tracking/src/ofApp.cpp @@ -0,0 +1,102 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + ofBackground(0); + + movie.load("video.mov"); + movie.play(); + + contourFinder.setMinAreaRadius(1); + contourFinder.setMaxAreaRadius(100); + contourFinder.setThreshold(15); + // wait for half a frame before forgetting something + contourFinder.getTracker().setPersistence(15); + // an object can move up to 32 pixels per frame + contourFinder.getTracker().setMaximumDistance(32); + + showLabels = true; +} + +void ofApp::update() { + 
movie.update(); + if(movie.isFrameNew()) { + blur(movie, 10); + contourFinder.findContours(movie); + } +} + +void ofApp::draw() { + ofSetBackgroundAuto(showLabels); + RectTracker& tracker = contourFinder.getTracker(); + + if(showLabels) { + ofSetColor(255); + movie.draw(0, 0); + contourFinder.draw(); + for(int i = 0; i < contourFinder.size(); i++) { + ofPoint center = toOf(contourFinder.getCenter(i)); + ofPushMatrix(); + ofTranslate(center.x, center.y); + int label = contourFinder.getLabel(i); + string msg = ofToString(label) + ":" + ofToString(tracker.getAge(label)); + ofDrawBitmapString(msg, 0, 0); + ofVec2f velocity = toOf(contourFinder.getVelocity(i)); + ofScale(5, 5); + ofDrawLine(0, 0, velocity.x, velocity.y); + ofPopMatrix(); + } + } else { + for(int i = 0; i < contourFinder.size(); i++) { + unsigned int label = contourFinder.getLabel(i); + // only draw a line if this is not a new label + if(tracker.existsPrevious(label)) { + // use the label to pick a random color + ofSeedRandom(label << 24); + ofSetColor(ofColor::fromHsb(ofRandom(255), 255, 255)); + // get the tracked object (cv::Rect) at current and previous position + const cv::Rect& previous = tracker.getPrevious(label); + const cv::Rect& current = tracker.getCurrent(label); + // get the centers of the rectangles + ofVec2f previousPosition(previous.x + previous.width / 2, previous.y + previous.height / 2); + ofVec2f currentPosition(current.x + current.width / 2, current.y + current.height / 2); + ofDrawLine(previousPosition, currentPosition); + } + } + } + + // this chunk of code visualizes the creation and destruction of labels + const vector& currentLabels = tracker.getCurrentLabels(); + const vector& previousLabels = tracker.getPreviousLabels(); + const vector& newLabels = tracker.getNewLabels(); + const vector& deadLabels = tracker.getDeadLabels(); + ofSetColor(cyanPrint); + for(int i = 0; i < currentLabels.size(); i++) { + int j = currentLabels[i]; + ofDrawLine(j, 0, j, 4); + } + 
ofSetColor(magentaPrint); + for(int i = 0; i < previousLabels.size(); i++) { + int j = previousLabels[i]; + ofDrawLine(j, 4, j, 8); + } + ofSetColor(yellowPrint); + for(int i = 0; i < newLabels.size(); i++) { + int j = newLabels[i]; + ofDrawLine(j, 8, j, 12); + } + ofSetColor(ofColor::white); + for(int i = 0; i < deadLabels.size(); i++) { + int j = deadLabels[i]; + ofDrawLine(j, 12, j, 16); + } +} + +void ofApp::keyPressed(int key) { + if(key == ' ') { + showLabels = !showLabels; + } +} \ No newline at end of file diff --git a/addons/ofxCv/example-contours-tracking/src/ofApp.h b/addons/ofxCv/example-contours-tracking/src/ofApp.h new file mode 100644 index 00000000000..4f36603243f --- /dev/null +++ b/addons/ofxCv/example-contours-tracking/src/ofApp.h @@ -0,0 +1,17 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + void keyPressed(int key); + + float threshold; + ofVideoPlayer movie; + ofxCv::ContourFinder contourFinder; + bool showLabels; +}; diff --git a/addons/ofxCv/example-difference-columns/addons.make b/addons/ofxCv/example-difference-columns/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-difference-columns/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-difference-columns/src/main.cpp b/addons/ofxCv/example-difference-columns/src/main.cpp new file mode 100644 index 00000000000..f3490db9325 --- /dev/null +++ b/addons/ofxCv/example-difference-columns/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(320 * 2, 240, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-difference-columns/src/ofApp.cpp b/addons/ofxCv/example-difference-columns/src/ofApp.cpp new file mode 100644 index 00000000000..7bfed6c84fb --- /dev/null +++ b/addons/ofxCv/example-difference-columns/src/ofApp.cpp @@ -0,0 
+1,55 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + cam.setup(320, 240); + + // imitate() will set up previous and diff + // so they have the same size and type as cam + imitate(previous, cam); + imitate(diff, cam); +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + // take the absolute difference of prev and cam and save it inside diff + absdiff(previous, cam, diff); + diff.update(); + + // like ofSetPixels, but more concise and cross-toolkit + copy(cam, previous); + + // this is the key line: get the average of each column + columnMean = meanCols(diff); + } +} + +void ofApp::draw() { + ofSetColor(255); + cam.draw(0, 0); + + ofTranslate(320, 0); + diff.draw(0, 0); + // draw the mean for each channel + for(int k = 0; k < 3; k++) { + // use the correct color for each channel + switch(k) { + case 0: ofSetColor(ofColor::red); break; + case 1: ofSetColor(ofColor::blue); break; + case 2: ofSetColor(ofColor::green); break; + } + + ofNoFill(); + ofBeginShape(); + for(int i = 0; i < columnMean.rows; i++) { + // Vec3b is one way of storing 24-bit (3 byte) colors + Vec3b cur = columnMean.at(i); + ofVertex(i, cur[k]); + } + ofEndShape(); + } +} diff --git a/addons/ofxCv/example-difference-columns/src/ofApp.h b/addons/ofxCv/example-difference-columns/src/ofApp.h new file mode 100644 index 00000000000..19519bfde50 --- /dev/null +++ b/addons/ofxCv/example-difference-columns/src/ofApp.h @@ -0,0 +1,16 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofVideoGrabber cam; + ofPixels previous; + ofImage diff; + cv::Mat columnMean; +}; diff --git a/addons/ofxCv/example-difference/addons.make b/addons/ofxCv/example-difference/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-difference/addons.make @@ -0,0 +1,3 @@ +ofxCv 
+ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-difference/src/main.cpp b/addons/ofxCv/example-difference/src/main.cpp new file mode 100644 index 00000000000..f3490db9325 --- /dev/null +++ b/addons/ofxCv/example-difference/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(320 * 2, 240, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-difference/src/ofApp.cpp b/addons/ofxCv/example-difference/src/ofApp.cpp new file mode 100644 index 00000000000..618cee2e3a2 --- /dev/null +++ b/addons/ofxCv/example-difference/src/ofApp.cpp @@ -0,0 +1,51 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + cam.setup(320, 240); + + // imitate() will set up previous and diff + // so they have the same size and type as cam + imitate(previous, cam); + imitate(diff, cam); +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + // take the absolute difference of prev and cam and save it inside diff + absdiff(previous, cam, diff); + diff.update(); + + // like ofSetPixels, but more concise and cross-toolkit + copy(cam, previous); + + // mean() returns a Scalar. 
it's a cv:: function so we have to pass a Mat + diffMean = mean(toCv(diff)); + + // you can only do math between Scalars, + // but it's easy to make a Scalar from an int (shown here) + diffMean *= Scalar(50); + } +} + +void ofApp::draw() { + ofSetColor(255); + cam.draw(0, 0); + diff.draw(320, 0); + + // use the [] operator to get elements from a Scalar + float diffRed = diffMean[0]; + float diffGreen = diffMean[1]; + float diffBlue = diffMean[2]; + + ofSetColor(255, 0, 0); + ofDrawRectangle(0, 0, diffRed, 10); + ofSetColor(0, 255, 0); + ofDrawRectangle(0, 15, diffGreen, 10); + ofSetColor(0, 0, 255); + ofDrawRectangle(0, 30, diffBlue, 10); +} diff --git a/addons/ofxCv/example-difference/src/ofApp.h b/addons/ofxCv/example-difference/src/ofApp.h new file mode 100644 index 00000000000..b4436871040 --- /dev/null +++ b/addons/ofxCv/example-difference/src/ofApp.h @@ -0,0 +1,18 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofVideoGrabber cam; + ofPixels previous; + ofImage diff; + + // a scalar is like an ofVec4f but normally used for storing color information + cv::Scalar diffMean; +}; diff --git a/addons/ofxCv/example-edge/addons.make b/addons/ofxCv/example-edge/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-edge/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-edge/src/main.cpp b/addons/ofxCv/example-edge/src/main.cpp new file mode 100644 index 00000000000..76206abf7b8 --- /dev/null +++ b/addons/ofxCv/example-edge/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640 * 2, 480 * 2, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-edge/src/ofApp.cpp b/addons/ofxCv/example-edge/src/ofApp.cpp new file mode 100644 index 00000000000..3b6b4c47009 --- /dev/null +++ 
b/addons/ofxCv/example-edge/src/ofApp.cpp @@ -0,0 +1,27 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + cam.setup(640, 480); +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + convertColor(cam, gray, CV_RGB2GRAY); + Canny(gray, edge, mouseX, mouseY, 3); + Sobel(gray, sobel); + gray.update(); + sobel.update(); + edge.update(); + } +} + +void ofApp::draw() { + cam.draw(0, 0); + gray.draw(0,480); + edge.draw(640, 0); + sobel.draw(640, 480); +} diff --git a/addons/ofxCv/example-edge/src/ofApp.h b/addons/ofxCv/example-edge/src/ofApp.h new file mode 100644 index 00000000000..a6c6f8a0a29 --- /dev/null +++ b/addons/ofxCv/example-edge/src/ofApp.h @@ -0,0 +1,14 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofVideoGrabber cam; + ofImage gray, edge, sobel; +}; diff --git a/addons/ofxCv/example-empty/addons.make b/addons/ofxCv/example-empty/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-empty/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-empty/src/main.cpp b/addons/ofxCv/example-empty/src/main.cpp new file mode 100644 index 00000000000..824b0386bc3 --- /dev/null +++ b/addons/ofxCv/example-empty/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(1025, 512, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-empty/src/ofApp.cpp b/addons/ofxCv/example-empty/src/ofApp.cpp new file mode 100644 index 00000000000..75d1dff7fc6 --- /dev/null +++ b/addons/ofxCv/example-empty/src/ofApp.cpp @@ -0,0 +1,14 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { +} + +void ofApp::update() { +} + +void ofApp::draw() { + +} diff --git a/addons/ofxCv/example-empty/src/ofApp.h 
b/addons/ofxCv/example-empty/src/ofApp.h new file mode 100644 index 00000000000..b025165ab16 --- /dev/null +++ b/addons/ofxCv/example-empty/src/ofApp.h @@ -0,0 +1,11 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); +}; diff --git a/addons/ofxCv/example-estimate-affine/addons.make b/addons/ofxCv/example-estimate-affine/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-estimate-affine/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-estimate-affine/src/main.cpp b/addons/ofxCv/example-estimate-affine/src/main.cpp new file mode 100644 index 00000000000..6250422053f --- /dev/null +++ b/addons/ofxCv/example-estimate-affine/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-estimate-affine/src/ofApp.cpp b/addons/ofxCv/example-estimate-affine/src/ofApp.cpp new file mode 100644 index 00000000000..6034ca9b387 --- /dev/null +++ b/addons/ofxCv/example-estimate-affine/src/ofApp.cpp @@ -0,0 +1,55 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + + ofMatrix4x4 rigid; + ofVec3f translation(ofRandomf(), ofRandomf(), ofRandomf()); + ofQuaternion rotation(ofRandomf(), ofVec3f(ofRandomf(), ofRandomf(), ofRandomf())); + rigid.translate(translation); + rigid.rotate(rotation); + + vector from; + for(int i = 0; i < 4; i++) { + from.push_back(ofVec3f(ofRandom(1,2), ofRandom(1,2), ofRandom(5,6))); + from.push_back(ofVec3f(ofRandom(3,4), ofRandom(3,4), ofRandom(5,6))); + from.push_back(ofVec3f(ofRandom(1,2), ofRandom(3,4), ofRandom(5,6))); + from.push_back(ofVec3f(ofRandom(3,4), ofRandom(1,2), ofRandom(5,6))); + } + + vector to; + for(int i = 0; i < from.size(); i++) { + // opencv 
assumes you're doing premultiplication + to.push_back(rigid.preMult(from[i])); + } + + ofMatrix4x4 rigidEstimate = estimateAffine3D(from, to); + + cout << "original matrix: " << endl << rigid << endl; + cout << "estimated as: " << endl << rigidEstimate << endl; + + for(int i = 0; i < from.size(); i++) { + // opencv assumes you're doing premultiplication + ofVec3f after = rigidEstimate.preMult(from[i]); + cout << from[i] << " -> " << to[i] << " estimated as: " << after << endl; + } + + ofVec3f decompTranslation, decompScale; + ofQuaternion decompRotation, decompSo; + rigidEstimate.decompose(decompTranslation, decompRotation, decompScale, decompSo); + cout << "translation: " << translation << endl; + cout << "estimated as: " << decompTranslation << endl; + cout << "rotation: " << endl << rotation << endl; + cout << "estimated as: " << endl << decompRotation << endl; +} + +void ofApp::update() { +} + +void ofApp::draw() { + ofBackground(0); + ofDrawBitmapString("See console window for results.", 10, 20); +} diff --git a/addons/ofxCv/example-estimate-affine/src/ofApp.h b/addons/ofxCv/example-estimate-affine/src/ofApp.h new file mode 100644 index 00000000000..b025165ab16 --- /dev/null +++ b/addons/ofxCv/example-estimate-affine/src/ofApp.h @@ -0,0 +1,11 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); +}; diff --git a/addons/ofxCv/example-face-follow/addons.make b/addons/ofxCv/example-face-follow/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-face-follow/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-face-follow/bin/data/readme.txt b/addons/ofxCv/example-face-follow/bin/data/readme.txt new file mode 100644 index 00000000000..680ab9396f1 --- /dev/null +++ b/addons/ofxCv/example-face-follow/bin/data/readme.txt @@ -0,0 +1 @@ +You need to add a 
copy of haarcascade_frontalface_default.xml to this directory. \ No newline at end of file diff --git a/addons/ofxCv/example-face-follow/bin/data/sunglasses.png b/addons/ofxCv/example-face-follow/bin/data/sunglasses.png new file mode 100644 index 00000000000..7b1384ced3c Binary files /dev/null and b/addons/ofxCv/example-face-follow/bin/data/sunglasses.png differ diff --git a/addons/ofxCv/example-face-follow/src/main.cpp b/addons/ofxCv/example-face-follow/src/main.cpp new file mode 100644 index 00000000000..6250422053f --- /dev/null +++ b/addons/ofxCv/example-face-follow/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-face-follow/src/ofApp.cpp b/addons/ofxCv/example-face-follow/src/ofApp.cpp new file mode 100644 index 00000000000..0059ff651ef --- /dev/null +++ b/addons/ofxCv/example-face-follow/src/ofApp.cpp @@ -0,0 +1,42 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + ofSetFrameRate(120); + finder.setup("haarcascade_frontalface_default.xml"); + finder.setPreset(ObjectFinder::Fast); + finder.getTracker().setSmoothingRate(.3); + cam.setup(640, 480); + sunglasses.load("sunglasses.png"); + ofEnableAlphaBlending(); +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + finder.update(cam); + } +} + +void ofApp::draw() { + cam.draw(0, 0); + + for(int i = 0; i < finder.size(); i++) { + ofRectangle object = finder.getObjectSmoothed(i); + sunglasses.setAnchorPercent(.5, .5); + float scaleAmount = .85 * object.width / sunglasses.getWidth(); + ofPushMatrix(); + ofTranslate(object.x + object.width / 2., object.y + object.height * .42); + ofScale(scaleAmount, scaleAmount); + sunglasses.draw(0, 0); + ofPopMatrix(); + ofPushMatrix(); + ofTranslate(object.getPosition()); + ofDrawBitmapStringHighlight(ofToString(finder.getLabel(i)), 0, 0); + ofDrawLine(ofVec2f(), 
toOf(finder.getVelocity(i)) * 10); + ofPopMatrix(); + } +} diff --git a/addons/ofxCv/example-face-follow/src/ofApp.h b/addons/ofxCv/example-face-follow/src/ofApp.h new file mode 100644 index 00000000000..e106c0cf634 --- /dev/null +++ b/addons/ofxCv/example-face-follow/src/ofApp.h @@ -0,0 +1,15 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofVideoGrabber cam; + ofxCv::ObjectFinder finder; + ofImage sunglasses; +}; diff --git a/addons/ofxCv/example-face-zoom/addons.make b/addons/ofxCv/example-face-zoom/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-face-zoom/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-face-zoom/bin/data/readme.txt b/addons/ofxCv/example-face-zoom/bin/data/readme.txt new file mode 100644 index 00000000000..680ab9396f1 --- /dev/null +++ b/addons/ofxCv/example-face-zoom/bin/data/readme.txt @@ -0,0 +1 @@ +You need to add a copy of haarcascade_frontalface_default.xml to this directory. 
\ No newline at end of file diff --git a/addons/ofxCv/example-face-zoom/src/main.cpp b/addons/ofxCv/example-face-zoom/src/main.cpp new file mode 100644 index 00000000000..1c4aae5c183 --- /dev/null +++ b/addons/ofxCv/example-face-zoom/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(512, 512, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-face-zoom/src/ofApp.cpp b/addons/ofxCv/example-face-zoom/src/ofApp.cpp new file mode 100644 index 00000000000..fcf4b2b75c2 --- /dev/null +++ b/addons/ofxCv/example-face-zoom/src/ofApp.cpp @@ -0,0 +1,32 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + ofSetFrameRate(120); + + objectFinder.setup(ofToDataPath("haarcascade_frontalface_default.xml")); + objectFinder.setPreset(ObjectFinder::Fast); + cam.setup(640, 480); + cropped.allocate(ofGetWidth(), ofGetHeight(), OF_IMAGE_COLOR); +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + objectFinder.update(cam); + if(objectFinder.size() > 0) { + cv::Rect roi = toCv(objectFinder.getObject(0)); + Mat camMat = toCv(cam); + Mat croppedCamMat(camMat, roi); + resize(croppedCamMat, cropped); + cropped.update(); + } + } +} + +void ofApp::draw() { + cropped.draw(0, 0); +} diff --git a/addons/ofxCv/example-face-zoom/src/ofApp.h b/addons/ofxCv/example-face-zoom/src/ofApp.h new file mode 100644 index 00000000000..1ac0f663325 --- /dev/null +++ b/addons/ofxCv/example-face-zoom/src/ofApp.h @@ -0,0 +1,15 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofVideoGrabber cam; + ofImage cropped; + ofxCv::ObjectFinder objectFinder; +}; diff --git a/addons/ofxCv/example-face/addons.make b/addons/ofxCv/example-face/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-face/addons.make @@ -0,0 +1,3 @@ 
+ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-face/bin/data/readme.txt b/addons/ofxCv/example-face/bin/data/readme.txt new file mode 100644 index 00000000000..680ab9396f1 --- /dev/null +++ b/addons/ofxCv/example-face/bin/data/readme.txt @@ -0,0 +1 @@ +You need to add a copy of haarcascade_frontalface_default.xml to this directory. \ No newline at end of file diff --git a/addons/ofxCv/example-face/src/main.cpp b/addons/ofxCv/example-face/src/main.cpp new file mode 100644 index 00000000000..6250422053f --- /dev/null +++ b/addons/ofxCv/example-face/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-face/src/ofApp.cpp b/addons/ofxCv/example-face/src/ofApp.cpp new file mode 100644 index 00000000000..fa5429c0a20 --- /dev/null +++ b/addons/ofxCv/example-face/src/ofApp.cpp @@ -0,0 +1,25 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + ofSetFrameRate(120); + finder.setup("haarcascade_frontalface_default.xml"); + finder.setPreset(ObjectFinder::Fast); + cam.setup(640, 480); +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + finder.update(cam); + } +} + +void ofApp::draw() { + cam.draw(0, 0); + finder.draw(); + ofDrawBitmapStringHighlight(ofToString(finder.size()), 10, 20); +} diff --git a/addons/ofxCv/example-face/src/ofApp.h b/addons/ofxCv/example-face/src/ofApp.h new file mode 100644 index 00000000000..52625f82159 --- /dev/null +++ b/addons/ofxCv/example-face/src/ofApp.h @@ -0,0 +1,14 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofVideoGrabber cam; + ofxCv::ObjectFinder finder; +}; diff --git a/addons/ofxCv/example-flow-distort-shader/addons.make b/addons/ofxCv/example-flow-distort-shader/addons.make new file mode 
100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-flow-distort-shader/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-flow-distort-shader/bin/data/MotionAmplifier.frag b/addons/ofxCv/example-flow-distort-shader/bin/data/MotionAmplifier.frag new file mode 100644 index 00000000000..7999b23a64a --- /dev/null +++ b/addons/ofxCv/example-flow-distort-shader/bin/data/MotionAmplifier.frag @@ -0,0 +1,8 @@ +#version 120 + +uniform sampler2DRect source; +varying vec2 texCoord; + +void main() { + gl_FragColor = texture2DRect(source, texCoord); +} \ No newline at end of file diff --git a/addons/ofxCv/example-flow-distort-shader/bin/data/MotionAmplifier.vert b/addons/ofxCv/example-flow-distort-shader/bin/data/MotionAmplifier.vert new file mode 100644 index 00000000000..1a41b62d69c --- /dev/null +++ b/addons/ofxCv/example-flow-distort-shader/bin/data/MotionAmplifier.vert @@ -0,0 +1,21 @@ +#version 120 + +uniform sampler2DRect source; +uniform sampler2DRect flow; +uniform float scaleFactor; +uniform float strength; +uniform float sourceRescale; +uniform float flowRescale; +varying vec2 texCoord; + +void main() { + vec2 baseCoord = gl_Vertex.xy; + texCoord = baseCoord * sourceRescale; + vec2 offset = texture2DRect(flow, baseCoord * flowRescale).xy; + vec4 position = gl_Vertex; + offset = (offset - vec2(.5, .5)) / scaleFactor; + position.x += strength * offset.x; + position.y += strength * offset.y; + position.z += length(offset); + gl_Position = gl_ModelViewProjectionMatrix * position; +} \ No newline at end of file diff --git a/addons/ofxCv/example-flow-distort-shader/src/MotionAmplifier.h b/addons/ofxCv/example-flow-distort-shader/src/MotionAmplifier.h new file mode 100644 index 00000000000..407abd0ac24 --- /dev/null +++ b/addons/ofxCv/example-flow-distort-shader/src/MotionAmplifier.h @@ -0,0 +1,128 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class MotionAmplifier { 
+private: + cv::Mat rescaled, flow3; + ofxCv::FlowFarneback flow; + ofShader shader; + float scaleFactor; + ofTexture flowTexture; + ofVboMesh mesh; + float rescale; + + int stepSize, xSteps, ySteps; + cv::Mat accumulator; + bool needToReset; + + float strength, learningRate; + int blurAmount, windowSize; + + void duplicateFirstChannel(cv::Mat& twoChannel, cv::Mat& threeChannel) { + vector each; + cv::split(twoChannel, each); + each.push_back(each[0]); + cv::merge(each, threeChannel); + } + +public: + void setup(int w, int h, int stepSize, float rescale = 1) { + this->rescale = rescale; + shader.load("MotionAmplifier"); + scaleFactor = 1. / 10; // could dynamically calculate this from flow3 + needToReset = false; + + mesh.setMode(OF_PRIMITIVE_TRIANGLES); + this->stepSize = stepSize; + xSteps = 1+((rescale * w) / stepSize); + ySteps = 1+((rescale * h) / stepSize); + for(int y = 0; y < ySteps; y++) { + for(int x = 0; x < xSteps; x++) { + mesh.addVertex(ofVec2f(x, y) * stepSize / rescale); + } + } + for(int y = 0; y + 1 < ySteps; y++) { + for(int x = 0; x + 1 < xSteps; x++) { + int nw = y * xSteps + x; + int ne = nw + 1; + int sw = nw + xSteps; + int se = sw + 1; + mesh.addIndex(nw); + mesh.addIndex(ne); + mesh.addIndex(se); + mesh.addIndex(nw); + mesh.addIndex(se); + mesh.addIndex(sw); + } + } + } + + template + void update(T& img) { + ofxCv::resize(img, rescaled, rescale, rescale); + flow.calcOpticalFlow(rescaled); + duplicateFirstChannel(flow.getFlow(), flow3); + flow3 *= scaleFactor; + flow3 += cv::Scalar_(.5, .5, 0); + ofxCv::blur(flow3, blurAmount); + int w = flow3.cols, h = flow3.rows; + if(needToReset || accumulator.size() != flow3.size()) { + needToReset = false; + ofxCv::copy(flow3, accumulator); + } + cv::accumulateWeighted(flow3, accumulator, learningRate); + // zero the edges + cv::rectangle(accumulator, cv::Point(0, 0), cv::Point(w-1, h-1), cv::Scalar(.5, .5, 0)); + flowTexture.loadData((float*) accumulator.ptr(), w, h, GL_RGB); + } + + void 
draw(ofBaseHasTexture& tex) { + if(flowTexture.isAllocated()) { + shader.begin(); + shader.setUniformTexture("source", tex, 1); + shader.setUniformTexture("flow", flowTexture, 2); + shader.setUniform1f("strength", strength); + shader.setUniform1f("scaleFactor", scaleFactor); + shader.setUniform1f("flowRescale", rescale); + shader.setUniform1f("sourceRescale", 1); + mesh.drawFaces(); + shader.end(); + } + } + + void drawMesh() { + if(flowTexture.isAllocated()) { + shader.begin(); + shader.setUniformTexture("source", flowTexture, 1); + shader.setUniformTexture("flow", flowTexture, 2); + shader.setUniform1f("strength", strength); + shader.setUniform1f("scaleFactor", scaleFactor); + shader.setUniform1f("flowRescale", rescale); + shader.setUniform1f("sourceRescale", rescale); + mesh.drawWireframe(); + shader.end(); + } + } + + ofTexture& getFlowTexture() { + return flowTexture; + } + + void setStrength(float strength) { + this->strength = strength; + } + + void setLearningRate(float learningRate) { + this->learningRate = learningRate; + } + + void setBlurAmount(int blurAmount) { + this->blurAmount = blurAmount; + } + + void setWindowSize(int windowSize) { + flow.setWindowSize(windowSize); + } +}; \ No newline at end of file diff --git a/addons/ofxCv/example-flow-distort-shader/src/main.cpp b/addons/ofxCv/example-flow-distort-shader/src/main.cpp new file mode 100644 index 00000000000..389bd6f034b --- /dev/null +++ b/addons/ofxCv/example-flow-distort-shader/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(1280, 720, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-flow-distort-shader/src/ofApp.cpp b/addons/ofxCv/example-flow-distort-shader/src/ofApp.cpp new file mode 100644 index 00000000000..bd6860c1f0c --- /dev/null +++ b/addons/ofxCv/example-flow-distort-shader/src/ofApp.cpp @@ -0,0 +1,30 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + cam.setup(1280, 720); + 
motionAmplifier.setup(cam.getWidth(), cam.getHeight(), 2, .25); +} + +void ofApp::update() { + motionAmplifier.setStrength(ofMap(mouseX, 0, ofGetWidth(), -10, 10)); + motionAmplifier.setLearningRate(ofMap(mouseY, 0, ofGetHeight(), 0, 1, true)); + motionAmplifier.setBlurAmount(0); + motionAmplifier.setWindowSize(8); + + cam.update(); + if(cam.isFrameNew()) { + motionAmplifier.update(cam); + } +} + +void ofApp::draw() { + ofBackground(0); + ofSetupScreenOrtho(ofGetWidth(), ofGetHeight(), -100, +100); + ofEnableDepthTest(); + motionAmplifier.draw(cam); +// motionAmplifier.drawMesh(); + ofDisableDepthTest(); +} \ No newline at end of file diff --git a/addons/ofxCv/example-flow-distort-shader/src/ofApp.h b/addons/ofxCv/example-flow-distort-shader/src/ofApp.h new file mode 100644 index 00000000000..5420051834e --- /dev/null +++ b/addons/ofxCv/example-flow-distort-shader/src/ofApp.h @@ -0,0 +1,15 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" +#include "MotionAmplifier.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofVideoGrabber cam; + MotionAmplifier motionAmplifier; +}; \ No newline at end of file diff --git a/addons/ofxCv/example-flow-distort/addons.make b/addons/ofxCv/example-flow-distort/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-flow-distort/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-flow-distort/src/main.cpp b/addons/ofxCv/example-flow-distort/src/main.cpp new file mode 100644 index 00000000000..6250422053f --- /dev/null +++ b/addons/ofxCv/example-flow-distort/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-flow-distort/src/ofApp.cpp b/addons/ofxCv/example-flow-distort/src/ofApp.cpp new file mode 100644 index 00000000000..0c119f2c8eb --- /dev/null +++ 
b/addons/ofxCv/example-flow-distort/src/ofApp.cpp @@ -0,0 +1,66 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + ofSetFrameRate(120); + cam.setup(320, 240); + + mesh.setMode(OF_PRIMITIVE_TRIANGLES); + stepSize = 8; + ySteps = cam.getHeight() / stepSize; + xSteps = cam.getWidth() / stepSize; + for(int y = 0; y < ySteps; y++) { + for(int x = 0; x < xSteps; x++) { + mesh.addVertex(ofVec2f(x * stepSize, y * stepSize)); + mesh.addTexCoord(ofVec2f(x * stepSize, y * stepSize)); + } + } + for(int y = 0; y + 1 < ySteps; y++) { + for(int x = 0; x + 1 < xSteps; x++) { + int nw = y * xSteps + x; + int ne = nw + 1; + int sw = nw + xSteps; + int se = sw + 1; + mesh.addIndex(nw); + mesh.addIndex(ne); + mesh.addIndex(se); + mesh.addIndex(nw); + mesh.addIndex(se); + mesh.addIndex(sw); + } + } +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + flow.setWindowSize(8); + flow.calcOpticalFlow(cam); + int i = 0; + float distortionStrength = 4; + for(int y = 1; y + 1 < ySteps; y++) { + for(int x = 1; x + 1 < xSteps; x++) { + int i = y * xSteps + x; + ofVec2f position(x * stepSize, y * stepSize); + ofRectangle area(position - ofVec2f(stepSize, stepSize) / 2, stepSize, stepSize); + ofVec2f offset = flow.getAverageFlowInRegion(area); + mesh.setVertex(i, position + distortionStrength * offset); + i++; + } + } + } +} + +void ofApp::draw() { + ofBackground(0); + ofScale(2, 2); + cam.getTextureReference().bind(); + mesh.draw(); + cam.getTextureReference().unbind(); + if(ofGetMousePressed()) { + mesh.drawWireframe(); + } +} \ No newline at end of file diff --git a/addons/ofxCv/example-flow-distort/src/ofApp.h b/addons/ofxCv/example-flow-distort/src/ofApp.h new file mode 100644 index 00000000000..2963bf25080 --- /dev/null +++ b/addons/ofxCv/example-flow-distort/src/ofApp.h @@ -0,0 +1,16 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void 
setup(); + void update(); + void draw(); + + ofVideoGrabber cam; + ofxCv::FlowFarneback flow; + ofMesh mesh; + int stepSize, xSteps, ySteps; +}; \ No newline at end of file diff --git a/addons/ofxCv/example-flow-keypoints/addons.make b/addons/ofxCv/example-flow-keypoints/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-flow-keypoints/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-flow-keypoints/src/main.cpp b/addons/ofxCv/example-flow-keypoints/src/main.cpp new file mode 100644 index 00000000000..d0c88615fd8 --- /dev/null +++ b/addons/ofxCv/example-flow-keypoints/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640 * 2, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-flow-keypoints/src/ofApp.cpp b/addons/ofxCv/example-flow-keypoints/src/ofApp.cpp new file mode 100644 index 00000000000..9c04a008923 --- /dev/null +++ b/addons/ofxCv/example-flow-keypoints/src/ofApp.cpp @@ -0,0 +1,54 @@ +#include "ofApp.h" + +using namespace cv; +using namespace ofxCv; + +void ofApp::setup(){ + grabber.setup(640,480); +} + +void ofApp::update(){ + grabber.update(); + if(grabber.isFrameNew()){ + flow.calcOpticalFlow(grabber); + } +} + +void ofApp::draw(){ + grabber.draw(0,0); + flow.draw(); + if(ofGetMousePressed()){ + ofNoFill(); + ofDrawRectangle(rect); + } +} + +void ofApp::mouseDragged(int x, int y, int button){ + ofVec2f p2(x,y); + rect.set(p1,p2.x-p1.x,p2.y-p1.y); +} + +void ofApp::mousePressed(int x, int y, int button){ + p1.set(x,y); +} + +void ofApp::mouseReleased(int x, int y, int button){ + ofVec2f p2(x,y); + rect.set(p1,p2.x-p1.x,p2.y-p1.y); + vector keypoints; + vector keypointsInside; + vector featuresToTrack; + copyGray(grabber, grabberGray); + FAST(grabberGray,keypoints,2); + for(int i=0;i=2 && (CV_MINOR_VERSION>4 || (CV_MINOR_VERSION==4 && CV_SUBMINOR_VERSION>=1)) + 
KeyPointsFilter::retainBest(keypointsInside,30); + #endif + KeyPoint::convert(keypointsInside,featuresToTrack); + flow.setFeaturesToTrack(featuresToTrack); +} + diff --git a/addons/ofxCv/example-flow-keypoints/src/ofApp.h b/addons/ofxCv/example-flow-keypoints/src/ofApp.h new file mode 100644 index 00000000000..23309879e6c --- /dev/null +++ b/addons/ofxCv/example-flow-keypoints/src/ofApp.h @@ -0,0 +1,21 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp{ +public: + void setup(); + void update(); + void draw(); + + void mouseDragged(int x, int y, int button); + void mousePressed(int x, int y, int button); + void mouseReleased(int x, int y, int button); + + ofVideoGrabber grabber; + cv::Mat grabberGray; + ofxCv::FlowPyrLK flow; + ofVec2f p1; + ofRectangle rect; +}; diff --git a/addons/ofxCv/example-flow/addons.make b/addons/ofxCv/example-flow/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-flow/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-flow/src/main.cpp b/addons/ofxCv/example-flow/src/main.cpp new file mode 100644 index 00000000000..52bbc374705 --- /dev/null +++ b/addons/ofxCv/example-flow/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main(){ + ofSetupOpenGL(1024, 768, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-flow/src/ofApp.cpp b/addons/ofxCv/example-flow/src/ofApp.cpp new file mode 100644 index 00000000000..b5af9b19643 --- /dev/null +++ b/addons/ofxCv/example-flow/src/ofApp.cpp @@ -0,0 +1,66 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofBackground(0); + + camera.setup(320, 240); + + gui.setup(); + + gui.add(lkMaxLevel.set("lkMaxLevel", 3, 0, 8)); + gui.add(lkMaxFeatures.set("lkMaxFeatures", 200, 1, 1000)); + gui.add(lkQualityLevel.set("lkQualityLevel", 0.01, 0.001, .02)); + 
gui.add(lkMinDistance.set("lkMinDistance", 4, 1, 16)); + gui.add(lkWinSize.set("lkWinSize", 8, 4, 64)); + gui.add(usefb.set("Use Farneback", true)); + gui.add(fbPyrScale.set("fbPyrScale", .5, 0, .99)); + gui.add(fbLevels.set("fbLevels", 4, 1, 8)); + gui.add(fbIterations.set("fbIterations", 2, 1, 8)); + gui.add(fbPolyN.set("fbPolyN", 7, 5, 10)); + gui.add(fbPolySigma.set("fbPolySigma", 1.5, 1.1, 2)); + gui.add(fbUseGaussian.set("fbUseGaussian", false)); + gui.add(fbWinSize.set("winSize", 32, 4, 64)); + + curFlow = &fb; +} + +void ofApp::update(){ + camera.update(); + + if(camera.isFrameNew()) { + + if(usefb) { + curFlow = &fb; + fb.setPyramidScale(fbPyrScale); + fb.setNumLevels(fbLevels); + fb.setWindowSize(fbWinSize); + fb.setNumIterations(fbIterations); + fb.setPolyN(fbPolyN); + fb.setPolySigma(fbPolySigma); + fb.setUseGaussian(fbUseGaussian); + } else { + curFlow = &lk; + lk.setMaxFeatures(lkMaxFeatures); + lk.setQualityLevel(lkQualityLevel); + lk.setMinDistance(lkMinDistance); + lk.setWindowSize(lkWinSize); + lk.setMaxLevel(lkMaxLevel); + } + + // you can use Flow polymorphically + curFlow->calcOpticalFlow(camera); + } +} + +void ofApp::draw(){ + ofPushMatrix(); + ofTranslate(250, 100); + camera.draw(0,0,640,480); + curFlow->draw(0,0,640,480); + ofDrawBitmapStringHighlight(ofToString((int) ofGetFrameRate()) + "fps", 10, 20); + ofPopMatrix(); + gui.draw(); +} \ No newline at end of file diff --git a/addons/ofxCv/example-flow/src/ofApp.h b/addons/ofxCv/example-flow/src/ofApp.h new file mode 100644 index 00000000000..7fafd96e5de --- /dev/null +++ b/addons/ofxCv/example-flow/src/ofApp.h @@ -0,0 +1,25 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" +#include "ofxGui.h" + +class ofApp : public ofBaseApp{ +public: + void setup(); + void update(); + void draw(); + + ofVideoGrabber camera; + + ofxCv::FlowFarneback fb; + ofxCv::FlowPyrLK lk; + + ofxCv::Flow* curFlow; + + ofxPanel gui; + ofParameter fbPyrScale, lkQualityLevel, fbPolySigma; + ofParameter 
fbLevels, lkWinSize, fbIterations, fbPolyN, fbWinSize, lkMaxLevel, lkMaxFeatures, lkMinDistance; + ofParameter fbUseGaussian, usefb; +}; + diff --git a/addons/ofxCv/example-gesture/addons.make b/addons/ofxCv/example-gesture/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-gesture/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-gesture/src/GeometryHelpers.cpp b/addons/ofxCv/example-gesture/src/GeometryHelpers.cpp new file mode 100644 index 00000000000..3459f94a7f8 --- /dev/null +++ b/addons/ofxCv/example-gesture/src/GeometryHelpers.cpp @@ -0,0 +1,238 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +template +Real DistancePointEllipseSpecial (const Real e[2], const Real y[2], Real x[2]) +{ + Real distance; + if (y[1] > (Real)0) + { + if (y[0] > (Real)0) + { + // Bisect to compute the root of F(t) for t >= -e1*e1. + Real esqr[2] = { e[0]*e[0], e[1]*e[1] }; + Real ey[2] = { e[0]*y[0], e[1]*y[1] }; + Real t0 = -esqr[1] + ey[1]; + Real t1 = -esqr[1] + sqrt(ey[0]*ey[0] + ey[1]*ey[1]); + Real t = t0; + const int imax = 2*std::numeric_limits::max_exponent; + for (int i = 0; i < imax; ++i) + { + t = ((Real)0.5)*(t0 + t1); + if (t == t0 || t == t1) + { + break; + } + + Real r[2] = { ey[0]/(t + esqr[0]), ey[1]/(t + esqr[1]) }; + Real f = r[0]*r[0] + r[1]*r[1] - (Real)1; + if (f > (Real)0) + { + t0 = t; + } + else if (f < (Real)0) + { + t1 = t; + } + else + { + break; + } + } + + x[0] = esqr[0]*y[0]/(t + esqr[0]); + x[1] = esqr[1]*y[1]/(t + esqr[1]); + Real d[2] = { x[0] - y[0], x[1] - y[1] }; + distance = sqrt(d[0]*d[0] + d[1]*d[1]); + } + else // y0 == 0 + { + x[0] = (Real)0; + x[1] = e[1]; + distance = fabs(y[1] - e[1]); + } + } + else // y1 == 0 + { + Real denom0 = e[0]*e[0] - e[1]*e[1]; + Real e0y0 = e[0]*y[0]; + if (e0y0 < denom0) + { + // y0 is inside the subinterval. 
+ Real x0de0 = e0y0/denom0; + Real x0de0sqr = x0de0*x0de0; + x[0] = e[0]*x0de0; + x[1] = e[1]*sqrt(fabs((Real)1 - x0de0sqr)); + Real d0 = x[0] - y[0]; + distance = sqrt(d0*d0 + x[1]*x[1]); + } + else + { + // y0 is outside the subinterval. The closest ellipse point has + // x1 == 0 and is on the domain-boundary interval (x0/e0)^2 = 1. + x[0] = e[0]; + x[1] = (Real)0; + distance = fabs(y[0] - e[0]); + } + } + return distance; +} +//---------------------------------------------------------------------------- +// The ellipse is (x0/e0)^2 + (x1/e1)^2 = 1. The query point is (y0,y1). +// The function returns the distance from the query point to the ellipse. +// It also computes the ellipse point (x0,x1) that is closest to (y0,y1). +//---------------------------------------------------------------------------- +template +Real DistancePointEllipse (const Real e[2], const Real y[2], Real x[2]) +{ + // Determine reflections for y to the first quadrant. + bool reflect[2]; + int i, j; + for (i = 0; i < 2; ++i) + { + reflect[i] = (y[i] < (Real)0); + } + + // Determine the axis order for decreasing extents. + int permute[2]; + if (e[0] < e[1]) + { + permute[0] = 1; permute[1] = 0; + } + else + { + permute[0] = 0; permute[1] = 1; + } + + int invpermute[2]; + for (i = 0; i < 2; ++i) + { + invpermute[permute[i]] = i; + } + + Real locE[2], locY[2]; + for (i = 0; i < 2; ++i) + { + j = permute[i]; + locE[i] = e[j]; + locY[i] = y[j]; + if (reflect[j]) + { + locY[i] = -locY[i]; + } + } + + Real locX[2]; + Real distance = DistancePointEllipseSpecial(locE, locY, locX); + + // Restore the axis order and reflections. 
+ for (i = 0; i < 2; ++i) + { + j = invpermute[i]; + if (reflect[j]) + { + locX[j] = -locX[j]; + } + x[i] = locX[j]; + } + + return distance; +} + +ofVec2f closestPointOnRay(const ofVec2f& p1, const ofVec2f& p2, const ofVec2f& p3) { + if(p1 == p2) { + return p1; + } + + float u = (p3.x - p1.x) * (p2.x - p1.x); + u += (p3.y - p1.y) * (p2.y - p1.y); + float len = (p2 - p1).length(); + u /= (len * len); + + return p1.getInterpolated(p2, u); +} + +ofVec2f closestPointOnLine(const ofVec2f& p1, const ofVec2f& p2, const ofVec2f& p3) { + if(p1 == p2) { + return p1; + } + + float u = (p3.x - p1.x) * (p2.x - p1.x); + u += (p3.y - p1.y) * (p2.y - p1.y); + float len = (p2 - p1).length(); + u /= (len * len); + + // clamp u + if(u > 1) { + u = 1; + } else if(u < 0) { + u = 0; + } + return p1.getInterpolated(p2, u); +} + +ofVec2f closestPointOnRect(const cv::RotatedRect& rect, const ofVec2f& point) { + ofVec2f norm = point; + ofVec2f offset(rect.center.x, rect.center.y); + norm -= offset; + norm.rotate(-rect.angle); + float w = rect.size.width / 2, h = rect.size.height / 2; + ofVec2f nearest; + if((norm.x > w || norm.x < -w) || (norm.y < -h || norm.y > h)) { + nearest.set(ofClamp(norm.x, -w, w), ofClamp(norm.y, -h, h)); + } else { + if(fabsf(fabsf(norm.x) - w) < fabsf(fabsf(norm.y) - h)) { + nearest.set(w * (norm.x > 0 ? 1 : -1), norm.y); + } else { + nearest.set(norm.x, h * (norm.y > 0 ? 
1 : -1)); + } + } + nearest.rotate(rect.angle); + nearest += offset; + return nearest; +} + +ofVec2f closestPointOnCircle(const ofVec2f& center, float radius, const ofVec2f& point) { + ofVec2f nearest = point - center; + nearest *= radius / nearest.length(); + nearest += center; + return nearest; +} + +ofVec2f closestPointOnEllipse(const cv::RotatedRect& ellipse, const ofVec2f& point) { + ofVec2f norm = point; + ofVec2f offset(ellipse.center.x, ellipse.center.y); + norm -= offset; + norm.rotate(-ellipse.angle); + bool flipX = norm.x < 0, flipY = norm.y < 0; + if(flipX) norm.x *= -1; + if(flipY) norm.y *= -1; + float e[] = {ellipse.size.width / 2, ellipse.size.height / 2}; + float y[] = {norm.x, norm.y}; + float x[2]; + DistancePointEllipse(e, y, x); + ofVec2f result(x[0], x[1]); + if(flipX) result.x *= -1; + if(flipY) result.y *= -1; + result.rotate(ellipse.angle); + result += offset; + return result; +} + +float distanceToEllipse(const ofVec2f& point, const cv::RotatedRect& ellipse) { + return closestPointOnEllipse(ellipse, point).distance(point); +} + +float distanceToRect(const ofVec2f& point, const cv::RotatedRect& rect) { + return closestPointOnRect(rect, point).distance(point); +} + +float distanceToLine(const ofVec2f& point, const ofVec2f& start, const ofVec2f& end) { + return closestPointOnLine(start, end, point).distance(point); +} + +float distanceToRay(const ofVec2f& point, const ofVec2f& start, const ofVec2f& end) { + return closestPointOnRay(start, end, point).distance(point); +} diff --git a/addons/ofxCv/example-gesture/src/GeometryHelpers.h b/addons/ofxCv/example-gesture/src/GeometryHelpers.h new file mode 100644 index 00000000000..cf38c19fd6c --- /dev/null +++ b/addons/ofxCv/example-gesture/src/GeometryHelpers.h @@ -0,0 +1,15 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +ofVec2f closestPointOnRay(const ofVec2f& p1, const ofVec2f& p2, const ofVec2f& p3); +ofVec2f closestPointOnLine(const ofVec2f& p1, const ofVec2f& p2, 
const ofVec2f& p3); +ofVec2f closestPointOnRect(const cv::RotatedRect& rect, const ofVec2f& point); +ofVec2f closestPointOnCircle(const ofVec2f& center, float radius, const ofVec2f& point); +ofVec2f closestPointOnEllipse(const cv::RotatedRect& ellipse, const ofVec2f& point); + +float distanceToEllipse(const ofVec2f& point, const cv::RotatedRect& ellipse); +float distanceToRect(const ofVec2f& point, const cv::RotatedRect& rect); +float distanceToRay(const ofVec2f& point, const ofVec2f& start, const ofVec2f& end); +float distanceToLine(const ofVec2f& point, const ofVec2f& start, const ofVec2f& end); \ No newline at end of file diff --git a/addons/ofxCv/example-gesture/src/main.cpp b/addons/ofxCv/example-gesture/src/main.cpp new file mode 100644 index 00000000000..d41893b4f4a --- /dev/null +++ b/addons/ofxCv/example-gesture/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(1280, 720, OF_FULLSCREEN); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-gesture/src/ofApp.cpp b/addons/ofxCv/example-gesture/src/ofApp.cpp new file mode 100644 index 00000000000..4ddf249e6bd --- /dev/null +++ b/addons/ofxCv/example-gesture/src/ofApp.cpp @@ -0,0 +1,39 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + ofEnableSmoothing(); + ofEnableAlphaBlending(); + ofSetLineWidth(3); + ofSetFrameRate(120); +} + +void ofApp::update() { + +} + +void ofApp::draw() { + ofBackground(0); + ofSetColor(255, 64); + polyline.draw(); + + switch(recognizer.getGestureType()) { + case Recognizer::GESTURE_LINE: ofSetColor(magentaPrint); break; + case Recognizer::GESTURE_ARC: ofSetColor(cyanPrint); break; + } + if(recognizer.getFitError() < .5) { + recognizer.getPolyline().draw(); + } +} + +void ofApp::mousePressed(int x, int y, int button) { + polyline.clear(); +} + +void ofApp::mouseDragged(int x, int y, int button) { + polyline.addVertex(x, y); + recognizer.update(polyline); +} diff --git 
a/addons/ofxCv/example-gesture/src/ofApp.h b/addons/ofxCv/example-gesture/src/ofApp.h new file mode 100644 index 00000000000..e05b42f632a --- /dev/null +++ b/addons/ofxCv/example-gesture/src/ofApp.h @@ -0,0 +1,90 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +#include "GeometryHelpers.h" + +class Recognizer { +public: + enum GestureType {GESTURE_LINE, GESTURE_ARC}; + + Recognizer() + :lineRatio(6) + ,gestureType(GESTURE_LINE) + ,fitError(0) + {} + void setLineRatio(float lineRatio) { + this->lineRatio = lineRatio; + } + GestureType getGestureType() const { + return gestureType; + } + float getFitError() const { + return fitError; + } + ofPolyline& getPolyline() { + return idealized; + } + void update(ofPolyline& polyline) { + if(polyline.size() > 5) { + ellipse = fitEllipse(polyline); + } + rect = minAreaRect(polyline); + fitLine(polyline, linePoint, lineDirection); + + float lineSum = 0, ellipseSum = 0; + for(int i = 0; i < polyline.size(); i++) { + lineSum += distanceToRay(polyline[i], linePoint, linePoint + lineDirection); + ellipseSum += distanceToEllipse(polyline[i], ellipse); + } + float perimeter = polyline.getPerimeter(); + lineSum /= perimeter, ellipseSum /= perimeter; + + bool isLine = rect.size.width / rect.size.height > lineRatio + || rect.size.height / rect.size.width > lineRatio + || (lineSum < ellipseSum) + || ellipseSum != ellipseSum; + idealized.clear(); + if(isLine) { + gestureType = GESTURE_LINE; + fitError = lineSum; + idealized.addVertex(closestPointOnRay(linePoint, linePoint + lineDirection, polyline[0])); + idealized.addVertex(closestPointOnRay(linePoint, linePoint + lineDirection, polyline[polyline.size() - 1])); + } else { + gestureType = GESTURE_ARC; + fitError = ellipseSum; + ofVec2f center(ellipse.center.x, ellipse.center.y); + // it would make more sense to do this at a fixed resolution + for(int i = 0; i < polyline.size(); i++) { + ofVec2f cur = polyline[i]; + cur -= center, cur.rotate(-ellipse.angle); + float a = 
ellipse.size.width / 2, b = ellipse.size.height / 2, x0 = cur.x, y0 = cur.y; + cur *= (a * b) / sqrtf(a * a * y0 * y0 + b * b * x0 * x0); + cur.rotate(ellipse.angle), cur += center; + idealized.addVertex(cur); + } + } + } + +protected: + cv::RotatedRect ellipse, rect; + ofVec2f linePoint, lineDirection; + ofPolyline idealized; + float lineRatio, fitError; + GestureType gestureType; +}; + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + void mousePressed(int x, int y, int button); + void mouseDragged(int x, int y, int button); + + ofPolyline polyline; + + Recognizer recognizer; +}; diff --git a/addons/ofxCv/example-homography/addons.make b/addons/ofxCv/example-homography/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-homography/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-homography/bin/data/left.jpg b/addons/ofxCv/example-homography/bin/data/left.jpg new file mode 100755 index 00000000000..866a0c8d013 Binary files /dev/null and b/addons/ofxCv/example-homography/bin/data/left.jpg differ diff --git a/addons/ofxCv/example-homography/bin/data/right.jpg b/addons/ofxCv/example-homography/bin/data/right.jpg new file mode 100755 index 00000000000..da653b74585 Binary files /dev/null and b/addons/ofxCv/example-homography/bin/data/right.jpg differ diff --git a/addons/ofxCv/example-homography/src/main.cpp b/addons/ofxCv/example-homography/src/main.cpp new file mode 100644 index 00000000000..d0c88615fd8 --- /dev/null +++ b/addons/ofxCv/example-homography/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640 * 2, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-homography/src/ofApp.cpp b/addons/ofxCv/example-homography/src/ofApp.cpp new file mode 100644 index 00000000000..f7f21402764 --- /dev/null +++ 
b/addons/ofxCv/example-homography/src/ofApp.cpp @@ -0,0 +1,123 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + + left.load("left.jpg"); + right.load("right.jpg"); + imitate(warpedColor, right); + + movingPoint = false; + saveMatrix = false; + homographyReady = false; + + // load the previous homography if it's available + ofFile previous("homography.yml"); + if(previous.exists()) { + FileStorage fs(ofToDataPath("homography.yml"), FileStorage::READ); + fs["homography"] >> homography; + homographyReady = true; + } +} + +void ofApp::update() { + if(leftPoints.size() >= 4) { + vector srcPoints, dstPoints; + for(int i = 0; i < leftPoints.size(); i++) { + srcPoints.push_back(Point2f(rightPoints[i].x - left.getWidth(), rightPoints[i].y)); + dstPoints.push_back(Point2f(leftPoints[i].x, leftPoints[i].y)); + } + + // generate a homography from the two sets of points + homography = findHomography(Mat(srcPoints), Mat(dstPoints)); + homographyReady = true; + + if(saveMatrix) { + FileStorage fs(ofToDataPath("homography.yml"), FileStorage::WRITE); + fs << "homography" << homography; + saveMatrix = false; + } + } + + if(homographyReady) { + // this is how you warp one ofImage into another ofImage given the homography matrix + // CV INTER NN is 113 fps, CV_INTER_LINEAR is 93 fps + warpPerspective(right, warpedColor, homography, CV_INTER_LINEAR); + warpedColor.update(); + } +} + +void drawPoints(vector& points) { + ofNoFill(); + for(int i = 0; i < points.size(); i++) { + ofDrawCircle(points[i], 10); + ofDrawCircle(points[i], 1); + } +} + +void ofApp::draw() { + + ofSetColor(255); + left.draw(0, 0); + right.draw(left.getWidth(), 0); + if(homographyReady) { + ofEnableBlendMode(OF_BLENDMODE_MULTIPLY); + ofSetColor(255, 128); + warpedColor.draw(0, 0); + ofDisableBlendMode(); + } + + ofSetColor(ofColor::red); + drawPoints(leftPoints); + ofSetColor(ofColor::blue); + drawPoints(rightPoints); + ofSetColor(128); + 
for(int i = 0; i < leftPoints.size(); i++) { + ofDrawLine(leftPoints[i], rightPoints[i]); + } + + ofSetColor(255); + ofDrawBitmapString(ofToString((int) ofGetFrameRate()), 10, 20); +} + +bool ofApp::movePoint(vector& points, ofVec2f point) { + for(int i = 0; i < points.size(); i++) { + if(points[i].distance(point) < 20) { + movingPoint = true; + curPoint = &points[i]; + return true; + } + } + return false; +} + +void ofApp::mousePressed(int x, int y, int button) { + ofVec2f cur(x, y); + ofVec2f rightOffset(left.getWidth(), 0); + if(!movePoint(leftPoints, cur) && !movePoint(rightPoints, cur)) { + if(x > left.getWidth()) { + cur -= rightOffset; + } + leftPoints.push_back(cur); + rightPoints.push_back(cur + rightOffset); + } +} + +void ofApp::mouseDragged(int x, int y, int button) { + if(movingPoint) { + curPoint->set(x, y); + } +} + +void ofApp::mouseReleased(int x, int y, int button) { + movingPoint = false; +} + +void ofApp::keyPressed(int key) { + if(key == ' ') { + saveMatrix = true; + } +} \ No newline at end of file diff --git a/addons/ofxCv/example-homography/src/ofApp.h b/addons/ofxCv/example-homography/src/ofApp.h new file mode 100644 index 00000000000..72a79071615 --- /dev/null +++ b/addons/ofxCv/example-homography/src/ofApp.h @@ -0,0 +1,25 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + bool movePoint(vector& points, ofVec2f point); + void mousePressed(int x, int y, int button); + void mouseDragged(int x, int y, int button); + void mouseReleased(int x, int y, int button); + void keyPressed(int key); + + ofImage left, right, warpedColor; + vector leftPoints, rightPoints; + bool movingPoint; + ofVec2f* curPoint; + bool saveMatrix; + bool homographyReady; + + cv::Mat homography; +}; diff --git a/addons/ofxCv/example-kalman-euler/addons.make b/addons/ofxCv/example-kalman-euler/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null 
+++ b/addons/ofxCv/example-kalman-euler/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-kalman-euler/src/main.cpp b/addons/ofxCv/example-kalman-euler/src/main.cpp new file mode 100644 index 00000000000..d0c88615fd8 --- /dev/null +++ b/addons/ofxCv/example-kalman-euler/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640 * 2, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-kalman-euler/src/ofApp.cpp b/addons/ofxCv/example-kalman-euler/src/ofApp.cpp new file mode 100644 index 00000000000..bb79a933001 --- /dev/null +++ b/addons/ofxCv/example-kalman-euler/src/ofApp.cpp @@ -0,0 +1,59 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + + kalman.init(1e-5, 1e-1); // inverse of (smoothness, rapidness) +} + +void ofApp::update() { + m.makeIdentityMatrix(); + // generate rotation for demo + m.rotate(ofMap(mouseX, 0, ofGetWidth(), -180, 180), 1, 0, 0); + m.rotate(ofMap(mouseY, 0, ofGetHeight(), -180, 180), 0, 1, 0); + m.rotate(ofMap(ofGetElapsedTimef(), 0, 1, 0, 180), 0, 1, 1); + + ofQuaternion q; + q = m.getRotate(); + kalman.update(q); + mPredicted.setRotate(kalman.getEstimation()); +} + +void drawTransparentAxis(float size, int alpha) { + ofPushStyle(); + ofSetLineWidth(3); + + // draw x axis + ofSetColor(255, 0, 0, alpha); + ofDrawLine(0, 0, 0, size, 0, 0); + + // draw y axis + ofSetColor(0, 255, 0, alpha); + ofDrawLine(0, 0, 0, 0, size, 0); + + // draw z axis + ofSetColor(0, 0, 255, alpha); + ofDrawLine(0, 0, 0, 0, 0, size); + ofPopStyle(); +} + +void ofApp::draw() { + ofBackground(54); + + cam.begin(); + + ofPushMatrix(); + glMultMatrixf((GLfloat*)m.getPtr()); + drawTransparentAxis(200, 128); + ofPopMatrix(); + + ofPushMatrix(); + glMultMatrixf((GLfloat*)mPredicted.getPtr()); + ofDrawAxis(200); + ofPopMatrix(); + + cam.end(); +} diff --git 
a/addons/ofxCv/example-kalman-euler/src/ofApp.h b/addons/ofxCv/example-kalman-euler/src/ofApp.h new file mode 100644 index 00000000000..12e89fe7471 --- /dev/null +++ b/addons/ofxCv/example-kalman-euler/src/ofApp.h @@ -0,0 +1,16 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofxCv::KalmanEuler kalman; + + ofMatrix4x4 m, mPredicted; + ofEasyCam cam; +}; diff --git a/addons/ofxCv/example-kalman/addons.make b/addons/ofxCv/example-kalman/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-kalman/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-kalman/src/main.cpp b/addons/ofxCv/example-kalman/src/main.cpp new file mode 100644 index 00000000000..d0c88615fd8 --- /dev/null +++ b/addons/ofxCv/example-kalman/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640 * 2, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-kalman/src/ofApp.cpp b/addons/ofxCv/example-kalman/src/ofApp.cpp new file mode 100644 index 00000000000..c4b53d4c08a --- /dev/null +++ b/addons/ofxCv/example-kalman/src/ofApp.cpp @@ -0,0 +1,48 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + + kalman.init(1e-4, 1e-1); // inverse of (smoothness, rapidness) + + line.setMode(OF_PRIMITIVE_LINE_STRIP); + predicted.setMode(OF_PRIMITIVE_LINE_STRIP); + estimated.setMode(OF_PRIMITIVE_LINE_STRIP); + + speed = 0.f; +} + +void ofApp::update() { + ofVec2f curPoint(mouseX, mouseY); + line.addVertex(curPoint); + + kalman.update(curPoint); // feed measurement + + point = kalman.getPrediction(); // prediction before measurement + predicted.addVertex(point); + estimated.addVertex(kalman.getEstimation()); // corrected estimation after measurement + + speed = 
kalman.getVelocity().length(); + int alpha = ofMap(speed, 0, 20, 50, 255, true); + line.addColor(ofColor(255, 255, 255, alpha)); + predicted.addColor(ofColor(255, 0, 0, alpha)); + estimated.addColor(ofColor(0, 255, 0, alpha)); +} + +void ofApp::draw() { + ofBackground(0); + + line.draw(); + + predicted.draw(); + ofPushStyle(); + ofSetColor(ofColor::red, 128); + ofFill(); + ofDrawCircle(point, speed * 2); + ofPopStyle(); + + estimated.draw(); +} diff --git a/addons/ofxCv/example-kalman/src/ofApp.h b/addons/ofxCv/example-kalman/src/ofApp.h new file mode 100644 index 00000000000..52f9c8a90e0 --- /dev/null +++ b/addons/ofxCv/example-kalman/src/ofApp.h @@ -0,0 +1,17 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofxCv::KalmanPosition kalman; + + ofMesh predicted, line, estimated; + ofVec2f point; + float speed; +}; diff --git a/addons/ofxCv/example-smile/addons.make b/addons/ofxCv/example-smile/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-smile/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-smile/bin/data/readme.txt b/addons/ofxCv/example-smile/bin/data/readme.txt new file mode 100644 index 00000000000..144119f6264 --- /dev/null +++ b/addons/ofxCv/example-smile/bin/data/readme.txt @@ -0,0 +1,9 @@ +You need to add a copy of haarcascade_frontalface_default.xml and smiled_05.xml to this directory. 
+ +haarcascade_frontalface_default.xml can be found at: + +openFrameworks/examples/addons/opencvHaarFinderExample/bin/haarcascade_frontalface_default.xml + +smiled_05.xml can be found at: + +https://github.com/hromi/SMILEsmileD/blob/master/smileD/smiled_05.xml \ No newline at end of file diff --git a/addons/ofxCv/example-smile/src/main.cpp b/addons/ofxCv/example-smile/src/main.cpp new file mode 100644 index 00000000000..6250422053f --- /dev/null +++ b/addons/ofxCv/example-smile/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-smile/src/ofApp.cpp b/addons/ofxCv/example-smile/src/ofApp.cpp new file mode 100644 index 00000000000..49c6949d22f --- /dev/null +++ b/addons/ofxCv/example-smile/src/ofApp.cpp @@ -0,0 +1,35 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + ofSetFrameRate(120); + cam.setup(640, 480); + smile.setup(); +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + smile.update(cam); + if(smile.getFaceFound()) { + float cur = smile.getSmileAmount(); + graph.add(cur); + ofLog() << graph.getNormalized(cur); + } + } +} + +void ofApp::draw() { + ofSetColor(255); + cam.draw(0, 0); + smile.draw(); + + ofTranslate(10, 10); + ofSetColor(0); + ofDrawRectangle(0, 0, 300, 100); + ofSetColor(255); + graph.draw(300, 100); +} diff --git a/addons/ofxCv/example-smile/src/ofApp.h b/addons/ofxCv/example-smile/src/ofApp.h new file mode 100644 index 00000000000..2e899a0cd81 --- /dev/null +++ b/addons/ofxCv/example-smile/src/ofApp.h @@ -0,0 +1,114 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +// single-person smile strength detector +// not robust against rotation +class SmileDetector { +protected: + ofRectangle roi; + +public: + ofxCv::ObjectFinder faceFinder, smileFinder; + + void setup() { + faceFinder.setup("haarcascade_frontalface_default.xml"); + 
faceFinder.setPreset(ofxCv::ObjectFinder::Accurate); + faceFinder.setFindBiggestObject(true); + smileFinder.setup("smiled_05.xml"); + smileFinder.setPreset(ofxCv::ObjectFinder::Sensitive); + smileFinder.setMinNeighbors(0); + } + template + void update(T& img) { + update(ofxCv::toCv(img)); + } + void update(const cv::Mat& mat) { + faceFinder.update(mat); + if(faceFinder.size()) { + roi = faceFinder.getObject(0); + float lowerRatio = .35; + roi.y += roi.height * (1 - lowerRatio); + roi.height *= lowerRatio; + cv::Mat faceMat(mat, ofxCv::toCv(roi)); + smileFinder.update(faceMat); + } + } + void draw() const { + faceFinder.draw(); + if(faceFinder.size()) { + ofPushMatrix(); + ofTranslate(roi.position); + smileFinder.draw(); + ofPopMatrix(); + } + } + bool getFaceFound() const { + return faceFinder.size(); + } + ofRectangle getFace() const { + return faceFinder.getObject(0); + } + int getSmileAmount() const { + if(faceFinder.size()) { + return smileFinder.size(); + } else { + return 0; + } + } +}; + +class LineGraph { +protected: + ofMesh mesh; + ofVec2f min, max; + int n; + +public: + LineGraph() + :n(0) { + } + void reset() { + n = 0; + min = ofVec2f(); + max = ofVec2f(); + } + void add(float y) { + ofVec2f cur(n, y); + mesh.addVertex(cur); + if(n == 0) { + min = cur; + max = cur; + } else { + min.set(MIN(min.x, cur.x), MIN(min.y, cur.y)); + max.set(MAX(max.x, cur.x), MAX(max.y, cur.y)); + } + n++; + } + float getNormalized(float y) { + return ofNormalize(y, min.y, max.y); + } + void draw(float w, float h) { + if(n > 2) { + ofPushMatrix(); + ofTranslate(0, h); + ofScale(w / (max.x - min.x), -h / (max.y - min.y)); + ofTranslate(-min); + mesh.setMode(OF_PRIMITIVE_LINE_STRIP); + mesh.draw(); + ofPopMatrix(); + } + } +}; + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofVideoGrabber cam; + SmileDetector smile; + LineGraph graph; +}; diff --git a/addons/ofxCv/example-threshold/addons.make 
b/addons/ofxCv/example-threshold/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ b/addons/ofxCv/example-threshold/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-threshold/src/main.cpp b/addons/ofxCv/example-threshold/src/main.cpp new file mode 100644 index 00000000000..d0c88615fd8 --- /dev/null +++ b/addons/ofxCv/example-threshold/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640 * 2, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-threshold/src/ofApp.cpp b/addons/ofxCv/example-threshold/src/ofApp.cpp new file mode 100644 index 00000000000..e2e18e6e856 --- /dev/null +++ b/addons/ofxCv/example-threshold/src/ofApp.cpp @@ -0,0 +1,28 @@ +#include "ofApp.h" + +using namespace cv; +using namespace ofxCv; + +void ofApp::setup() { + cam.setup(640, 480); + thresh.allocate(640, 480, OF_IMAGE_GRAYSCALE); +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + convertColor(cam, thresh, CV_RGB2GRAY); + if(ofGetMousePressed()) { + autothreshold(thresh); + } else { + float thresholdValue = ofMap(mouseX, 0, ofGetWidth(), 0, 255); + threshold(thresh, thresholdValue); + } + thresh.update(); + } +} + +void ofApp::draw() { + cam.draw(0, 0); + thresh.draw(640, 0); +} diff --git a/addons/ofxCv/example-threshold/src/ofApp.h b/addons/ofxCv/example-threshold/src/ofApp.h new file mode 100644 index 00000000000..76ae4e9e336 --- /dev/null +++ b/addons/ofxCv/example-threshold/src/ofApp.h @@ -0,0 +1,14 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofVideoGrabber cam; + ofImage thresh; +}; diff --git a/addons/ofxCv/example-undistortion/addons.make b/addons/ofxCv/example-undistortion/addons.make new file mode 100644 index 00000000000..979d12cc077 --- /dev/null +++ 
b/addons/ofxCv/example-undistortion/addons.make @@ -0,0 +1,3 @@ +ofxCv +ofxOpenCv +ofxGui \ No newline at end of file diff --git a/addons/ofxCv/example-undistortion/bin/data/mbp-2011-isight.yml b/addons/ofxCv/example-undistortion/bin/data/mbp-2011-isight.yml new file mode 100644 index 00000000000..5b55839ab2c --- /dev/null +++ b/addons/ofxCv/example-undistortion/bin/data/mbp-2011-isight.yml @@ -0,0 +1,19 @@ +%YAML:1.0 +cameraMatrix: !!opencv-matrix + rows: 3 + cols: 3 + dt: d + data: [ 6.6278599887122368e+02, 0., 3.1244256016006659e+02, 0., + 6.6129276875199082e+02, 2.2747179767124251e+02, 0., 0., 1. ] +imageSize_width: 640 +imageSize_height: 480 +sensorSize_width: 0 +sensorSize_height: 0 +distCoeffs: !!opencv-matrix + rows: 5 + cols: 1 + dt: d + data: [ -1.8848338341464690e-01, 1.0721890419183855e+00, + -3.5244467228016116e-03, -7.0195032848241403e-04, + -2.0412827999027101e+00 ] +reprojectionError: 2.1723265945911407e-01 diff --git a/addons/ofxCv/example-undistortion/src/main.cpp b/addons/ofxCv/example-undistortion/src/main.cpp new file mode 100644 index 00000000000..d0c88615fd8 --- /dev/null +++ b/addons/ofxCv/example-undistortion/src/main.cpp @@ -0,0 +1,6 @@ +#include "ofApp.h" + +int main() { + ofSetupOpenGL(640 * 2, 480, OF_WINDOW); + ofRunApp(new ofApp()); +} diff --git a/addons/ofxCv/example-undistortion/src/ofApp.cpp b/addons/ofxCv/example-undistortion/src/ofApp.cpp new file mode 100644 index 00000000000..b75dd56fc28 --- /dev/null +++ b/addons/ofxCv/example-undistortion/src/ofApp.cpp @@ -0,0 +1,26 @@ +#include "ofApp.h" + +using namespace ofxCv; +using namespace cv; + +void ofApp::setup() { + ofSetVerticalSync(true); + cam.setup(640, 480); + calibration.setFillFrame(true); // true by default + calibration.load("mbp-2011-isight.yml"); + imitate(undistorted, cam); +} + +void ofApp::update() { + cam.update(); + if(cam.isFrameNew()) { + calibration.undistort(toCv(cam), toCv(undistorted)); + undistorted.update(); + } +} + +void ofApp::draw() { + 
ofSetColor(255); + cam.draw(0, 0); + undistorted.draw(640, 0); +} diff --git a/addons/ofxCv/example-undistortion/src/ofApp.h b/addons/ofxCv/example-undistortion/src/ofApp.h new file mode 100644 index 00000000000..507380fd448 --- /dev/null +++ b/addons/ofxCv/example-undistortion/src/ofApp.h @@ -0,0 +1,15 @@ +#pragma once + +#include "ofMain.h" +#include "ofxCv.h" + +class ofApp : public ofBaseApp { +public: + void setup(); + void update(); + void draw(); + + ofVideoGrabber cam; + ofImage undistorted; + ofxCv::Calibration calibration; +}; diff --git a/addons/ofxCv/libs/.gitignore b/addons/ofxCv/libs/.gitignore new file mode 100644 index 00000000000..e49fad10309 --- /dev/null +++ b/addons/ofxCv/libs/.gitignore @@ -0,0 +1 @@ +opencv/ \ No newline at end of file diff --git a/addons/ofxCv/libs/CLD/include/CLD/ETF.h b/addons/ofxCv/libs/CLD/include/CLD/ETF.h new file mode 100755 index 00000000000..453a1273d02 --- /dev/null +++ b/addons/ofxCv/libs/CLD/include/CLD/ETF.h @@ -0,0 +1,77 @@ +#ifndef _ETF_H_ +#define _ETF_H_ + +#include "imatrix.h" + +struct Vect { + double tx, ty, mag; +}; + + +class ETF { +private: + int Nr, Nc; + Vect** p; + double max_grad; +public: + ETF() + { + Nr = 1, Nc = 1; + p = new Vect*[Nr]; + for(int i = 0; i < Nr; i++) + p[i] = new Vect[Nc]; + p[0][0].tx=1.0; p[0][0].ty=0.0; p[0][0].mag=1.0; + max_grad = 1.0; + }; + ETF(int i, int j) + { + Nr = i, Nc = j; + p = new Vect*[Nr]; + for(i = 0; i < Nr; i++) + p[i] = new Vect[Nc]; + max_grad = 1.0; + }; + void delete_all() { + for (int i = 0; i < Nr; i++) + delete[] p[i]; + delete[] p; + } + ~ETF() { delete_all(); } + Vect* operator[](int i) { return p[i]; }; + Vect& get( int i, int j ) const { return p[i][j]; } + int getRow() const { return Nr; } + int getCol() const { return Nc; } + void init(int i, int j) + { + delete_all(); + Nr = i, Nc = j; + p = new Vect*[Nr]; + for(i = 0; i < Nr; i++) + p[i] = new Vect[Nc]; + max_grad = 1.0; + }; + void copy(ETF& s) + { + for (int i = 0; i < Nr; i++) + for (int j = 
0; j < Nc; j++) { + p[i][j].tx = s.p[i][j].tx; + p[i][j].ty = s.p[i][j].ty; + p[i][j].mag = s.p[i][j].mag; + } + max_grad = s.max_grad; + }; + void zero() + { + for (int i = 0; i < Nr; i++) + for (int j = 0; j < Nc; j++) + p[i][j].tx = p[i][j].ty = p[i][j].mag = 0.0; + } + void set(imatrix& image); + void set2(imatrix& image); + void Smooth(int half_w, int M); + double GetMaxGrad() { return max_grad; } + void normalize(); +}; + + +#endif \ No newline at end of file diff --git a/addons/ofxCv/libs/CLD/include/CLD/fdog.h b/addons/ofxCv/libs/CLD/include/CLD/fdog.h new file mode 100755 index 00000000000..1bfbb78bd47 --- /dev/null +++ b/addons/ofxCv/libs/CLD/include/CLD/fdog.h @@ -0,0 +1,12 @@ + +#ifndef _FDOG_H_ +#define _FDOG_H_ + +extern void GaussSmoothSep(imatrix& image, double sigma); +extern void ConstructMergedImage(imatrix& image, imatrix& gray, imatrix& merged); +extern void ConstructMergedImageMult(imatrix& image, imatrix& gray, imatrix& merged); +extern void GetFDoG(imatrix& image, ETF& e, double sigma, double sigma3, double tau); +extern void Binarize(imatrix& image, double thres); +extern void GrayThresholding(imatrix& image, double thres); + +#endif \ No newline at end of file diff --git a/addons/ofxCv/libs/CLD/include/CLD/imatrix.h b/addons/ofxCv/libs/CLD/include/CLD/imatrix.h new file mode 100755 index 00000000000..21c21a45f92 --- /dev/null +++ b/addons/ofxCv/libs/CLD/include/CLD/imatrix.h @@ -0,0 +1,76 @@ +#ifndef _IMATRIX_H_ +#define _IMATRIX_H_ + +class imatrix { +private: + int Nr, Nc; + int** p; + void delete_all() { + for (int i = 0; i < Nr; i++) + delete[] p[i]; + delete[] p; + } +public: + imatrix() + { + Nr = 1, Nc = 1; + p = new int*[Nr]; + for(int i = 0; i < Nr; i++) + p[i] = new int[Nc]; + p[0][0]=1; + }; + imatrix(int i, int j) + { + Nr = i, Nc = j; + + p = new int*[Nr]; + for(i = 0; i < Nr; i++) + p[i] = new int[Nc]; + }; + imatrix(imatrix& b) { + Nr = b.Nr; + Nc = b.Nc; + p = new int*[Nr]; + for (int i = 0; i < Nr; i++) { + p[i] = new 
int[Nc]; + for (int j = 0; j < Nc; j++) { + p[i][j] = b[i][j]; + } + } + } + void init(int i, int j) + { + delete_all(); + Nr = i, Nc = j; + p = new int*[Nr]; + for(i = 0; i < Nr; i++) + p[i] = new int[Nc]; + }; + + ~imatrix() + { + delete_all(); + } + int* operator[](int i) { return p[i]; }; + + int& get( int i, int j ) const { return p[i][j]; } + int getRow() const { return Nr; } + int getCol() const { return Nc; } + + void zero() + { + for (int i = 0; i < Nr; i++) + for (int j = 0; j < Nc; j++) + p[i][j] = 0; + } + void copy(imatrix& b) + { + init(b.Nr, b.Nc); + for (int i = 0; i < Nr; i++) + for (int j = 0; j < Nc; j++) + p[i][j] = b.p[i][j]; + } +}; + + +#endif \ No newline at end of file diff --git a/addons/ofxCv/libs/CLD/include/CLD/myvec.h b/addons/ofxCv/libs/CLD/include/CLD/myvec.h new file mode 100755 index 00000000000..326f1c015d2 --- /dev/null +++ b/addons/ofxCv/libs/CLD/include/CLD/myvec.h @@ -0,0 +1,122 @@ +#ifndef _MYVEC_H_ +#define _MYVEC_H_ + +#include + +class myvec { +private: + +public: + int N; + double* p; + myvec() + { + N = 1; + p = new double[1]; + p[0]=1.0; + }; + myvec(int i) + { + N = i; + p = new double[N]; + }; + ~myvec() + { + delete[] p; + } + double& operator[](int i) { return p[i]; } + const double& operator[](int i) const { return p[i]; } + void zero() { + for (int i = 0; i < N; i++) + p[i] = 0.0; + } + void make_unit() { + double sum = 0.0; + for (int i = 0; i < N; i++) { + sum += p[i]*p[i]; + } + sum = sqrt(sum); + if (sum > 0.0) { + for (int i = 0; i < N; i++) { + p[i] = p[i] / sum; + } + } + } + double norm() { + double sum = 0.0; + for (int i = 0; i < N; i++) { + sum += p[i]*p[i]; + } + sum = sqrt(sum); + return sum; + } + double get(int n) const { return p[n]; } + int getMax() { return N; } + void init(int i) { + delete[] p; + N = i; + p = new double[N]; + } +}; + +class mymatrix { +private: + int Nr, Nc; + double** p; + void delete_all() { + for (int i = 0; i < Nr; i++) + delete[] p[i]; + delete[] p; + } +public: + 
mymatrix() + { + Nr = 1, Nc = 1; + p = new double*[Nr]; + for(int i = 0; i < Nr; i++) + p[i] = new double[Nc]; + p[0][0]=1.0; + }; + mymatrix(int i, int j) + { + Nr = i, Nc = j; + p = new double*[Nr]; + for(i = 0; i < Nr; i++) + p[i] = new double[Nc]; + }; + mymatrix(mymatrix& b) { + Nr = b.Nr; + Nc = b.Nc; + p = new double*[Nr]; + for (int i = 0; i < Nr; i++) { + p[i] = new double[Nc]; + for (int j = 0; j < Nc; j++) { + p[i][j] = b[i][j]; + } + } + } + ~mymatrix() { + delete_all(); + } + double* operator[](int i) { return p[i]; }; + double& get( int i, int j ) const { return p[i][j]; } + int getRow() const { return Nr; } + int getCol() const { return Nc; } + void init(int i, int j) + { + delete_all(); + Nr = i, Nc = j; + p = new double*[Nr]; + for(i = 0; i < Nr; i++) + p[i] = new double[Nc]; + }; + void zero() + { + for (int i = 0; i < Nr; i++) + for (int j = 0; j < Nc; j++) + p[i][j] = 0.0; + } +}; + + +#endif \ No newline at end of file diff --git a/addons/ofxCv/libs/CLD/src/ETF.cpp b/addons/ofxCv/libs/CLD/src/ETF.cpp new file mode 100755 index 00000000000..24784d85b2e --- /dev/null +++ b/addons/ofxCv/libs/CLD/src/ETF.cpp @@ -0,0 +1,298 @@ +#include "ETF.h" +#include "imatrix.h" + +#include "ofMain.h" + +void ETF::set(imatrix& image) +{ + int i, j; + double MAX_VAL = 1020.; + double v[2]; + + max_grad = -1.; + + for (i = 1; i < Nr - 1; i++) { + for (j = 1; j < Nc - 1; j++) { + //////////////////////////////////////////////////////////////// + p[i][j].tx = (image[i+1][j-1] + 2*(double)image[i+1][j] + image[i+1][j+1] + - image[i-1][j-1] - 2*(double)image[i-1][j] - image[i-1][j+1]) / MAX_VAL; + p[i][j].ty = (image[i-1][j+1] + 2*(double)image[i][j+1] + image[i+1][j+1] + - image[i-1][j-1] - 2*(double)image[i][j-1] - image[i+1][j-1]) / MAX_VAL; + ///////////////////////////////////////////// + v[0] = p[i][j].tx; + v[1] = p[i][j].ty; + p[i][j].tx = -v[1]; + p[i][j].ty = v[0]; + ////////////////////////////////////////////// + p[i][j].mag = sqrt(p[i][j].tx * p[i][j].tx 
+ p[i][j].ty * p[i][j].ty); + + if (p[i][j].mag > max_grad) { + max_grad = p[i][j].mag; + } + } + } + + for (i = 1; i <= Nr - 2; i++) { + p[i][0].tx = p[i][1].tx; + p[i][0].ty = p[i][1].ty; + p[i][0].mag = p[i][1].mag; + p[i][Nc - 1].tx = p[i][Nc - 2].tx; + p[i][Nc - 1].ty = p[i][Nc - 2].ty; + p[i][Nc - 1].mag = p[i][Nc - 2].mag; + } + + for (j = 1; j <= Nc - 2; j++) { + p[0][j].tx = p[1][j].tx; + p[0][j].ty = p[1][j].ty; + p[0][j].mag = p[1][j].mag; + p[Nr - 1][j].tx = p[Nr - 2][j].tx; + p[Nr - 1][j].ty = p[Nr - 2][j].ty; + p[Nr - 1][j].mag = p[Nr - 2][j].mag; + } + + p[0][0].tx = ( p[0][1].tx + p[1][0].tx ) / 2; + p[0][0].ty = ( p[0][1].ty + p[1][0].ty ) / 2; + p[0][0].mag = ( p[0][1].mag + p[1][0].mag ) / 2; + p[0][Nc-1].tx = ( p[0][Nc-2].tx + p[1][Nc-1].tx ) / 2; + p[0][Nc-1].ty = ( p[0][Nc-2].ty + p[1][Nc-1].ty ) / 2; + p[0][Nc-1].mag = ( p[0][Nc-2].mag + p[1][Nc-1].mag ) / 2; + p[Nr-1][0].tx = ( p[Nr-1][1].tx + p[Nr-2][0].tx ) / 2; + p[Nr-1][0].ty = ( p[Nr-1][1].ty + p[Nr-2][0].ty ) / 2; + p[Nr-1][0].mag = ( p[Nr-1][1].mag + p[Nr-2][0].mag ) / 2; + p[Nr - 1][Nc - 1].tx = ( p[Nr - 1][Nc - 2].tx + p[Nr - 2][Nc - 1].tx ) / 2; + p[Nr - 1][Nc - 1].ty = ( p[Nr - 1][Nc - 2].ty + p[Nr - 2][Nc - 1].ty ) / 2; + p[Nr - 1][Nc - 1].mag = ( p[Nr - 1][Nc - 2].mag + p[Nr - 2][Nc - 1].mag ) / 2; + + normalize(); + +} + +void ETF::set2(imatrix& image) +{ + int i, j; + double MAX_VAL = 1020.; + double v[2]; + + max_grad = -1.; + + imatrix tmp(Nr, Nc); + + for (i = 1; i < Nr - 1; i++) { + for (j = 1; j < Nc - 1; j++) { + //////////////////////////////////////////////////////////////// + p[i][j].tx = (image[i+1][j-1] + 2*(double)image[i+1][j] + image[i+1][j+1] + - image[i-1][j-1] - 2*(double)image[i-1][j] - image[i-1][j+1]) / MAX_VAL; + p[i][j].ty = (image[i-1][j+1] + 2*(double)image[i][j+1] + image[i+1][j+1] + - image[i-1][j-1] - 2*(double)image[i][j-1] - image[i+1][j-1]) / MAX_VAL; + ///////////////////////////////////////////// + v[0] = p[i][j].tx; + v[1] = p[i][j].ty; + 
////////////////////////////////////////////// + tmp[i][j] = sqrt(p[i][j].tx * p[i][j].tx + p[i][j].ty * p[i][j].ty); + + if (tmp[i][j] > max_grad) { + max_grad = tmp[i][j]; + } + } + } + + for (i = 1; i <= Nr - 2; i++) { + tmp[i][0] = tmp[i][1]; + tmp[i][Nc - 1] = tmp[i][Nc - 2]; + } + + for (j = 1; j <= Nc - 2; j++) { + tmp[0][j] = tmp[1][j]; + tmp[Nr - 1][j] = tmp[Nr - 2][j]; + } + + tmp[0][0] = ( tmp[0][1] + tmp[1][0] ) / 2; + tmp[0][Nc-1] = ( tmp[0][Nc-2] + tmp[1][Nc-1] ) / 2; + tmp[Nr-1][0] = ( tmp[Nr-1][1] + tmp[Nr-2][0] ) / 2; + tmp[Nr - 1][Nc - 1] = ( tmp[Nr - 1][Nc - 2] + tmp[Nr - 2][Nc - 1] ) / 2; + + imatrix gmag(Nr, Nc); + + // normalize the magnitude + for (i = 0; i < Nr; i++) { + for (j = 0; j < Nc; j++) { + tmp[i][j] /= max_grad; + gmag[i][j] = (int) (tmp[i][j] * 255.0); + } + } + + for (i = 1; i < Nr - 1; i++) { + for (j = 1; j < Nc - 1; j++) { + //////////////////////////////////////////////////////////////// + p[i][j].tx = (gmag[i+1][j-1] + 2*(double)gmag[i+1][j] + gmag[i+1][j+1] + - gmag[i-1][j-1] - 2*(double)gmag[i-1][j] - gmag[i-1][j+1]) / MAX_VAL; + p[i][j].ty = (gmag[i-1][j+1] + 2*(double)gmag[i][j+1] + gmag[i+1][j+1] + - gmag[i-1][j-1] - 2*(double)gmag[i][j-1] - gmag[i+1][j-1]) / MAX_VAL; + ///////////////////////////////////////////// + v[0] = p[i][j].tx; + v[1] = p[i][j].ty; + p[i][j].tx = -v[1]; + p[i][j].ty = v[0]; + ////////////////////////////////////////////// + p[i][j].mag = sqrt(p[i][j].tx * p[i][j].tx + p[i][j].ty * p[i][j].ty); + + if (p[i][j].mag > max_grad) { + max_grad = p[i][j].mag; + } + } + } + + for (i = 1; i <= Nr - 2; i++) { + p[i][0].tx = p[i][1].tx; + p[i][0].ty = p[i][1].ty; + p[i][0].mag = p[i][1].mag; + p[i][Nc - 1].tx = p[i][Nc - 2].tx; + p[i][Nc - 1].ty = p[i][Nc - 2].ty; + p[i][Nc - 1].mag = p[i][Nc - 2].mag; + } + + for (j = 1; j <= Nc - 2; j++) { + p[0][j].tx = p[1][j].tx; + p[0][j].ty = p[1][j].ty; + p[0][j].mag = p[1][j].mag; + p[Nr - 1][j].tx = p[Nr - 2][j].tx; + p[Nr - 1][j].ty = p[Nr - 2][j].ty; + p[Nr - 
1][j].mag = p[Nr - 2][j].mag; + } + + p[0][0].tx = ( p[0][1].tx + p[1][0].tx ) / 2; + p[0][0].ty = ( p[0][1].ty + p[1][0].ty ) / 2; + p[0][0].mag = ( p[0][1].mag + p[1][0].mag ) / 2; + p[0][Nc-1].tx = ( p[0][Nc-2].tx + p[1][Nc-1].tx ) / 2; + p[0][Nc-1].ty = ( p[0][Nc-2].ty + p[1][Nc-1].ty ) / 2; + p[0][Nc-1].mag = ( p[0][Nc-2].mag + p[1][Nc-1].mag ) / 2; + p[Nr-1][0].tx = ( p[Nr-1][1].tx + p[Nr-2][0].tx ) / 2; + p[Nr-1][0].ty = ( p[Nr-1][1].ty + p[Nr-2][0].ty ) / 2; + p[Nr-1][0].mag = ( p[Nr-1][1].mag + p[Nr-2][0].mag ) / 2; + p[Nr - 1][Nc - 1].tx = ( p[Nr - 1][Nc - 2].tx + p[Nr - 2][Nc - 1].tx ) / 2; + p[Nr - 1][Nc - 1].ty = ( p[Nr - 1][Nc - 2].ty + p[Nr - 2][Nc - 1].ty ) / 2; + p[Nr - 1][Nc - 1].mag = ( p[Nr - 1][Nc - 2].mag + p[Nr - 2][Nc - 1].mag ) / 2; + + normalize(); +} + + +inline void make_unit(double& vx, double& vy) +{ + double mag = sqrt( vx*vx + vy*vy ); + if (mag != 0.0) { + vx /= mag; + vy /= mag; + } +} + +void ETF::normalize() +{ + int i, j; + + for (i = 0; i < Nr; i++) { + for (j = 0; j < Nc; j++) { + make_unit(p[i][j].tx, p[i][j].ty); + p[i][j].mag /= max_grad; + } + } +} + + +void ETF::Smooth(int half_w, int M) +{ + int i, j, k; + int MAX_GRADIENT = -1; + double weight; + int s, t; + int x, y; + double mag_diff; + + int image_x = getRow(); + int image_y = getCol(); + + ETF e2; + + e2.init(image_x, image_y); + e2.copy(*this); + + double v[2], w[2], g[2]; + double angle; + double factor; + + for (k = 0; k < M; k++) { + //////////////////////// + // horizontal + for (j = 0; j < image_y; j++) { + for (i = 0; i < image_x; i++) { + g[0] = g[1] = 0.0; + v[0] = p[i][j].tx; + v[1] = p[i][j].ty; + for (s = -half_w; s <= half_w; s++) { + //////////////////////////////////////// + x = i+s; y = j; + if (x > image_x-1) x = image_x-1; + else if (x < 0) x = 0; + if (y > image_y-1) y = image_y-1; + else if (y < 0) y = 0; + //////////////////////////////////////// + mag_diff = p[x][y].mag - p[i][j].mag; + ////////////////////////////////////////////////////// + 
w[0] = p[x][y].tx; + w[1] = p[x][y].ty; + //////////////////////////////// + factor = 1.0; + angle = v[0] * w[0] + v[1] * w[1]; + if (angle < 0.0) { + factor = -1.0; + } + weight = mag_diff + 1; + ////////////////////////////////////////////////////// + g[0] += weight * p[x][y].tx * factor; + g[1] += weight * p[x][y].ty * factor; + } + make_unit(g[0], g[1]); + e2[i][j].tx = g[0]; + e2[i][j].ty = g[1]; + } + } + this->copy(e2); + ///////////////////////////////// + // vertical + for (j = 0; j < image_y; j++) { + for (i = 0; i < image_x; i++) { + g[0] = g[1] = 0.0; + v[0] = p[i][j].tx; + v[1] = p[i][j].ty; + for (t = -half_w; t <= half_w; t++) { + //////////////////////////////////////// + x = i; y = j+t; + if (x > image_x-1) x = image_x-1; + else if (x < 0) x = 0; + if (y > image_y-1) y = image_y-1; + else if (y < 0) y = 0; + //////////////////////////////////////// + mag_diff = p[x][y].mag - p[i][j].mag; + ////////////////////////////////////////////////////// + w[0] = p[x][y].tx; + w[1] = p[x][y].ty; + //////////////////////////////// + factor = 1.0; + /////////////////////////////// + angle = v[0] * w[0] + v[1] * w[1]; + if (angle < 0.0) factor = -1.0; + ///////////////////////////////////////////////////////// + weight = mag_diff + 1; + ////////////////////////////////////////////////////// + g[0] += weight * p[x][y].tx * factor; + g[1] += weight * p[x][y].ty * factor; + } + make_unit(g[0], g[1]); + e2[i][j].tx = g[0]; + e2[i][j].ty = g[1]; + } + } + this->copy(e2); + } + //////////////////////////////////////////// +} diff --git a/addons/ofxCv/libs/CLD/src/fdog.cpp b/addons/ofxCv/libs/CLD/src/fdog.cpp new file mode 100755 index 00000000000..d7d5e8b55ca --- /dev/null +++ b/addons/ofxCv/libs/CLD/src/fdog.cpp @@ -0,0 +1,380 @@ +//#include "stdafx.h" +#include "ofMain.h" +#include + +#include "ETF.h" +#include "fdog.h" +#include "myvec.h" +#include "imatrix.h" + +#ifndef ABS + #define ABS(x) ( ((x)>0) ? 
(x) : (-(x)) )
#endif
// NOTE(review): truncating round — (int)(x + 0.5) rounds toward zero for
// negative x (e.g. round(-1.3) yields 0, not -1), and this macro shadows the
// C99/C++11 std::round. Used below on values that can go slightly negative
// (d_x/d_y stepping) — confirm the original CLD behavior is intended before
// changing; left as-is since this is vendored third-party code.
#define round(x) ((int) ((x) + 0.5))

// 1-D Gaussian density with the given mean and standard deviation sigma.
// Relies on a PI constant defined earlier in this file.
inline double gauss(double x, double mean, double sigma)
{
	return ( exp( (-(x-mean)*(x-mean)) / (2*sigma*sigma) ) / sqrt(PI * 2.0 * sigma * sigma) );
}

// Fills GAU with one half of a zero-mean Gaussian kernel: GAU[k] = G(k; 0, sigma).
// The kernel length is chosen adaptively: the first index whose density drops
// below 0.001 becomes the last entry, so larger sigma yields a longer vector.
// GAU.getMax()-1 is therefore the kernel's half-width.
void MakeGaussianVector(double sigma, myvec& GAU)
{
	int i, j;

	double threshold = 0.001;

	i = 0;
	while(1) {
		i++;
		// stop at the first tap whose weight is negligible
		if ( gauss((double)i, 0.0, sigma) < threshold )
			break;
	}
	GAU.init(i+1);
	GAU.zero();

	GAU[0] = gauss((double)0.0, 0.0, sigma);
	for (j = 1; j < GAU.getMax(); j++) {
		GAU[j] = gauss((double)j, 0.0, sigma);
	}
}

// Difference-of-Gaussians filtered PERPENDICULAR to the edge tangent flow.
// For each pixel, samples the image along the gradient direction
// (vn = rotated ETF vector) and accumulates two 1-D Gaussian responses
// (GAU1: center, GAU2: surround, sigma ratio 1.6 set by the caller);
// writes dog = S1 - tau*S2. Pixels with a zero flow vector are treated as
// flat white (255) so they produce no line response.
void GetDirectionalDoG(imatrix& image, ETF& e, mymatrix& dog, myvec& GAU1, myvec& GAU2, double tau)
{
	myvec vn(2);
	double x, y, d_x, d_y;
	double weight1, weight2, w_sum1, sum1, sum2, w_sum2;

	int s;
	int x1, y1;
	int i, j;
	int dd;
	double val;

	int half_w1, half_w2;

	// half-widths of the center/surround kernels (see MakeGaussianVector)
	half_w1 = GAU1.getMax()-1;
	half_w2 = GAU2.getMax()-1;

	int image_x, image_y;

	image_x = image.getRow();
	image_y = image.getCol();

	for (i = 0; i < image_x; i++) {
		for (j = 0; j < image_y; j++) {
			sum1 = sum2 = 0.0;
			w_sum1 = w_sum2 = 0.0;
			weight1 = weight2 = 0.0;

			// vn = ETF tangent rotated 90 degrees -> gradient direction
			vn[0] = -e[i][j].ty;
			vn[1] = e[i][j].tx;

			if (vn[0] == 0.0 && vn[1] == 0.0) {
				// no flow information: emit a flat (non-edge) response
				sum1 = 255.0;
				sum2 = 255.0;
				dog[i][j] = sum1 - tau * sum2;
				continue;
			}
			d_x = i; d_y = j;
			// walk the line through (i,j) along the gradient, +/- surround half-width
			for (s = -half_w2; s <= half_w2; s++) {
				x = d_x + vn[0] * s;
				y = d_y + vn[1] * s;
				// skip samples that fall outside the image
				if (x > (double)image_x-1 || x < 0.0 || y > (double)image_y-1 || y < 0.0)
					continue;
				// round to the nearest pixel and clamp to the image bounds
				x1 = round(x); if (x1 < 0) x1 = 0; if (x1 > image_x-1) x1 = image_x-1;
				y1 = round(y); if (y1 < 0) y1 = 0; if (y1 > image_y-1) y1 = image_y-1;
				val = image[x1][y1];
				// center kernel contributes only within its (smaller) half-width
				dd = ABS(s);
				if (dd > half_w1) weight1 = 0.0;
				else weight1 = GAU1[dd];
				sum1 += val * weight1;
				w_sum1 += weight1;
				// surround kernel covers the full walk
				weight2 = GAU2[dd];
				sum2 += val * weight2;
				w_sum2 += weight2;
			}
			// normalize each response by its accumulated weight
			sum1 /= w_sum1;
			sum2 /= w_sum2;
			dog[i][j] = sum1 - tau * sum2;
		}
	}

}

// Smooths the directional DoG response ALONG the edge tangent flow and
// thresholds it into tmp in [0,1]. From each pixel the ETF streamline is
// traced in both directions (two symmetric loops, the second negates the
// tangent), accumulating dog samples weighted by GAU3. Positive filtered
// response -> 1.0 (no line); negative -> soft value 1 + tanh(sum)
// (darker where the response is more negative).
void GetFlowDoG(ETF& e, mymatrix& dog, mymatrix& tmp, myvec& GAU3)
{
	myvec vt(2);
	double x, y, d_x, d_y;
	double weight1, w_sum1, sum1;

	int i_x, i_y, k;
	int x1, y1;
	double val;
	int i, j;

	int image_x = dog.getRow();
	int image_y = dog.getCol();

	int half_l;
	half_l = GAU3.getMax()-1;

	// NOTE(review): flow_DOG_sign is never used below — dead local.
	int flow_DOG_sign = 0;

	double step_size = 1.0;

	for (i = 0; i < image_x; i++) {
		for (j = 0; j < image_y; j++) {
			sum1 = 0.0;
			w_sum1 = 0.0;
			weight1 = 0.0;
			// seed the accumulator with the center sample (counted once)
			val = dog[i][j];
			weight1 = GAU3[0];
			sum1 = val * weight1;
			w_sum1 += weight1;
			d_x = (double)i; d_y = (double)j;
			i_x = i; i_y = j;
			// trace the streamline in the forward tangent direction
			for (k = 0; k < half_l; k++) {
				vt[0] = e[i_x][i_y].tx;
				vt[1] = e[i_x][i_y].ty;
				if (vt[0] == 0.0 && vt[1] == 0.0) {
					break;
				}
				x = d_x;
				y = d_y;
				if (x > (double)image_x-1 || x < 0.0 || y > (double)image_y-1 || y < 0.0)
					break;
				// nearest-pixel sample, clamped to the image
				x1 = round(x); if (x1 < 0) x1 = 0; if (x1 > image_x-1) x1 = image_x-1;
				y1 = round(y); if (y1 < 0) y1 = 0; if (y1 > image_y-1) y1 = image_y-1;
				val = dog[x1][y1];
				weight1 = GAU3[k];
				sum1 += val * weight1;
				w_sum1 += weight1;
				// advance one step along the local tangent
				d_x += vt[0] * step_size;
				d_y += vt[1] * step_size;
				// re-anchor to the nearest pixel so the next tangent lookup is valid
				i_x = round(d_x);
				i_y = round(d_y);
				if (d_x < 0 || d_x > image_x-1 || d_y < 0 || d_y > image_y-1) break;
			}
			// trace again in the opposite direction (tangent negated)
			d_x = (double)i; d_y = (double)j;
			i_x = i; i_y = j;
			for (k = 0; k < half_l; k++) {
				vt[0] = -e[i_x][i_y].tx;
				vt[1] = -e[i_x][i_y].ty;
				if (vt[0] == 0.0 && vt[1] == 0.0) {
					break;
				}
				x = d_x;
				y = d_y;
				if (x > (double)image_x-1 || x < 0.0 || y > (double)image_y-1 || y < 0.0)
					break;
				x1 = round(x); if (x1 < 0) x1 = 0; if (x1 > image_x-1) x1 = image_x-1;
				y1 = round(y); if (y1 < 0) y1 = 0; if (y1 > image_y-1) y1 = image_y-1;
				val = dog[x1][y1];
				// NOTE(review): GAU3[0] is re-applied here (k starts at 0), so the
				// center weight is effectively counted twice across the two passes —
				// matches the original CLD reference code; left unchanged.
				weight1 = GAU3[k];
				sum1 += val * weight1;
				w_sum1 += weight1;
				d_x += vt[0] * step_size;
				d_y += vt[1] * step_size;
				i_x = round(d_x);
				i_y = round(d_y);
				if (d_x < 0 || d_x > image_x-1 || d_y < 0 || d_y > image_y-1) break;
			}
			sum1 /= w_sum1;
			// soft threshold: positive -> white, negative -> 1 + tanh(sum1) in (0,1)
			if (sum1 > 0) tmp[i][j] = 1.0;
			else tmp[i][j] = 1.0 + tanh(sum1);
		}
	}
}

// Full flow-based DoG (FDoG) line extraction, in place on `image`:
// builds the center (sigma), surround (1.6*sigma) and along-flow (sigma3)
// Gaussian kernels, runs the perpendicular DoG then the along-flow
// smoothing/thresholding, and writes the result back scaled to [0,255].
// tau is the DoG surround weight passed through to GetDirectionalDoG.
void GetFDoG(imatrix& image, ETF& e, double sigma, double sigma3, double tau)
{
	int i, j;

	int image_x = image.getRow();
	int image_y = image.getCol();

	myvec GAU1, GAU2, GAU3;
	MakeGaussianVector(sigma, GAU1);
	// 1.6 is the classic DoG center/surround sigma ratio
	MakeGaussianVector(sigma*1.6, GAU2);

	// NOTE(review): half_w1/half_w2/half_l are computed but never used here.
	int half_w1, half_w2, half_l;
	half_w1 = GAU1.getMax()-1;
	half_w2 = GAU2.getMax()-1;

	MakeGaussianVector(sigma3, GAU3);
	half_l = GAU3.getMax()-1;

	mymatrix tmp(image_x, image_y);
	mymatrix dog(image_x, image_y);

	GetDirectionalDoG(image, e, dog, GAU1, GAU2, tau);
	GetFlowDoG(e, dog, tmp, GAU3);

	// tmp holds values in [0,1]; rescale to 8-bit gray
	for (i = 0; i < image_x; i++) {
		for (j = 0; j < image_y; j++) {
			image[i][j] = round(tmp[i][j] * 255.);
		}
	}
}

// Separable Gaussian blur of `image` in place (horizontal pass into tmp,
// then vertical pass back into image), with edge-clamped sampling and
// per-pixel weight normalization. Also tracks and prints the min/max
// filtered values (debug output).
void GaussSmoothSep(imatrix& image, double sigma)
{
	int i, j;
	// NOTE(review): MAX_GRADIENT is never used — dead local.
	int MAX_GRADIENT = -1;
	double g, max_g, min_g;
	int s, t;
	int x, y;
	double weight, w_sum;

	int image_x = image.getRow();
	int image_y = image.getCol();

	myvec GAU1;
	MakeGaussianVector(sigma, GAU1);
	int half = GAU1.getMax()-1;

	mymatrix tmp(image_x, image_y);

	max_g = -1;
	min_g = 10000000;
	// horizontal pass: image -> tmp
	for (j = 0; j < image_y; j++) {
		for (i = 0; i < image_x; i++) {
			g = 0.0;
			weight = w_sum = 0.0;
			for (s = -half; s <= half; s++) {
				// clamp the sample position to the image border
				x = i+s; y = j;
				if (x > image_x-1) x = image_x-1;
				else if (x < 0) x = 0;
				if (y > image_y-1) y = image_y-1;
				else if (y < 0) y = 0;
				weight = GAU1[ABS(s)];
				g += weight * image[x][y];
				w_sum += weight;
			}
			g /= w_sum;
			if (g > max_g) max_g = g;
			if (g < min_g) min_g = g;
			tmp[i][j] = g;
		}
	}
	// vertical pass: tmp -> image
	for (j = 0; j < image_y; j++) {
		for (i = 0; i < image_x; i++) {
			g = 0.0;
			weight = w_sum = 0.0;
			for (t = -half; t <= half; t++) {
				x = i; y = j+t;
				if (x > image_x-1) x = image_x-1;
				else if (x < 0) x = 0;
				if (y > image_y-1) y = image_y-1;
				else if (y < 0) y = 0;
				weight = GAU1[ABS(t)];
				g += weight * tmp[x][y];
				w_sum += weight;
			}
			g /= w_sum;
			if (g > max_g) max_g = g;
			if (g < min_g) min_g = g;
			image[i][j] = round(g);
		}
	}

	printf("max_g = %f\n", max_g);
	printf("min_g = %f\n", min_g);
}

// Overlays the line drawing `gray` onto `image`: wherever the line image is
// black (0), the merged pixel is black; elsewhere the original image shows.
void ConstructMergedImage(imatrix& image, imatrix& gray, imatrix& merged)
{
	int x, y;

	int image_x = image.getRow();
	int image_y = image.getCol();

	for (y = 0; y < image_y; y++) {
		for (x = 0; x < image_x; x++) {
			if (gray[x][y] == 0) merged[x][y] = 0;
			else merged[x][y] = image[x][y];
		}
	}
}

// Overlay variant that multiplies instead of masking: merged = image * gray
// with both operands normalized to [0,1], so partial line darkness blends
// smoothly rather than switching hard to black.
void ConstructMergedImageMult(imatrix& image, imatrix& gray, imatrix& merged)
// using multiplication
{
	int x, y;
	double gray_val, line_darkness;

	int image_x = image.getRow();
	int image_y = image.getCol();

	for (y = 0; y < image_y; y++) {
		for (x = 0; x < image_x; x++) {
			gray_val = image[x][y] / 255.0;
			line_darkness = gray[x][y] / 255.0;
			gray_val *= line_darkness;
			merged[x][y] = round(gray_val * 255.0);
		}
	}
}

// Hard threshold in place: pixels whose normalized value ([0,1]) falls below
// `thres` become 0, all others become 255.
void Binarize(imatrix& image, double thres)
{
	int i, j;
	double val;

	int image_x = image.getRow();
	int image_y = image.getCol();

	for (i = 0; i < image_x; i++) {
		for (j = 0; j < image_y; j++) {
			val = image[i][j] / 255.0;
			if (val < thres)
				image[i][j] = 0;
			else image[i][j] = 255;
		}
	}
}

// Soft threshold in place: pixels below `thres` (normalized) keep their
// original gray value; pixels at or above it are clipped to white (255).
void GrayThresholding(imatrix& image, double thres)
{
	int i, j;
	double val;

	int image_x = image.getRow();
	int image_y = image.getCol();

	for (i = 0; i < image_x; i++) {
		for (j = 0; j < image_y; j++) {
			val = image[i][j] / 255.0;
			if (val < thres)
				image[i][j] = round(val * 255.0);
			else image[i][j] = 255;
		}
	}
}

diff --git a/addons/ofxCv/libs/ofxCv/include/ofxCv/Calibration.h b/addons/ofxCv/libs/ofxCv/include/ofxCv/Calibration.h
new file mode 100644
index 00000000000..1aa112ca428
--- /dev/null
+++ b/addons/ofxCv/libs/ofxCv/include/ofxCv/Calibration.h
@@ -0,0 +1,132 @@
/*
 this class handles per-camera intrinsic calibration and undistortion.
 given a series of chessboard images, it will calculate the intrinsics.
 to use it:

 0 either load() from a yml file (skip to 5),
 or do the calibration as follows
 1 set the board and physical square size of the chess board. whatever
 if your squares are in mm, your focal length will also be in mm.
 2 add() each image containing a chess board
 3 when all the images are added, call calibrate()
 4 now you can save() a yml calibration file
 5 now you can undistort() incoming images.

 to do inter-camera (extrinsics) calibration, you need to first calibrate
 each camera individually. then use getTransformation to determine the
 rotation and translation from camera to another.
+ */ + +#pragma once + +#include "ofxCv.h" +#include "ofNode.h" + +namespace ofxCv { + class Intrinsics { + public: + void setup(float focalLengthMm, cv::Size imageSizePx, cv::Size2f sensorSizeMm, cv::Point2d principalPointPct = cv::Point2d(.5,.5)); + void setup(cv::Mat cameraMatrix, cv::Size imageSizePx, cv::Size2f sensorSizeMm = cv::Size2f(0, 0)); + void setImageSize(cv::Size imgSize); + cv::Mat getCameraMatrix() const; + cv::Size getImageSize() const; + cv::Size2f getSensorSize() const; + cv::Point2d getFov() const; + double getFocalLength() const; + double getAspectRatio() const; + cv::Point2d getPrincipalPoint() const; + void loadProjectionMatrix(float nearDist = 10., float farDist = 10000., cv::Point2d viewportOffset = cv::Point2d(0, 0)) const; + protected: + void updateValues(); + cv::Mat cameraMatrix; + cv::Size imageSize; + cv::Size2f sensorSize; + cv::Point2d fov; + double focalLength, aspectRatio; + cv::Point2d principalPoint; + }; + + enum CalibrationPattern {CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID}; + + class Calibration : public ofNode { + public: + Calibration(); + + void save(std::string filename, bool absolute = false) const; + void load(std::string filename, bool absolute = false); + void reset(); + + void setPatternType(CalibrationPattern patternType); + void setPatternSize(int xCount, int yCount); + void setSquareSize(float squareSize); + /// set this to the pixel size of your smallest square. 
default is 11 + void setSubpixelSize(int subpixelSize); + + bool add(cv::Mat img); + bool clean(float minReprojectionError = 2.f); + bool calibrate(); + bool calibrateFromDirectory(std::string directory); + bool findBoard(cv::Mat img, std::vector &pointBuf, bool refine = true); + void setIntrinsics(Intrinsics& distortedIntrinsics); + void setDistortionCoefficients(float k1, float k2, float p1, float p2, float k3=0, float k4=0, float k5=0, float k6=0); + + void undistort(cv::Mat img, int interpolationMode = cv::INTER_NEAREST); + void undistort(cv::Mat src, cv::Mat dst, int interpolationMode = cv::INTER_NEAREST); + + ofVec2f undistort(ofVec2f& src) const; + void undistort(std::vector& src, std::vector& dst) const; + + bool getTransformation(Calibration& dst, cv::Mat& rotation, cv::Mat& translation); + + float getReprojectionError() const; + float getReprojectionError(int i) const; + + const Intrinsics& getDistortedIntrinsics() const; + const Intrinsics& getUndistortedIntrinsics() const; + cv::Mat getDistCoeffs() const; + + // if you want a wider fov, say setFillFrame(false) before load() or calibrate() + void setFillFrame(bool fillFrame); + + int size() const; + cv::Size getPatternSize() const; + float getSquareSize() const; + static std::vector createObjectPoints(cv::Size patternSize, float squareSize, CalibrationPattern patternType); + + void customDraw(); + void draw(int i) const; + void draw3d() const; + void draw3d(int i) const; + + bool isReady(); + std::vector > imagePoints; + + protected: + CalibrationPattern patternType; + cv::Size patternSize, addedImageSize, subpixelSize; + float squareSize; + cv::Mat grayMat; + + cv::Mat distCoeffs; + + std::vector boardRotations, boardTranslations; + std::vector > objectPoints; + + float reprojectionError; + std::vector perViewErrors; + + bool fillFrame; + cv::Mat undistortBuffer; + cv::Mat undistortMapX, undistortMapY; + + void updateObjectPoints(); + void updateReprojectionError(); + void updateUndistortion(); + + 
Intrinsics distortedIntrinsics; + Intrinsics undistortedIntrinsics; + + bool ready; + }; + +} diff --git a/addons/ofxCv/libs/ofxCv/include/ofxCv/ContourFinder.h b/addons/ofxCv/libs/ofxCv/include/ofxCv/ContourFinder.h new file mode 100644 index 00000000000..2fc5ef34796 --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/include/ofxCv/ContourFinder.h @@ -0,0 +1,116 @@ +/* + the contour finder will automatically convert and threshold your image for you. + by default, it finds bright regions. to find dark regions call setInvert(true). + to track a color, call setTargetColor(). by default, it tracks in RGB space. + to track in HSV or just hue space, pass TRACK_COLOR_HSV or TRACK_COLOR_H. + to change the threshold value, use setThreshold(). by default, the threshold is + 128. when finding bright regions, 128 is halfway between white and black. when + tracking a color, 0 means "exactly similar" and 255 is "all colors". + + by default, the results are unfiltered by area. to filter by area use one of + set(Min/Max)(Area/Radius/Norm) functions. set(Min/Max)Area is in pixels. + set(Min/Max)Radius uses the area of a circle with the given radius for a more + "linear" feeling. set(Min/Max)Norm uses values between (0-1) and multiplies + by the input image area. to reset the min/max area call reset(Min/Max)Area. + + keeping with the ofxCv philosophy, no new objects (like ofxCvBlob) are used. + you can get contours as vector or ofPolyline. for other features, + you can use methods of ofPolyline (getArea(), getPerimiter()) or cv methods + by asking ContourFinder (getContourArea(), getArcLength()). + */ + +// to implement in ContourFinder: +// holes/no holes +// CV_THRESH_OTSU? +// cv::pointPolygonTest - inside, edge, outside +// cv::matchShapes - similarity between two contours +// cv::estimateRigidTransform? subdivision-based estimation for outline-flow? 
+ +#pragma once + +#include "ofxCv/Utilities.h" +#include "ofxCv/Tracker.h" + +namespace ofxCv { + + enum TrackingColorMode {TRACK_COLOR_RGB, TRACK_COLOR_HSV, TRACK_COLOR_H, TRACK_COLOR_HS}; + + class ContourFinder { + public: + ContourFinder(); + + template + void findContours(T& img) { + findContours(toCv(img)); + } + void findContours(cv::Mat img); + const std::vector >& getContours() const; + const std::vector& getPolylines() const; + const std::vector& getBoundingRects() const; + + unsigned int size() const; + std::vector& getContour(unsigned int i); + ofPolyline& getPolyline(unsigned int i); + + cv::Rect getBoundingRect(unsigned int i) const; + cv::Point2f getCenter(unsigned int i) const; // center of bounding box (most stable) + cv::Point2f getCentroid(unsigned int i) const; // center of mass (less stable) + cv::Point2f getAverage(unsigned int i) const; // average of contour vertices (least stable) + cv::Vec2f getBalance(unsigned int i) const; // difference between centroid and center + double getContourArea(unsigned int i) const; + double getArcLength(unsigned int i) const; + std::vector getConvexHull(unsigned int i) const; + std::vector getConvexityDefects(unsigned int i) const; + cv::RotatedRect getMinAreaRect(unsigned int i) const; + cv::Point2f getMinEnclosingCircle(unsigned int i, float& radius) const; + cv::RotatedRect getFitEllipse(unsigned int i) const; + std::vector getFitQuad(unsigned int i) const; + cv::Vec2f getVelocity(unsigned int i) const; + + RectTracker& getTracker(); + unsigned int getLabel(unsigned int i) const; + + void setThreshold(float thresholdValue); + void setAutoThreshold(bool autoThreshold); + void setInvert(bool invert); + void setUseTargetColor(bool useTargetColor); + void setTargetColor(ofColor targetColor, TrackingColorMode trackingColorMode = TRACK_COLOR_RGB); + void setFindHoles(bool findHoles); + void setSortBySize(bool sortBySize); + + void resetMinArea(); + void resetMaxArea(); + void setMinArea(float minArea); + void 
setMaxArea(float maxArea); + void setMinAreaRadius(float minAreaRadius); + void setMaxAreaRadius(float maxAreaRadius); + void setMinAreaNorm(float minAreaNorm); + void setMaxAreaNorm(float maxAreaNorm); + + void setSimplify(bool simplify); + + void draw(); + + protected: + cv::Mat hsvBuffer, thresh; + bool autoThreshold, invert, simplify; + float thresholdValue; + + bool useTargetColor; + TrackingColorMode trackingColorMode; + ofColor targetColor; + + float minArea, maxArea; + bool minAreaNorm, maxAreaNorm; + + std::vector > contours; + std::vector polylines; + + RectTracker tracker; + std::vector boundingRects; + + int contourFindingMode; + bool sortBySize; + }; + +} diff --git a/addons/ofxCv/libs/ofxCv/include/ofxCv/Distance.h b/addons/ofxCv/libs/ofxCv/include/ofxCv/Distance.h new file mode 100644 index 00000000000..b6fd4104d09 --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/include/ofxCv/Distance.h @@ -0,0 +1,14 @@ +#pragma once + +#include +#include +#include + +namespace ofxCv { + + // edit distance is the number of transformations required to turn one string into another + int editDistance(const std::string& a, const std::string& b); + + // cross correlation using edit distance gives the most representative string from a set + const std::string& mostRepresentative(const std::vector& strs); +} diff --git a/addons/ofxCv/libs/ofxCv/include/ofxCv/Flow.h b/addons/ofxCv/libs/ofxCv/include/ofxCv/Flow.h new file mode 100644 index 00000000000..66e281fdda1 --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/include/ofxCv/Flow.h @@ -0,0 +1,157 @@ +#pragma once + +#include "ofxCv.h" + +namespace ofxCv { + + class Flow { + public: + // should constructor be protected? + Flow(); + virtual ~Flow(); + + //call these functions to calculate flow on sequential images. 
+ //After this call the flow field will be populated and + //subsequent calls to getFlow() will be updated + + //call with two contiguous images + template + void calcOpticalFlow(T& lastImage, T& currentImage) { + calcOpticalFlow(toCv(lastImage), toCv(currentImage)); + } + void calcOpticalFlow(cv::Mat lastImage, cv::Mat currentImage); + + //call with subsequent images to do running optical flow. + //the Flow class internally stores the last image for convenience + template + void calcOpticalFlow(T& currentImage) { + calcOpticalFlow(toCv(currentImage)); + } + void calcOpticalFlow(cv::Mat nextImage); + + void draw(); + void draw(float x, float y); + void draw(float x, float y, float width, float height); + void draw(ofRectangle rect); + int getWidth(); + int getHeight(); + + virtual void resetFlow(); + + private: + cv::Mat last, curr; + + protected: + bool hasFlow; + + //specific flow implementation + virtual void calcFlow(cv::Mat prev, cv::Mat next) = 0; + //specific drawing implementation + virtual void drawFlow(ofRectangle r) = 0; + }; + + //there are two implementations of Flow + //use Farneback for a dense flow field, + //use PyrLK for specific features + + //see http://opencv.willowgarage.com/documentation/cpp/motion_analysis_and_object_tracking.html + //for more info on the meaning of these parameters + + class FlowPyrLK : public Flow { + public: + FlowPyrLK(); + virtual ~FlowPyrLK(); + + //flow parameters + void setMinDistance(int minDistance); + void setWindowSize(int winsize); + + //feature finding parameters + void setMaxLevel(int maxLevel); + void setMaxFeatures(int maxFeatures); + void setQualityLevel(float qualityLevel); + void setPyramidLevels(int levels); + + //returns tracking features for this image + std::vector getFeatures(); + std::vector getCurrent(); + std::vector getMotion(); + + // recalculates features to track + void resetFeaturesToTrack(); + void setFeaturesToTrack(const std::vector & features); + void setFeaturesToTrack(const std::vector 
& features); + void resetFlow(); + protected: + + void drawFlow(ofRectangle r); + void calcFlow(cv::Mat prev, cv::Mat next); + void calcFeaturesToTrack(std::vector & features, cv::Mat next); + + std::vector prevPts, nextPts; + + //LK feature finding parameters + int windowSize; + int maxLevel; + int maxFeatures; + float qualityLevel; + + //min distance for PyrLK + int minDistance; + + //pyramid levels + int pyramidLevels; + + bool calcFeaturesNextFrame; + + //pyramid + err/status data + std::vector pyramid; + std::vector prevPyramid; + std::vector status; + std::vector err; + }; + + class FlowFarneback : public Flow { + public: + + FlowFarneback(); + virtual ~FlowFarneback(); + + //see http://opencv.willowgarage.com/documentation/cpp/motion_analysis_and_object_tracking.html + //for a description of these parameters + + void setPyramidScale(float scale); + void setNumLevels(int levels); + void setWindowSize(int winsize); + void setNumIterations(int interations); + void setPolyN(int polyN); + void setPolySigma(float polySigma); + void setUseGaussian(bool gaussian); + + cv::Mat& getFlow(); + ofVec2f getTotalFlow(); + ofVec2f getAverageFlow(); + ofVec2f getFlowOffset(int x, int y); + ofVec2f getFlowPosition(int x, int y); + ofVec2f getTotalFlowInRegion(ofRectangle region); + ofVec2f getAverageFlowInRegion(ofRectangle region); + + //call this if you switch to a new video file to reset internal caches + void resetFlow(); + + protected: + cv::Mat flow; + + void drawFlow(ofRectangle rect); + void calcFlow(cv::Mat prev, cv::Mat next); + + float pyramidScale; + int numLevels; + int windowSize; + int numIterations; + int polyN; + float polySigma; + bool farnebackGaussian; + }; + +} diff --git a/addons/ofxCv/libs/ofxCv/include/ofxCv/Helpers.h b/addons/ofxCv/libs/ofxCv/include/ofxCv/Helpers.h new file mode 100644 index 00000000000..ff8880173d7 --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/include/ofxCv/Helpers.h @@ -0,0 +1,232 @@ +/* + helpers offer new, commonly-needed 
functionality that is not quite present in + OpenCv or openFrameworks. + */ + +#pragma once + +#include "opencv2/opencv.hpp" +#include "ofVectorMath.h" +#include "ofRectangle.h" +#include "ofColor.h" + +namespace ofxCv { + + ofMatrix4x4 makeMatrix(cv::Mat rotation, cv::Mat translation); + void applyMatrix(const ofMatrix4x4& matrix); + + void drawMat(cv::Mat& mat, float x, float y); + void drawMat(cv::Mat& mat, float x, float y, float width, float height); + + template + ofVec2f findMaxLocation(T& img) { + cv::Mat mat = toCv(img); + double minVal, maxVal; + cv::Point minLoc, maxLoc; + minMaxLoc(mat, &minVal, &maxVal, &minLoc, &maxLoc); + return ofVec2f(maxLoc.x, maxLoc.y); + } + + template + cv::Mat meanCols(T& img) { + cv::Mat mat = toCv(img); + cv::Mat colMat(mat.cols, 1, mat.type()); + for(int i = 0; i < mat.cols; i++) { + colMat.row(i) = mean(mat.col(i)); + } + return colMat; + } + + template + cv::Mat meanRows(T& img) { + cv::Mat mat = toCv(img); + cv::Mat rowMat(mat.rows, 1, mat.type()); + for(int i = 0; i < mat.rows; i++) { + rowMat.row(i) = mean(mat.row(i)); + } + return rowMat; + } + + template + cv::Mat sumCols(T& img) { + cv::Mat mat = toCv(img); + cv::Mat colMat(mat.cols, 1, CV_32FC1); + for(int i = 0; i < mat.cols; i++) { + colMat.row(i) = sum(mat.col(i)); + } + return colMat; + } + + template + cv::Mat sumRows(T& img) { + cv::Mat mat = toCv(img); + cv::Mat rowMat(mat.rows, 1, CV_32FC1); + for(int i = 0; i < mat.rows; i++) { + rowMat.row(i) = sum(mat.row(i)); + } + return rowMat; + } + + template + cv::Mat minCols(T& img) { + cv::Mat mat = toCv(img); + cv::Mat colMat(mat.cols, 1, CV_32FC1); + double minVal, maxVal; + for(int i = 0; i < mat.cols; i++) { + minMaxLoc(mat.col(i), &minVal, &maxVal); + colMat.row(i) = minVal; + } + return colMat; + } + + template + cv::Mat minRows(T& img) { + cv::Mat mat = toCv(img); + cv::Mat rowMat(mat.rows, 1, CV_32FC1); + double minVal, maxVal; + for(int i = 0; i < mat.rows; i++) { + minMaxLoc(mat.row(i), &minVal, 
&maxVal); + rowMat.row(i) = minVal; + } + return rowMat; + } + + template + cv::Mat maxCols(T& img) { + cv::Mat mat = toCv(img); + cv::Mat colMat(mat.cols, 1, CV_32FC1); + double minVal, maxVal; + for(int i = 0; i < mat.cols; i++) { + minMaxLoc(mat.col(i), &minVal, &maxVal); + colMat.row(i) = maxVal; + } + return colMat; + } + + template + cv::Mat maxRows(T& img) { + cv::Mat mat = toCv(img); + cv::Mat rowMat(mat.rows, 1, CV_32FC1); + double minVal, maxVal; + for(int i = 0; i < mat.rows; i++) { + minMaxLoc(mat.row(i), &minVal, &maxVal); + rowMat.row(i) = maxVal; + } + return rowMat; + } + + int findFirst(const cv::Mat& arr, unsigned char target); + int findLast(const cv::Mat& arr, unsigned char target); + + template + void getBoundingBox(T& img, ofRectangle& box, int thresh, bool invert) { + cv::Mat mat = toCv(img); + int flags = (invert ? cv::THRESH_BINARY_INV : cv::THRESH_BINARY); + + cv::Mat rowMat = meanRows(mat); + threshold(rowMat, rowMat, thresh, 255, flags); + box.y = findFirst(rowMat, 255); + box.height = findLast(rowMat, 255); + box.height -= box.y; + + cv::Mat colMat = meanCols(mat); + threshold(colMat, colMat, thresh, 255, flags); + box.x = findFirst(colMat, 255); + box.width = findLast(colMat, 255); + box.width -= box.x; + } + + float weightedAverageAngle(const vector& lines); + + // (nearest point) to the two given lines + template + cv::Point3_ intersectLineLine(cv::Point3_ lineStart1, cv::Point3_ lineEnd1, cv::Point3_ lineStart2, cv::Point3_ lineEnd2) { + cv::Point3_ v1(lineEnd1 - lineStart1), v2(lineEnd2 - lineStart2); + T v1v1 = v1.dot(v1), v2v2 = v2.dot(v2), v1v2 = v1.dot(v2), v2v1 = v2.dot(v1); + cv::Mat_ lambda = (1. 
/ (v1v1 * v2v2 - v1v2 * v1v2)) + * ((cv::Mat_(2, 2) << v2v2, v1v2, v2v1, v1v1) + * (cv::Mat_(2, 1) << v1.dot(lineStart2 - lineStart1), v2.dot(lineStart1 - lineStart2))); + return (1./2) * ((lineStart1 + v1 * lambda(0)) + (lineStart2 + v2 * lambda(1))); + } + + // (nearest point on a line) to the given point + template + cv::Point3_ intersectPointLine(cv::Point3_ point, cv::Point3_ lineStart, cv::Point3_ lineEnd) { + cv::Point3_ ray = lineEnd - lineStart; + T u = (point - lineStart).dot(ray) / ray.dot(ray); + return lineStart + u * ray; + } + + // (nearest point on a ray) to the given point + template + cv::Point3_ intersectPointRay(cv::Point3_ point, cv::Point3_ ray) { + return ray * (point.dot(ray) / ray.dot(ray)); + } + + // morphological thinning, also called skeletonization, strangely missing from opencv + // here is a description of the algorithm http://homepages.inf.ed.ac.uk/rbf/HIPR2/thin.htm + template + void thin(T& img) { + cv::Mat mat = toCv(img); + int w = mat.cols, h = mat.rows; + int ia1=-w-1,ia2=-w-0,ia3=-w+1,ib1=-0-1,ib3=-0+1,ic1=+w-1,ic2=+w-0,ic3=+w+1; + unsigned char* p = mat.ptr(); + vector q; + for(int y = 1; y + 1 < h; y++) { + for(int x = 1; x + 1 < w; x++) { + int i = y * w + x; + if(p[i]) { + q.push_back(i); + } + } + } + int n = q.size(); + for(int i=0;i& lines); + + // finds the average angle of hough lines, unrotates by that amount and + // returns the average rotation. you can supply your own thresholded image + // for hough lines, or let it run canny detection for you. 
+ template + float autorotate(S& src, D& dst, float threshold1 = 50, float threshold2 = 200) { + cv::Mat thresh; + cv::Canny(src, thresh, threshold1, threshold2); + return autorotate(src, thresh, dst); + } + + template + float autorotate(S& src, T& thresh, D& dst) { + imitate(dst, src); + cv::Mat srcMat = toCv(src), threshMat = toCv(thresh); + std::vector lines; + double distanceResolution = 1; + double angleResolution = CV_PI / 180; + // these three values are just heuristics that have worked for me + int voteThreshold = 10; + double minLineLength = (srcMat.rows + srcMat.cols) / 8; + double maxLineGap = 3; + HoughLinesP(threshMat, lines, distanceResolution, angleResolution, voteThreshold, minLineLength, maxLineGap); + float rotationAmount = ofRadToDeg(weightedAverageAngle(lines)); + rotate(src, dst, rotationAmount); + return rotationAmount; + } + + std::vector getConvexPolygon(const std::vector& convexHull, int targetPoints); + + static const ofColor cyanPrint = ofColor::fromHex(0x00abec); + static const ofColor magentaPrint = ofColor::fromHex(0xec008c); + static const ofColor yellowPrint = ofColor::fromHex(0xffee00); + + void drawHighlightString(string text, ofPoint position, ofColor background = ofColor::black, ofColor foreground = ofColor::white); + void drawHighlightString(string text, int x, int y, ofColor background = ofColor::black, ofColor foreground = ofColor::white); +} diff --git a/addons/ofxCv/libs/ofxCv/include/ofxCv/Kalman.h b/addons/ofxCv/libs/ofxCv/include/ofxCv/Kalman.h new file mode 100644 index 00000000000..774931ef540 --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/include/ofxCv/Kalman.h @@ -0,0 +1,38 @@ +#pragma once + +#include "ofxCv.h" +#include "ofVectorMath.h" + +namespace ofxCv { + + // Kalman filter for positioning + template + class KalmanPosition_ { + cv::KalmanFilter KF; + cv::Mat_ measurement, prediction, estimated; + public: + // smoothness, rapidness: smaller is more smooth/rapid + // bUseAccel: set true to smooth out velocity + void 
init(T smoothness = 0.1, T rapidness = 0.1, bool bUseAccel = false); + void update(const ofVec3f&); + ofVec3f getPrediction(); + ofVec3f getEstimation(); + ofVec3f getVelocity(); + }; + + typedef KalmanPosition_ KalmanPosition; + + // Kalman filter for orientation + template + class KalmanEuler_ : public KalmanPosition_ { + ofVec3f eulerPrev; // used for finding appropriate dimension + public: + void init(T smoothness = 0.1, T rapidness = 0.1, bool bUseAccel = false); + void update(const ofQuaternion&); + ofQuaternion getPrediction(); + ofQuaternion getEstimation(); + //ofQuaternion getVelocity(); + }; + + typedef KalmanEuler_ KalmanEuler; +} diff --git a/addons/ofxCv/libs/ofxCv/include/ofxCv/ObjectFinder.h b/addons/ofxCv/libs/ofxCv/include/ofxCv/ObjectFinder.h new file mode 100644 index 00000000000..db2733b02c8 --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/include/ofxCv/ObjectFinder.h @@ -0,0 +1,77 @@ +/* + this class is good for tracking things like faces. usually you want to speed + up the tracking by doing detection on a smaller image, say setRescale(.25) for + example to track on a 1/4 size image at a higher fps. other options for faster + tracking include setCannyPruning(true) which will ignore low contrast regions, + setFindBiggestObject(true) which finds the biggest object and returns it alone. + it's rarely faster to do both of these together (learning opencv p 513). + setMultiScaleFactor() provides a tradeoff between scale accuracy and speed. + setMultiScaleFactor(1.01) will give a very accurate size for detected objects + but will take longer to run. setMinNeighbors() tries to group multiple results + from the tracker into a single result, and rejects anything that doesn't have + enough results. setMinSizeScale() and setMaxSizeScale() set the minimum and + maximum size for searching the space. you can set all these parameters at once + with a preset using (for example) setPreset(ObjectFinder::Fast). 
+ + need to add: + - allow rotations + */ + +#pragma once + +#include "ofxCv/Utilities.h" +#include "ofxCv/Tracker.h" +#include "ofRectangle.h" + +#include "ofxCv.h" +namespace ofxCv { + class ObjectFinder { + public: + + ObjectFinder(); + void setup(string cascadeFilename); + template + void update(T& img) { + update(toCv(img)); + } + void update(cv::Mat img); + unsigned int size() const; + ofRectangle getObject(unsigned int i) const; + ofRectangle getObjectSmoothed(unsigned int i) const; + RectTracker& getTracker(); + unsigned int getLabel(unsigned int i) const; + cv::Vec2f getVelocity(unsigned int i) const; + void draw() const; + + enum Preset {Fast, Accurate, Sensitive}; + void setPreset(ObjectFinder::Preset preset); + + void setRescale(float rescale); + void setMinNeighbors(int minNeighbors); + void setMultiScaleFactor(float multiScaleFactor); + void setCannyPruning(bool cannyPruning); + void setFindBiggestObject(bool findBiggestObject); + void setUseHistogramEqualization(bool useHistogramEqualization); + void setMinSizeScale(float minSizeScale); + void setMaxSizeScale(float maxSizeScale); + + float getRescale() const; + int getMinNeighbors() const; + float getMultiScaleFactor() const; + bool getCannyPruning() const; + bool getFindBiggestObject() const; + bool getUseHistogramEqualization() const; + float getMinSizeScale() const; + float getMaxSizeScale() const; + + protected: + float rescale, multiScaleFactor; + int minNeighbors; + bool useHistogramEqualization, cannyPruning, findBiggestObject; + float minSizeScale, maxSizeScale; + cv::Mat gray, graySmall; + cv::CascadeClassifier classifier; + std::vector objects; + RectTracker tracker; + }; +} diff --git a/addons/ofxCv/libs/ofxCv/include/ofxCv/RunningBackground.h b/addons/ofxCv/libs/ofxCv/include/ofxCv/RunningBackground.h new file mode 100644 index 00000000000..ad0f412fb7f --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/include/ofxCv/RunningBackground.h @@ -0,0 +1,66 @@ +/* + this class is designed to handle the 
common task of detecting foreground + objects in a complex scene by subtracting a known background. a good summary + of different background subtraction techniques is available at: + + http://www-staff.it.uts.edu.au/~massimo/BackgroundSubtractionReview-Piccardi.pdf + + this class only implements the running average technique. this technique is + also described in the opencv 2 cookbook in chapter 10, under "extracting the + foreground objects in video". the example ignores foreground pixels during + accumulation. this class uses all pixels by default, but can be set to ignore + foreground pixels by calling setIgnoreForeground(true). + + learningRate determines how quickly the background is learned. a smaller value + means the background takes longer to learn. default the learningRate is .0001 + and you can use setLearningRate() to change this. + + guessing a learningRate can be hard, because it's related to the threshold and + your camera framerate. setLearningTime() will let you set the learningRate + in terms of frames. larger values meaning the background takes longer to learn. + a learning time of 900 means a 30 fps camera would take 30 seconds before + a foreground object could leave a "shadow", or "trace" in the background + that appears as foreground after thresholding. in practice, it will only take + this amount of time exactly when the background is completely black and the + foreground is completely white. most of the time it will take longer than + learningTime, so it's safe to under-shoot. 
+ + to do: + - use hsb space, or sb space for differencing (like ContourFinder) + */ + +#pragma once + +#include "ofxCv/Utilities.h" + +namespace ofxCv { + class RunningBackground { + public: + enum DifferenceMode {ABSDIFF, BRIGHTER, DARKER}; + + RunningBackground(); + template + void update(F& frame, T& thresholded) { + ofxCv::imitate(thresholded, frame, CV_8UC1); + cv::Mat frameMat = toCv(frame); + cv::Mat thresholdedMat = toCv(thresholded); + update(frameMat, thresholdedMat); + } + void update(cv::Mat frame, cv::Mat& thresholded); + cv::Mat& getBackground(); + cv::Mat& getForeground(); + float getPresence() const; + void setThresholdValue(unsigned int thresholdValue); + void setLearningRate(double learningRate); + void setLearningTime(double learningTime); + void setIgnoreForeground(bool ignoreForeground); + void setDifferenceMode(DifferenceMode differenceMode); + void reset(); + protected: + cv::Mat accumulator, background, foreground, foregroundGray; + double learningRate, learningTime; + unsigned int thresholdValue; + bool useLearningTime, needToReset, ignoreForeground; + DifferenceMode differenceMode; + }; +} diff --git a/addons/ofxCv/libs/ofxCv/include/ofxCv/Tracker.h b/addons/ofxCv/libs/ofxCv/include/ofxCv/Tracker.h new file mode 100644 index 00000000000..b1ce13b6d4b --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/include/ofxCv/Tracker.h @@ -0,0 +1,440 @@ +/* + the tracker is used for tracking the identities of a collection of objects that + change slightly over time. example applications are in contour tracking and + face tracking. when using a tracker, the two most important things to know are + the persistence and maximumDistance. persistence determines how many frames an + object can last without being seen until the tracker forgets about it. + maximumDistance determines how far an object can move until the tracker + considers it a new object. + + the default trackers are for cv::Rect and cv::Point2f (RectTracker and + PointTracker). 
to create a new kind of tracker, you need to add a
+ trackingDistance() function that returns the distance between two tracked
+ objects.
+
+ the tracking algorithm calls the distance function approximately n^2 times.
+ it then filters the distances using maximumDistance, which can significantly
+ reduce the possible matches. then it sorts the distances using std::sort, which
+ runs in nlogn time. the primary bottleneck for most data is the distance
+ function. in practical terms, the tracker can become non-realtime when
+ tracking more than a few hundred objects. to optimize the tracker, consider
+ avoiding usage of sqrt() in the trackingDistance() function.
+
+ this tracker doesn't find a global minimum, but a local minimum. for example:
+ when a dense set of points moves farther than the average point-to-point radius
+ (like a line of points 5 pixels apart moving up and to the right 5 pixels). it
+ also fails to model the data, so two objects might be swapped if they cross
+ paths quickly.
+
+ usually you don't just want to know the labels of tracked objects, but you also
+ want to maintain a collection of your own objects that are paired with those
+ tracked/labeled objects. use the TrackerFollower extension for this: create
+ your own MyFollower extending Follower, and create TrackerFollower.
+
+ for example:
+ class MyFollower : public ofxCv::PointFollower { ... }
+ ofxCv::PointTrackerFollower<MyFollower> tracker;
+
+ then whenever you call tracker.track(), the tracker will maintain a list of
+ MyFollower objects internally: when a new label is created, it will call
+ MyFollower::setup(), when an old label is updated MyFollower::update(),
+ then when a label has been lost it will call MyFollower::kill(). when
+ MyFollower::getDead() is true, the MyFollower object will be removed.
+ */ + +#pragma once + +#include "opencv2/opencv.hpp" +#include +#include +#include "ofMath.h" + +namespace ofxCv { + float trackingDistance(const cv::Rect& a, const cv::Rect& b); + float trackingDistance(const cv::Point2f& a, const cv::Point2f& b); + + template + class TrackedObject { + protected: + unsigned int lastSeen, label, age; + int index; + public: + T object; + + TrackedObject(const T& object, unsigned int label, int index) + :lastSeen(0) + ,label(label) + ,age(0) + ,index(index) + ,object(object){ + } + TrackedObject(const T& object, const TrackedObject& previous, int index) + :lastSeen(0) + ,label(previous.label) + ,age(previous.age) + ,index(index) + ,object(object){ + } + TrackedObject(const TrackedObject& old) + :lastSeen(old.lastSeen) + ,label(old.label) + ,age(old.age) + ,index(-1) + ,object(old.object){ + } + void timeStep(bool visible) { + age++; + if(!visible) { + lastSeen++; + } + } + unsigned int getLastSeen() const { + return lastSeen; + } + unsigned long getAge() const { + return age; + } + unsigned int getLabel() const { + return label; + } + int getIndex() const { + return index; + } + }; + + struct bySecond { + template + bool operator()(std::pair const &a, std::pair const &b) { + return a.second < b.second; + } + }; + + template + class Tracker { + protected: + vector > previous, current; + vector currentLabels, previousLabels, newLabels, deadLabels; + std::map*> previousLabelMap, currentLabelMap; + + unsigned int persistence, curLabel; + float maximumDistance; + unsigned int getNewLabel() { + return curLabel++; + } + + public: + Tracker() + :persistence(15) + ,curLabel(0) + ,maximumDistance(64) { + } + virtual ~Tracker(){}; + void setPersistence(unsigned int persistence); + void setMaximumDistance(float maximumDistance); + virtual const std::vector& track(const std::vector& objects); + + // organized in the order received by track() + const std::vector& getCurrentLabels() const; + const std::vector& getPreviousLabels() const; + const 
std::vector& getNewLabels() const; + const std::vector& getDeadLabels() const; + unsigned int getLabelFromIndex(unsigned int i) const; + + // organized by label + int getIndexFromLabel(unsigned int label) const; + const T& getPrevious(unsigned int label) const; + const T& getCurrent(unsigned int label) const; + bool existsCurrent(unsigned int label) const; + bool existsPrevious(unsigned int label) const; + int getAge(unsigned int label) const; + int getLastSeen(unsigned int label) const; + }; + + template + void Tracker::setPersistence(unsigned int persistence) { + this->persistence = persistence; + } + + template + void Tracker::setMaximumDistance(float maximumDistance) { + this->maximumDistance = maximumDistance; + } + + template + const std::vector& Tracker::track(const std::vector& objects) { + previous = current; + int n = objects.size(); + int m = previous.size(); + + // build NxM distance matrix + typedef std::pair MatchPair; + typedef std::pair MatchDistancePair; + std::vector all; + for(int i = 0; i < n; i++) { + for(int j = 0; j < m; j++) { + float curDistance = trackingDistance(objects[i], previous[j].object); + if(curDistance < maximumDistance) { + all.push_back(MatchDistancePair(MatchPair(i, j), curDistance)); + } + } + } + + // sort all possible matches by distance + sort(all.begin(), all.end(), bySecond()); + + previousLabels = currentLabels; + currentLabels.clear(); + currentLabels.resize(n); + current.clear(); + std::vector matchedObjects(n, false); + std::vector matchedPrevious(m, false); + // walk through matches in order + for(int k = 0; k < (int)all.size(); k++) { + MatchPair& match = all[k].first; + int i = match.first; + int j = match.second; + // only use match if both objects are unmatched, lastSeen is set to 0 + if(!matchedObjects[i] && !matchedPrevious[j]) { + matchedObjects[i] = true; + matchedPrevious[j] = true; + int index = current.size(); + current.push_back(TrackedObject(objects[i], previous[j], index)); + 
current.back().timeStep(true); + currentLabels[i] = current.back().getLabel(); + } + } + + // create new labels for new unmatched objects, lastSeen is set to 0 + newLabels.clear(); + for(int i = 0; i < n; i++) { + if(!matchedObjects[i]) { + int curLabel = getNewLabel(); + int index = current.size(); + current.push_back(TrackedObject(objects[i], curLabel, index)); + current.back().timeStep(true); + currentLabels[i] = curLabel; + newLabels.push_back(curLabel); + } + } + + // copy old unmatched objects if young enough, lastSeen is increased + deadLabels.clear(); + for(int j = 0; j < m; j++) { + if(!matchedPrevious[j]) { + if(previous[j].getLastSeen() < persistence) { + current.push_back(previous[j]); + current.back().timeStep(false); + } + deadLabels.push_back(previous[j].getLabel()); + } + } + + // build label maps + currentLabelMap.clear(); + for(int i = 0; i < (int)current.size(); i++) { + unsigned int label = current[i].getLabel(); + currentLabelMap[label] = &(current[i]); + } + previousLabelMap.clear(); + for(int i = 0; i < (int)previous.size(); i++) { + unsigned int label = previous[i].getLabel(); + previousLabelMap[label] = &(previous[i]); + } + + return currentLabels; + } + + template + const std::vector& Tracker::getCurrentLabels() const { + return currentLabels; + } + + template + const std::vector& Tracker::getPreviousLabels() const { + return previousLabels; + } + + template + const std::vector& Tracker::getNewLabels() const { + return newLabels; + } + + template + const vector& Tracker::getDeadLabels() const { + return deadLabels; + } + + template + unsigned int Tracker::getLabelFromIndex(unsigned int i) const { + return currentLabels[i]; + } + + template + int Tracker::getIndexFromLabel(unsigned int label) const { + return currentLabelMap.find(label)->second->getIndex(); + } + + template + const T& Tracker::getPrevious(unsigned int label) const { + return previousLabelMap.find(label)->second->object; + } + + template + const T& 
Tracker::getCurrent(unsigned int label) const { + return currentLabelMap.find(label)->second->object; + } + + template + bool Tracker::existsCurrent(unsigned int label) const { + return currentLabelMap.count(label) > 0; + } + + template + bool Tracker::existsPrevious(unsigned int label) const { + return previousLabelMap.count(label) > 0; + } + + template + int Tracker::getAge(unsigned int label) const{ + return currentLabelMap.find(label)->second->getAge(); + } + + template + int Tracker::getLastSeen(unsigned int label) const{ + return currentLabelMap.find(label)->second->getLastSeen(); + } + + class RectTracker : public Tracker { + protected: + float smoothingRate; + std::map smoothed; + public: + RectTracker() + :smoothingRate(.5) { + } + void setSmoothingRate(float smoothingRate) { + this->smoothingRate = smoothingRate; + } + float getSmoothingRate() const { + return smoothingRate; + } + const std::vector& track(const std::vector& objects) { + const std::vector& labels = Tracker::track(objects); + // add new objects, update old objects + for(int i = 0; i < labels.size(); i++) { + unsigned int label = labels[i]; + const cv::Rect& cur = getCurrent(label); + if(smoothed.count(label) > 0) { + cv::Rect& smooth = smoothed[label]; + smooth.x = ofLerp(smooth.x, cur.x, smoothingRate); + smooth.y = ofLerp(smooth.y, cur.y, smoothingRate); + smooth.width = ofLerp(smooth.width, cur.width, smoothingRate); + smooth.height = ofLerp(smooth.height, cur.height, smoothingRate); + } else { + smoothed[label] = cur; + } + } + std::map::iterator smoothedItr = smoothed.begin(); + while(smoothedItr != smoothed.end()) { + unsigned int label = smoothedItr->first; + if(!existsCurrent(label)) { + smoothed.erase(smoothedItr++); + } else { + ++smoothedItr; + } + } + return labels; + } + const cv::Rect& getSmoothed(unsigned int label) const { + return smoothed.find(label)->second; + } + cv::Vec2f getVelocity(unsigned int i) const { + unsigned int label = getLabelFromIndex(i); + 
if(existsPrevious(label)) { + const cv::Rect& previous = getPrevious(label); + const cv::Rect& current = getCurrent(label); + cv::Vec2f previousPosition(previous.x + previous.width / 2, previous.y + previous.height / 2); + cv::Vec2f currentPosition(current.x + current.width / 2, current.y + current.height / 2); + return currentPosition - previousPosition; + } else { + return cv::Vec2f(0, 0); + } + } + }; + + typedef Tracker PointTracker; + + template + class Follower { + protected: + bool dead; + unsigned int label; + public: + Follower() + :dead(false) + ,label(0) {} + + virtual ~Follower(){}; + virtual void setup(const T& track) {} + virtual void update(const T& track) {} + virtual void kill() { + dead = true; + } + + void setLabel(unsigned int label) { + this->label = label; + } + unsigned int getLabel() const { + return label; + } + bool getDead() const { + return dead; + } + }; + + typedef Follower RectFollower; + typedef Follower PointFollower; + + template + class TrackerFollower : public Tracker { + protected: + std::vector labels; + std::vector followers; + public: + const std::vector& track(const std::vector& objects) { + Tracker::track(objects); + // kill missing, update old + for(int i = 0; i < labels.size(); i++) { + unsigned int curLabel = labels[i]; + F& curFollower = followers[i]; + if(!Tracker::existsCurrent(curLabel)) { + curFollower.kill(); + } else { + curFollower.update(Tracker::getCurrent(curLabel)); + } + } + // add new + for(int i = 0; i < Tracker::newLabels.size(); i++) { + unsigned int curLabel = Tracker::newLabels[i]; + labels.push_back(curLabel); + followers.push_back(F()); + followers.back().setup(Tracker::getCurrent(curLabel)); + followers.back().setLabel(curLabel); + } + // remove dead + for(int i = labels.size() - 1; i >= 0; i--) { + if(followers[i].getDead()) { + followers.erase(followers.begin() + i); + labels.erase(labels.begin() + i); + } + } + return labels; + } + std::vector& getFollowers() { + return followers; + } + }; + + 
template class RectTrackerFollower : public TrackerFollower {}; + template class PointTrackerFollower : public TrackerFollower {}; +} diff --git a/addons/ofxCv/libs/ofxCv/include/ofxCv/Utilities.h b/addons/ofxCv/libs/ofxCv/include/ofxCv/Utilities.h new file mode 100644 index 00000000000..3caaa48161f --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/include/ofxCv/Utilities.h @@ -0,0 +1,297 @@ +/* + utilities are used internally by ofxCv, and make it easier to write code that + can work with OpenCv and openFrameworks data. + + useful functions from this file: + - imitate and copy + - toCv and toOf + */ + +#pragma once + +#include "opencv2/opencv.hpp" +#include "ofRectangle.h" +#include "ofTexture.h" +#include "ofPixels.h" +#include "ofBaseTypes.h" +#include "ofPolyline.h" +#include "ofVectorMath.h" + +namespace ofxCv { + // these functions are for accessing Mat, ofPixels and ofImage consistently. + // they're very important for imitate(). + + // width, height + template inline int getWidth(T& src) {return src.getWidth();} + template inline int getHeight(T& src) {return src.getHeight();} + inline int getWidth(cv::Mat& src) {return src.cols;} + inline int getHeight(cv::Mat& src) {return src.rows;} + template inline bool getAllocated(T& src) { + return getWidth(src) > 0 && getHeight(src) > 0; + } + + // depth + inline int getDepth(int cvImageType) { + return CV_MAT_DEPTH(cvImageType); + } + inline int getDepth(cv::Mat& mat) { + return mat.depth(); + } + inline int getDepth(ofTexture& tex) { + // avoid "texture not allocated" warning + if(!tex.isAllocated()) { + return CV_8U; + } + int type = tex.getTextureData().glInternalFormat; + switch(type) { + case GL_RGBA: + case GL_RGB: + case GL_LUMINANCE_ALPHA: + case GL_LUMINANCE: + return CV_8U; + +#ifndef TARGET_OPENGLES + case GL_RGBA8: + case GL_RGB8: + case GL_LUMINANCE8: + case GL_LUMINANCE8_ALPHA8: + return CV_8U; + + case GL_RGBA32F_ARB: + case GL_RGB32F_ARB: + case GL_LUMINANCE32F_ARB: + return CV_32F; +#endif + default: 
return 0; + } + } + template inline int getDepth(ofPixels_& pixels) { + switch(pixels.getBytesPerChannel()) { + case 4: return CV_32F; + case 2: return CV_16U; + case 1: default: return CV_8U; + } + } + template <> inline int getDepth(ofPixels_& pixels) { + return CV_16S; + } + template <> inline int getDepth(ofPixels_& pixels) { + return CV_8S; + } + template inline int getDepth(ofBaseHasPixels_& img) { + return getDepth(img.getPixels()); + } + + // channels + inline int getChannels(int cvImageType) { + return CV_MAT_CN(cvImageType); + } + inline int getChannels(ofImageType imageType) { + switch(imageType) { + case OF_IMAGE_COLOR_ALPHA: return 4; + case OF_IMAGE_COLOR: return 3; + case OF_IMAGE_GRAYSCALE: default: return 1; + } + } + inline int getChannels(cv::Mat& mat) { + return mat.channels(); + } + inline int getChannels(ofTexture& tex) { + // avoid "texture not allocated" warning + if(!tex.isAllocated()) { + return GL_RGB; + } + int type = tex.getTextureData().glInternalFormat; + switch(type) { + case GL_RGBA: return 4; + case GL_RGB: return 3; + case GL_LUMINANCE_ALPHA: return 2; + case GL_LUMINANCE: return 1; + +#ifndef TARGET_OPENGLES + case GL_RGBA8: return 4; + case GL_RGB8: return 3; + case GL_LUMINANCE8: return 1; + case GL_LUMINANCE8_ALPHA8: return 2; + + case GL_RGBA32F_ARB: return 4; + case GL_RGB32F_ARB: return 3; + case GL_LUMINANCE32F_ARB: return 1; +#endif + default: return 0; + } + } + template inline int getChannels(ofPixels_& pixels) { + return pixels.getNumChannels(); + } + template inline int getChannels(ofBaseHasPixels_& img) { + return getChannels(img.getPixels()); + } + + // image type + inline int getCvImageType(int channels, int cvDepth = CV_8U) { + return CV_MAKETYPE(cvDepth, channels); + } + inline int getCvImageType(ofImageType imageType, int cvDepth = CV_8U) { + return CV_MAKETYPE(cvDepth, getChannels(imageType)); + } + template inline int getCvImageType(T& img) { + return CV_MAKETYPE(getDepth(img), getChannels(img)); + } + inline 
ofImageType getOfImageType(int cvImageType) { + switch(getChannels(cvImageType)) { + case 4: return OF_IMAGE_COLOR_ALPHA; + case 3: return OF_IMAGE_COLOR; + case 1: default: return OF_IMAGE_GRAYSCALE; + } + } + inline int getGlImageType(int cvImageType) { + int channels = getChannels(cvImageType); + int depth = getDepth(cvImageType); + switch(depth) { + case CV_8U: + switch(channels) { + case 1: return GL_LUMINANCE; + case 3: return GL_RGB; + case 4: return GL_RGBA; + } +#ifndef TARGET_OPENGLES + case CV_32F: + switch(channels) { + case 1: return GL_LUMINANCE32F_ARB; + case 3: return GL_RGB32F; + case 4: return GL_RGBA32F; + } +#endif + } + return 0; + } + + // allocation + // only happens when necessary + template inline void allocate(T& img, int width, int height, int cvType) { + if (!img.isAllocated() || + getWidth(img) != width || + getHeight(img) != height || + getCvImageType(img) != cvType) + { + img.allocate(width, height, getOfImageType(cvType)); + } + } + inline void allocate(ofTexture& img, int width, int height, int cvType) { + if (!img.isAllocated() || + getWidth(img) != width || + getHeight(img) != height || + getCvImageType(img) != cvType) + { + img.allocate(width, height, getGlImageType(cvType)); + } + } + inline void allocate(cv::Mat& img, int width, int height, int cvType) { + if (getWidth(img) != width || + getHeight(img) != height || + getCvImageType(img) != cvType) { + img.create(height, width, cvType); + } + } + // ofVideoPlayer/Grabber can't be allocated, so we assume we don't need to do anything + inline void allocate(ofBaseVideoDraws & img, int width, int height, int cvType) {} + + // imitate() is good for preparing buffers + // it's like allocate(), but uses the size and type of the original as a reference + // like allocate(), the image being allocated is the first argument + + // this version copies size, but manually specifies mirror's image type + template void imitate(M& mirror, O& original, int mirrorCvImageType) { + int ow = 
getWidth(original), oh = getHeight(original); + allocate(mirror, ow, oh, mirrorCvImageType); + } + + // this version copies size and image type + template void imitate(M& mirror, O& original) { + imitate(mirror, original, getCvImageType(original)); + } + + // maximum possible values for that depth or matrix + float getMaxVal(int cvDepth); + float getMaxVal(const cv::Mat& mat); + int getTargetChannelsFromCode(int conversionCode); + + // toCv functions + // for conversion functions, the signature reveals the behavior: + // 1 Type& argument // creates a shallow copy of the data + // 2 const Type& argument // creates a deep copy of the data + // 3 Type argument // creates a deep copy of the data + // style 1 is used when possible (for Mat conversion). style 2 is used when + // dealing with a lot of data that can't/shouldn't be shallow copied. style 3 + // is used for small objects where the compiler can optimize the copying if + // necessary. the reference is avoided to make inline toCv/toOf use easier. 
+ + cv::Mat toCv(cv::Mat& mat); + template inline cv::Mat toCv(ofPixels_& pix) { + return cv::Mat(pix.getHeight(), pix.getWidth(), getCvImageType(pix), pix.getData(), 0); + } + template inline cv::Mat toCv(ofBaseHasPixels_& img) { + return toCv(img.getPixels()); + } + cv::Mat toCv(ofMesh& mesh); + cv::Point2f toCv(ofVec2f vec); + cv::Point3f toCv(ofVec3f vec); + cv::Rect toCv(ofRectangle rect); + std::vector toCv(const ofPolyline& polyline); + std::vector toCv(const std::vector& points); + std::vector toCv(const std::vector& points); + cv::Scalar toCv(ofColor color); + + // cross-toolkit, cross-bitdepth copying + template + void copy(S& src, D& dst, int dstDepth) { + imitate(dst, src, getCvImageType(getChannels(src), dstDepth)); + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + if(srcMat.type() == dstMat.type()) { + srcMat.copyTo(dstMat); + } else { + double alpha = getMaxVal(dstMat) / getMaxVal(srcMat); + srcMat.convertTo(dstMat, dstMat.depth(), alpha); + } + } + + // most of the time you want the destination to be the same as the source. but + // sometimes your destination is a different depth, and copy() will notice and + // do the conversion for you. 
+ template + void copy(S& src, D& dst) { + int dstDepth; + if(getAllocated(dst)) { + dstDepth = getDepth(dst); + } else { + dstDepth = getDepth(src); + } + copy(src, dst, dstDepth); + } + + // toOf functions + ofVec2f toOf(cv::Point2f point); + ofVec3f toOf(cv::Point3f point); + ofRectangle toOf(cv::Rect rect); + ofPolyline toOf(cv::RotatedRect rect); + template inline ofPolyline toOf(const std::vector >& contour) { + ofPolyline polyline; + polyline.resize(contour.size()); + for(int i = 0; i < (int)contour.size(); i++) { + polyline[i].x = contour[i].x; + polyline[i].y = contour[i].y; + } + polyline.close(); + return polyline; + } + template + void toOf(cv::Mat mat, ofPixels_& pixels) { + pixels.setFromExternalPixels(mat.ptr(), mat.cols, mat.rows, mat.channels()); + } + template + void toOf(cv::Mat mat, ofImage_& img) { + imitate(img, mat); + toOf(mat, img.getPixels()); + } +} diff --git a/addons/ofxCv/libs/ofxCv/include/ofxCv/Wrappers.h b/addons/ofxCv/libs/ofxCv/include/ofxCv/Wrappers.h new file mode 100644 index 00000000000..0d0ba9f33aa --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/include/ofxCv/Wrappers.h @@ -0,0 +1,444 @@ +/* + wrappers provide an easy-to-use interface to OpenCv functions when using data + from openFrameworks. they don't implement anything novel, they just wrap OpenCv + functions in a very direct way. many of the functions have in-place and + not-in-place variations. 
+
+ high level image operations:
+ - Canny (edge detection), medianBlur, blur, convertColor
+ - Coherent line drawing
+
+ low level image manipulation and comparison:
+ - threshold, normalize, invert, lerp
+ - bitwise_and, bitwise_or, bitwise_xor
+ - max, min, multiply, divide, add, subtract, absdiff
+ - erode, dilate
+
+ image transformation:
+ - rotate, resize, warpPerspective
+
+ point set/ofPolyline functions:
+ - convexHull, minAreaRect, fitEllipse, unwarpPerspective, warpPerspective
+
+ utility wrappers:
+ - load and save Mat
+
+ */
+
+#pragma once
+
+#include "opencv2/opencv.hpp"
+#include "ofxCv/Utilities.h"
+#include "ofVectorMath.h"
+#include "ofImage.h"
+
+// coherent line drawing
+#include "imatrix.h"
+#include "ETF.h"
+#include "fdog.h"
+#include "myvec.h"
+
+namespace ofxCv {
+
+ void loadMat(cv::Mat& mat, std::string filename);
+ void saveMat(cv::Mat mat, std::string filename);
+ void saveImage(cv::Mat& mat, std::string filename, ofImageQualityType qualityLevel = OF_IMAGE_QUALITY_BEST);
+
+ // wrapThree are based on functions that operate on three Mat objects.
+ // the first two are inputs, and the third is an output. for example,
+ // the min() function: min(x, y, result) will calculate the per-element min
+ // between x and y, and store that in result. both y and result need to
+ // match x in dimensions and type. while wrapThree functions will use
+ // imitate() to make sure your data is allocated correctly, you shouldn't
+ // expect the function to behave properly if you haven't already allocated
+ // your y argument. in general, OF images contain noise when newly allocated
+ // so the result will also contain that noise.
+#define wrapThree(name) \ +template \ +void name(X& x, Y& y, Result& result) {\ +imitate(y, x);\ +imitate(result, x);\ +cv::Mat xMat = toCv(x), yMat = toCv(y);\ +cv::Mat resultMat = toCv(result);\ +cv::name(xMat, yMat, resultMat);\ +} + wrapThree(max); + wrapThree(min); + wrapThree(multiply); + wrapThree(divide); + wrapThree(add); + wrapThree(subtract); + wrapThree(absdiff); + wrapThree(bitwise_and); + wrapThree(bitwise_or); + wrapThree(bitwise_xor); + + // inverting non-floating point images is a just a bitwise not operation + template void invert(S& src, D& dst) { + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + bitwise_not(srcMat, dstMat); + } + + template void invert(SD& srcDst) { + ofxCv::invert(srcDst, srcDst); + } + + // also useful for taking the average/mixing two images + template + void lerp(X& x, Y& y, R& result, float amt = .5) { + imitate(result, x); + cv::Mat xMat = toCv(x), yMat = toCv(y); + cv::Mat resultMat = toCv(result); + if(yMat.cols == 0) { + copy(x, result); + } else if(xMat.cols == 0) { + copy(y, result); + } else { + cv::addWeighted(xMat, amt, yMat, 1. - amt, 0., resultMat); + } + } + + // normalize the min/max to [0, max for this type] out of place + template + void normalize(S& src, D& dst) { + imitate(dst, src); + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + cv::normalize(srcMat, dstMat, 0, getMaxVal(getDepth(dst)), cv::NORM_MINMAX); + } + + // normalize the min/max to [0, max for this type] in place + template + void normalize(SD& srcDst) { + normalize(srcDst, srcDst); + } + + // threshold out of place + template + void threshold(S& src, D& dst, float thresholdValue, bool invert = false) { + imitate(dst, src); + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + int thresholdType = invert ? 
cv::THRESH_BINARY_INV : cv::THRESH_BINARY; + float maxVal = getMaxVal(dstMat); + cv::threshold(srcMat, dstMat, thresholdValue, maxVal, thresholdType); + } + + // threshold in place + template + void threshold(SD& srcDst, float thresholdValue, bool invert = false) { + ofxCv::threshold(srcDst, srcDst, thresholdValue, invert); + } + + // erode out of place + template + void erode(S& src, D& dst, int iterations = 1) { + imitate(dst, src); + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + cv::erode(srcMat, dstMat, cv::Mat(), cv::Point(-1, -1), iterations); + } + + // erode in place + template + void erode(SD& srcDst, int iterations = 1) { + ofxCv::erode(srcDst, srcDst, iterations); + } + + // dilate out of place + template + void dilate(S& src, D& dst, int iterations = 1) { + imitate(dst, src); + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + cv::dilate(srcMat, dstMat, cv::Mat(), cv::Point(-1, -1), iterations); + } + + // dilate in place + template + void dilate(SD& srcDst, int iterations = 1) { + ofxCv::dilate(srcDst, srcDst, iterations); + } + + // automatic threshold (grayscale 8-bit only) out of place + template + void autothreshold(S& src, D& dst, bool invert = false) { + imitate(dst, src); + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + int flags = cv::THRESH_OTSU | (invert ? cv::THRESH_BINARY_INV : cv::THRESH_BINARY); + threshold(srcMat, dstMat, 0, 255, flags); + } + + // automatic threshold (grayscale 8-bit only) in place + template + void autothreshold(SD& srcDst, bool invert = false) { + ofxCv::autothreshold(srcDst, srcDst, invert); + } + + // CV_RGB2GRAY, CV_HSV2RGB, etc. with [RGB, BGR, GRAY, HSV, HLS, XYZ, YCrCb, Lab, Luv] + // you can convert whole images... + template + void convertColor(S& src, D& dst, int code) { + // cvtColor allocates Mat for you, but we need this to handle ofImage etc. 
+ int targetChannels = getTargetChannelsFromCode(code); + imitate(dst, src, getCvImageType(targetChannels, getDepth(src))); + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + cvtColor(srcMat, dstMat, code); + } + // ...or single colors. + cv::Vec3b convertColor(cv::Vec3b color, int code); + ofColor convertColor(ofColor color, int code); + + // a common cv task is to convert something to grayscale. this function will + // do that quickly for RGBA, RGB, and 1-channel images. + template + void copyGray(S& src, D& dst) { + int channels = getChannels(src); + if(channels == 4) { + convertColor(src, dst, CV_RGBA2GRAY); + } else if(channels == 3) { + convertColor(src, dst, CV_RGB2GRAY); + } else if(channels == 1) { + copy(src, dst); + } + } + + int forceOdd(int x); + + // box blur + template + void blur(S& src, D& dst, int size) { + imitate(dst, src); + size = forceOdd(size); + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + cv::blur(srcMat, dstMat, cv::Size(size, size)); + } + + // in-place box blur + template + void blur(SD& srcDst, int size) { + ofxCv::blur(srcDst, srcDst, size); + } + + // Gaussian blur + template + void GaussianBlur(S& src, D& dst, int size) { + imitate(dst, src); + size = forceOdd(size); + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + cv::GaussianBlur(srcMat, dstMat, cv::Size(size, size), 0, 0); + } + + // in-place Gaussian blur + template + void GaussianBlur(SD& srcDst, int size) { + ofxCv::GaussianBlur(srcDst, srcDst, size); + } + + // Median blur + template + void medianBlur(S& src, D& dst, int size) { + imitate(dst, src); + size = forceOdd(size); + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + cv::medianBlur(srcMat, dstMat, size); + } + + // in-place Median blur + template + void medianBlur(SD& srcDst, int size) { + ofxCv::medianBlur(srcDst, srcDst, size); + } + + // histogram equalization, adds support for color images + template + void equalizeHist(S& src, D& dst) { + imitate(dst, src); + cv::Mat srcMat = toCv(src), dstMat = 
toCv(dst); + if(srcMat.channels() > 1) { + std::vector srcEach, dstEach; + split(srcMat, srcEach); + split(dstMat, dstEach); + for(int i = 0; i < srcEach.size(); i++) { + cv::equalizeHist(srcEach[i], dstEach[i]); + } + cv::merge(dstEach, dstMat); + } else { + cv::equalizeHist(srcMat, dstMat); + } + } + + // in-place histogram equalization + template + void equalizeHist(SD& srcDst) { + equalizeHist(srcDst, srcDst); + } + + // Canny edge detection assumes your input and output are grayscale 8-bit + // example thresholds might be 0,30 or 50,200 + template + void Canny(S& src, D& dst, double threshold1, double threshold2, int apertureSize=3, bool L2gradient=false) { + imitate(dst, src, CV_8UC1); + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + cv::Canny(srcMat, dstMat, threshold1, threshold2, apertureSize, L2gradient); + } + + // Sobel edge detection + template + void Sobel(S& src, D& dst, int ddepth=-1, int dx=1, int dy=1, int ksize=3, double scale=1, double delta=0, int borderType=cv::BORDER_DEFAULT ) { + imitate(dst, src, CV_8UC1); + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + cv::Sobel(srcMat, dstMat, ddepth, dx, dy, ksize, scale, delta, borderType ); + } + + // coherent line drawing: good values for halfw are between 1 and 8, + // smoothPasses 1, and 4, sigma1 between .01 and 2, sigma2 between .01 and 10, + // tau between .8 and 1.0 + // this could be rewritten into a class so we're not doing an allocate and copy each time + template + void CLD(S& src, D& dst, int halfw = 4, int smoothPasses = 2, double sigma1 = .4, double sigma2 = 3, double tau = .97, int black = 0) { + copy(src, dst); + int width = getWidth(src), height = getHeight(src); + imatrix img; + img.init(height, width); + cv::Mat dstMat = toCv(dst); + if(black != 0) { + add(dstMat, cv::Scalar(black), dstMat); + } + // copy from dst (unsigned char) to img (int) + for(int y = 0; y < height; y++) { + for(int x = 0; x < width; x++) { + img[y][x] = dstMat.at(y, x); + } + } + ETF etf; + 
etf.init(height, width); + etf.set(img); + etf.Smooth(halfw, smoothPasses); + GetFDoG(img, etf, sigma1, sigma2, tau); + // copy result from img (int) to dst (unsigned char) + for(int y = 0; y < height; y++) { + for(int x = 0; x < width; x++) { + dstMat.at(y, x) = img[y][x]; + } + } + } + + // dst does not imitate src + template + void warpPerspective(S& src, D& dst, std::vector& dstPoints, int flags = cv::INTER_LINEAR) { + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + int w = srcMat.cols; + int h = srcMat.rows; + std::vector srcPoints(4); + srcPoints[0] = cv::Point2f(0, 0); + srcPoints[1] = cv::Point2f(w, 0); + srcPoints[2] = cv::Point2f(w, h); + srcPoints[3] = cv::Point2f(0, h); + cv::Mat transform = getPerspectiveTransform(&srcPoints[0], &dstPoints[0]); + warpPerspective(srcMat, dstMat, transform, dstMat.size(), flags); + } + + // dst does not imitate src + template + void unwarpPerspective(S& src, D& dst, std::vector& srcPoints, int flags = cv::INTER_LINEAR) { + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + int w = dstMat.cols; + int h = dstMat.rows; + std::vector dstPoints(4); + dstPoints[0] = cv::Point2f(0, 0); + dstPoints[1] = cv::Point2f(w, 0); + dstPoints[2] = cv::Point2f(w, h); + dstPoints[3] = cv::Point2f(0, h); + cv::Mat transform = getPerspectiveTransform(&srcPoints[0], &dstPoints[0]); + warpPerspective(srcMat, dstMat, transform, dstMat.size(), flags); + } + + // dst does not imitate src + template + void warpPerspective(S& src, D& dst, cv::Mat& transform, int flags = cv::INTER_LINEAR) { + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + warpPerspective(srcMat, dstMat, transform, dstMat.size(), flags); + } + + template + void resize(S& src, D& dst, int interpolation = cv::INTER_LINEAR) { // also: INTER_NEAREST, INTER_AREA, INTER_CUBIC, INTER_LANCZOS4 + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + resize(srcMat, dstMat, dstMat.size(), 0, 0, interpolation); + } + + template + void resize(S& src, D& dst, float xScale, float yScale, int 
interpolation = cv::INTER_LINEAR) { // also: INTER_NEAREST, INTER_AREA, INTER_CUBIC, INTER_LANCZOS4 + int dstWidth = getWidth(src) * xScale, dstHeight = getHeight(src) * yScale; + if(getWidth(dst) != dstWidth || getHeight(dst) != dstHeight) { + allocate(dst, dstWidth, dstHeight, getCvImageType(src)); + } + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + resize(src, dst, interpolation); + } + + // for contourArea() and arcLength(), see ofPolyline::getArea() and getPerimeter() + ofPolyline convexHull(const ofPolyline& polyline); + std::vector convexityDefects(const std::vector& contour); + std::vector convexityDefects(const ofPolyline& polyline); + cv::RotatedRect minAreaRect(const ofPolyline& polyline); + cv::RotatedRect fitEllipse(const ofPolyline& polyline); + void fitLine(const ofPolyline& polyline, ofVec2f& point, ofVec2f& direction); + + // kind of obscure function, draws filled polygons on the CPU + template + void fillPoly(std::vector& points, D& dst) { + cv::Mat dstMat = toCv(dst); + const cv::Point* ppt[1] = { &(points[0]) }; + int npt[] = { (int) points.size() }; + dstMat.setTo(cv::Scalar(0)); + fillPoly(dstMat, ppt, npt, 1, cv::Scalar(255)); + } + + template + void flip(S& src, D& dst, int code) { + imitate(dst, src); + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + cv::flip(srcMat, dstMat, code); + } + + // if you're doing the same rotation multiple times, it's better to precompute + the displacement and use remap. 
+ template + void rotate(S& src, D& dst, double angle, ofColor fill = ofColor::black, int interpolation = cv::INTER_LINEAR) { + imitate(dst, src); + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + cv::Point2f center(srcMat.cols / 2, srcMat.rows / 2); + cv::Mat rotationMatrix = getRotationMatrix2D(center, angle, 1); + warpAffine(srcMat, dstMat, rotationMatrix, srcMat.size(), interpolation, cv::BORDER_CONSTANT, toCv(fill)); + } + + // efficient version of rotate that only operates on 0, 90, 180, 270 degrees + // the output is allocated to contain all pixels of the input. + template + void rotate90(S& src, D& dst, int angle) { + cv::Mat srcMat = toCv(src), dstMat = toCv(dst); + if(angle == 0) { + copy(src, dst); + } else if(angle == 90) { + allocate(dst, srcMat.rows, srcMat.cols, srcMat.type()); + cv::transpose(srcMat, dstMat); + cv::flip(dstMat, dstMat, 1); + } else if(angle == 180) { + imitate(dst, src); + cv::flip(srcMat, dstMat, -1); + } else if(angle == 270) { + allocate(dst, srcMat.rows, srcMat.cols, srcMat.type()); + cv::transpose(srcMat, dstMat); + cv::flip(dstMat, dstMat, 0); + } + } + + template + void transpose(S& src, D& dst) { + cv::Mat srcMat = toCv(src); + allocate(dst, srcMat.rows, srcMat.cols, srcMat.type()); + cv::Mat dstMat = toCv(dst); + cv::transpose(srcMat, dstMat); + } + + // finds the 3x4 matrix that best describes the (premultiplied) affine transformation between two point clouds + ofMatrix4x4 estimateAffine3D(std::vector& from, std::vector& to, float accuracy = .99); + ofMatrix4x4 estimateAffine3D(std::vector& from, std::vector& to, std::vector& outliers, float accuracy = .99); +} diff --git a/addons/ofxCv/libs/ofxCv/src/Calibration.cpp b/addons/ofxCv/libs/ofxCv/src/Calibration.cpp new file mode 100644 index 00000000000..242161174fa --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/src/Calibration.cpp @@ -0,0 +1,494 @@ +#include "ofxCv/Calibration.h" +#include "ofxCv/Helpers.h" +#include "ofFileUtils.h" +#include "ofGraphics.h" +#include 
"ofMesh.h" + +namespace ofxCv { + + using namespace cv; + + void Intrinsics::setup(float focalLength, cv::Size imageSize, cv::Size2f sensorSize, cv::Point2d principalPoint) { + float focalPixels = (focalLength / sensorSize.width) * imageSize.width; + float fx = focalPixels; // focal length in pixels on x + float fy = focalPixels; // focal length in pixels on y + float cx = imageSize.width * principalPoint.x; // image center in pixels on x + float cy = imageSize.height * principalPoint.y; // image center in pixels on y + Mat cameraMatrix = (Mat1d(3, 3) << + fx, 0, cx, + 0, fy, cy, + 0, 0, 1); + setup(cameraMatrix, imageSize, sensorSize); + } + void Intrinsics::setup(Mat cameraMatrix, cv::Size imageSize, cv::Size2f sensorSize) { + this->cameraMatrix = cameraMatrix; + this->imageSize = imageSize; + this->sensorSize = sensorSize; + updateValues(); + } + + void Intrinsics::updateValues() { + calibrationMatrixValues(cameraMatrix, + imageSize, + sensorSize.width, sensorSize.height, + fov.x, fov.y, + focalLength, + principalPoint, // sets principalPoint in mm + aspectRatio); + } + + void Intrinsics::setImageSize(cv::Size imgSize) { + imageSize = imgSize; + } + + Mat Intrinsics::getCameraMatrix() const { + return cameraMatrix; + } + + cv::Size Intrinsics::getImageSize() const { + return imageSize; + } + + cv::Size2f Intrinsics::getSensorSize() const { + return sensorSize; + } + + cv::Point2d Intrinsics::getFov() const { + return fov; + } + + double Intrinsics::getFocalLength() const { + return focalLength; + } + + double Intrinsics::getAspectRatio() const { + return aspectRatio; + } + + Point2d Intrinsics::getPrincipalPoint() const { + return principalPoint; + } + + void Intrinsics::loadProjectionMatrix(float nearDist, float farDist, cv::Point2d viewportOffset) const { + ofViewport(viewportOffset.x, viewportOffset.y, imageSize.width, imageSize.height); + ofSetMatrixMode(OF_MATRIX_PROJECTION); + ofLoadIdentityMatrix(); + float w = imageSize.width; + float h = 
imageSize.height; + float fx = cameraMatrix.at(0, 0); + float fy = cameraMatrix.at(1, 1); + float cx = principalPoint.x; + float cy = principalPoint.y; + + ofMatrix4x4 frustum; + frustum.makeFrustumMatrix( + nearDist * (-cx) / fx, nearDist * (w - cx) / fx, + nearDist * (cy) / fy, nearDist * (cy - h) / fy, + nearDist, farDist); + ofMultMatrix(frustum); + + ofSetMatrixMode(OF_MATRIX_MODELVIEW); + ofLoadIdentityMatrix(); + + ofMatrix4x4 lookAt; + lookAt.makeLookAtViewMatrix(ofVec3f(0,0,0), ofVec3f(0,0,1), ofVec3f(0,-1,0)); + ofMultMatrix(lookAt); + } + + Calibration::Calibration() : + patternType(CHESSBOARD), + patternSize(cv::Size(10, 7)), // based on Chessboard_A4.pdf, assuming world units are centimeters + subpixelSize(cv::Size(11,11)), + squareSize(2.5), + reprojectionError(0), + distCoeffs(Mat::zeros(8, 1, CV_64F)), + fillFrame(true), + ready(false) { + + } + + void Calibration::save(string filename, bool absolute) const { + if(!ready){ + ofLog(OF_LOG_ERROR, "Calibration::save() failed, because your calibration isn't ready yet!"); + } + FileStorage fs(ofToDataPath(filename, absolute), FileStorage::WRITE); + cv::Size imageSize = distortedIntrinsics.getImageSize(); + cv::Size sensorSize = distortedIntrinsics.getSensorSize(); + Mat cameraMatrix = distortedIntrinsics.getCameraMatrix(); + fs << "cameraMatrix" << cameraMatrix; + fs << "imageSize_width" << imageSize.width; + fs << "imageSize_height" << imageSize.height; + fs << "sensorSize_width" << sensorSize.width; + fs << "sensorSize_height" << sensorSize.height; + fs << "distCoeffs" << distCoeffs; + fs << "reprojectionError" << reprojectionError; + fs << "features" << "["; + for(int i = 0; i < (int)imagePoints.size(); i++) { + fs << "[:" << imagePoints[i] << "]"; + } + fs << "]"; + } + + void Calibration::load(string filename, bool absolute) { + imagePoints.clear(); + FileStorage fs(ofToDataPath(filename, absolute), FileStorage::READ); + cv::Size imageSize; + cv::Size2f sensorSize; + Mat cameraMatrix; + 
fs["cameraMatrix"] >> cameraMatrix; + fs["imageSize_width"] >> imageSize.width; + fs["imageSize_height"] >> imageSize.height; + fs["sensorSize_width"] >> sensorSize.width; + fs["sensorSize_height"] >> sensorSize.height; + fs["distCoeffs"] >> distCoeffs; + fs["reprojectionError"] >> reprojectionError; + FileNode features = fs["features"]; + for(FileNodeIterator it = features.begin(); it != features.end(); it++) { + vector cur; + (*it) >> cur; + imagePoints.push_back(cur); + } + addedImageSize = imageSize; + distortedIntrinsics.setup(cameraMatrix, imageSize, sensorSize); + updateUndistortion(); + ready = true; + } + void Calibration::setIntrinsics(Intrinsics& distortedIntrinsics){ + this->distortedIntrinsics = distortedIntrinsics; + this->addedImageSize = distortedIntrinsics.getImageSize(); + updateUndistortion(); + this->ready = true; + } + void Calibration::setDistortionCoefficients(float k1, float k2, float p1, float p2, float k3, float k4, float k5, float k6) { + distCoeffs.at(0) = k1; + distCoeffs.at(1) = k2; + distCoeffs.at(2) = p1; + distCoeffs.at(3) = p2; + distCoeffs.at(4) = k3; + distCoeffs.at(5) = k4; + distCoeffs.at(6) = k5; + distCoeffs.at(7) = k6; + } + void Calibration::reset(){ + this->ready = false; + this->reprojectionError = 0.0; + this->imagePoints.clear(); + this->objectPoints.clear(); + this->perViewErrors.clear(); + } + void Calibration::setPatternType(CalibrationPattern patternType) { + this->patternType = patternType; + } + void Calibration::setPatternSize(int xCount, int yCount) { + patternSize = cv::Size(xCount, yCount); + } + void Calibration::setSquareSize(float squareSize) { + this->squareSize = squareSize; + } + void Calibration::setFillFrame(bool fillFrame) { + this->fillFrame = fillFrame; + } + void Calibration::setSubpixelSize(int subpixelSize) { + subpixelSize = MAX(subpixelSize,2); + this->subpixelSize = cv::Size(subpixelSize,subpixelSize); + } + bool Calibration::add(Mat img) { + addedImageSize = img.size(); + + vector pointBuf; + 
+ // find corners + bool found = findBoard(img, pointBuf); + + if (found) + imagePoints.push_back(pointBuf); + else + ofLog(OF_LOG_ERROR, "Calibration::add() failed, maybe your patternSize is wrong or the image has poor lighting?"); + return found; + } + bool Calibration::findBoard(Mat img, vector& pointBuf, bool refine) { + bool found=false; + if(patternType == CHESSBOARD) { + // no CV_CALIB_CB_FAST_CHECK, because it breaks on dark images (e.g., dark IR images from kinect) + int chessFlags = CV_CALIB_CB_ADAPTIVE_THRESH;// | CV_CALIB_CB_NORMALIZE_IMAGE; + found = findChessboardCorners(img, patternSize, pointBuf, chessFlags); + + // improve corner accuracy + if(found) { + if(img.type() != CV_8UC1) { + copyGray(img, grayMat); + } else { + grayMat = img; + } + + if(refine) { + // the 11x11 dictates the smallest image space square size allowed + // in other words, if your smallest square is 11x11 pixels, then set this to 11x11 + cornerSubPix(grayMat, pointBuf, subpixelSize, cv::Size(-1,-1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1 )); + } + } + } +#ifdef USING_OPENCV_2_3 + else { + int flags = (patternType == CIRCLES_GRID ? 
CALIB_CB_SYMMETRIC_GRID : CALIB_CB_ASYMMETRIC_GRID); // + CALIB_CB_CLUSTERING + found = findCirclesGrid(img, patternSize, pointBuf, flags); + } +#endif + return found; + } + bool Calibration::clean(float minReprojectionError) { + int removed = 0; + for(int i = size() - 1; i >= 0; i--) { + if(getReprojectionError(i) > minReprojectionError) { + objectPoints.erase(objectPoints.begin() + i); + imagePoints.erase(imagePoints.begin() + i); + removed++; + } + } + if(size() > 0) { + if(removed > 0) { + return calibrate(); + } else { + return true; + } + } else { + ofLog(OF_LOG_ERROR, "Calibration::clean() removed the last object/image point pair"); + return false; + } + } + bool Calibration::calibrate() { + if(size() < 1) { + ofLog(OF_LOG_ERROR, "Calibration::calibrate() doesn't have any image data to calibrate from."); + if(ready) { + ofLog(OF_LOG_ERROR, "Calibration::calibrate() doesn't need to be called after Calibration::load()."); + } + return ready; + } + + Mat cameraMatrix = Mat::eye(3, 3, CV_64F); + + updateObjectPoints(); + + int calibFlags = 0; + float rms = calibrateCamera(objectPoints, imagePoints, addedImageSize, cameraMatrix, distCoeffs, boardRotations, boardTranslations, calibFlags); + ofLog(OF_LOG_VERBOSE, "calibrateCamera() reports RMS error of " + ofToString(rms)); + + ready = checkRange(cameraMatrix) && checkRange(distCoeffs); + + if(!ready) { + ofLog(OF_LOG_ERROR, "Calibration::calibrate() failed to calibrate the camera"); + } + + distortedIntrinsics.setup(cameraMatrix, addedImageSize); + updateReprojectionError(); + updateUndistortion(); + + return ready; + } + + bool Calibration::isReady(){ + return ready; + } + + bool Calibration::calibrateFromDirectory(string directory) { + ofDirectory dirList; + ofImage cur; + dirList.listDir(directory); + for(int i = 0; i < (int)dirList.size(); i++) { + cur.load(dirList.getPath(i)); + if(!add(toCv(cur))) { + ofLog(OF_LOG_ERROR, "Calibration::add() failed on " + dirList.getPath(i)); + } + } + return calibrate(); + } 
+ void Calibration::undistort(Mat img, int interpolationMode) { + img.copyTo(undistortBuffer); + undistort(undistortBuffer, img, interpolationMode); + } + void Calibration::undistort(Mat src, Mat dst, int interpolationMode) { + remap(src, dst, undistortMapX, undistortMapY, interpolationMode); + } + + ofVec2f Calibration::undistort(ofVec2f& src) const { + ofVec2f dst; + Mat matSrc = Mat(1, 1, CV_32FC2, &src.x); + Mat matDst = Mat(1, 1, CV_32FC2, &dst.x);; + undistortPoints(matSrc, matDst, distortedIntrinsics.getCameraMatrix(), distCoeffs); + return dst; + } + + void Calibration::undistort(vector& src, vector& dst) const { + int n = src.size(); + dst.resize(n); + Mat matSrc = Mat(n, 1, CV_32FC2, &src[0].x); + Mat matDst = Mat(n, 1, CV_32FC2, &dst[0].x); + undistortPoints(matSrc, matDst, distortedIntrinsics.getCameraMatrix(), distCoeffs); + } + + bool Calibration::getTransformation(Calibration& dst, Mat& rotation, Mat& translation) { + //if(imagePoints.size() == 0 || dst.imagePoints.size() == 0) { + if(!ready) { + ofLog(OF_LOG_ERROR, "getTransformation() requires both Calibration objects to have just been calibrated"); + return false; + } + if(imagePoints.size() != dst.imagePoints.size() || patternSize != dst.patternSize) { + ofLog(OF_LOG_ERROR, "getTransformation() requires both Calibration objects to be trained simultaneously on the same board"); + return false; + } + Mat fundamentalMatrix, essentialMatrix; + Mat cameraMatrix = distortedIntrinsics.getCameraMatrix(); + Mat dstCameraMatrix = dst.getDistortedIntrinsics().getCameraMatrix(); + // uses CALIB_FIX_INTRINSIC by default + stereoCalibrate(objectPoints, + imagePoints, dst.imagePoints, + cameraMatrix, distCoeffs, + dstCameraMatrix, dst.distCoeffs, + distortedIntrinsics.getImageSize(), rotation, translation, + essentialMatrix, fundamentalMatrix); + return true; + } + float Calibration::getReprojectionError() const { + return reprojectionError; + } + float Calibration::getReprojectionError(int i) const { + return 
perViewErrors[i]; + } + const Intrinsics& Calibration::getDistortedIntrinsics() const { + return distortedIntrinsics; + } + const Intrinsics& Calibration::getUndistortedIntrinsics() const { + return undistortedIntrinsics; + } + Mat Calibration::getDistCoeffs() const { + return distCoeffs; + } + int Calibration::size() const { + return imagePoints.size(); + } + cv::Size Calibration::getPatternSize() const { + return patternSize; + } + float Calibration::getSquareSize() const { + return squareSize; + } + void Calibration::customDraw() { + for(int i = 0; i < size(); i++) { + draw(i); + } + } + void Calibration::draw(int i) const { + ofPushStyle(); + ofNoFill(); + ofSetColor(ofColor::red); + for(int j = 0; j < (int)imagePoints[i].size(); j++) { + ofDrawCircle(toOf(imagePoints[i][j]), 5); + } + ofPopStyle(); + } + // this won't work until undistort() is in pixel coordinates + /* + void Calibration::drawUndistortion() const { + vector src, dst; + cv::Point2i divisions(32, 24); + for(int y = 0; y < divisions.y; y++) { + for(int x = 0; x < divisions.x; x++) { + src.push_back(ofVec2f( + ofMap(x, -1, divisions.x, 0, addedImageSize.width), + ofMap(y, -1, divisions.y, 0, addedImageSize.height))); + } + } + undistort(src, dst); + ofMesh mesh; + mesh.setMode(OF_PRIMITIVE_LINES); + for(int i = 0; i < src.size(); i++) { + mesh.addVertex(src[i]); + mesh.addVertex(dst[i]); + } + mesh.draw(); + } + */ + void Calibration::draw3d() const { + for(int i = 0; i < size(); i++) { + draw3d(i); + } + } + void Calibration::draw3d(int i) const { + ofPushStyle(); + ofPushMatrix(); + ofNoFill(); + + applyMatrix(makeMatrix(boardRotations[i], boardTranslations[i])); + + ofSetColor(ofColor::fromHsb(255 * i / size(), 255, 255)); + + ofDrawBitmapString(ofToString(i), 0, 0); + + for(int j = 0; j < (int)objectPoints[i].size(); j++) { + ofPushMatrix(); + ofTranslate(toOf(objectPoints[i][j])); + ofDrawCircle(0, 0, .5); + ofPopMatrix(); + } + + ofMesh mesh; + mesh.setMode(OF_PRIMITIVE_LINE_STRIP); + 
for(int j = 0; j < (int)objectPoints[i].size(); j++) { + ofVec3f cur = toOf(objectPoints[i][j]); + mesh.addVertex(cur); + } + mesh.draw(); + + ofPopMatrix(); + ofPopStyle(); + } + void Calibration::updateObjectPoints() { + vector points = createObjectPoints(patternSize, squareSize, patternType); + objectPoints.resize(imagePoints.size(), points); + } + void Calibration::updateReprojectionError() { + vector imagePoints2; + int totalPoints = 0; + double totalErr = 0; + + perViewErrors.clear(); + perViewErrors.resize(objectPoints.size()); + + for(int i = 0; i < (int)objectPoints.size(); i++) { + projectPoints(Mat(objectPoints[i]), boardRotations[i], boardTranslations[i], distortedIntrinsics.getCameraMatrix(), distCoeffs, imagePoints2); + double err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L2); + int n = objectPoints[i].size(); + perViewErrors[i] = sqrt(err * err / n); + totalErr += err * err; + totalPoints += n; + ofLog(OF_LOG_VERBOSE, "view " + ofToString(i) + " has error of " + ofToString(perViewErrors[i])); + } + + reprojectionError = sqrt(totalErr / totalPoints); + + ofLog(OF_LOG_VERBOSE, "all views have error of " + ofToString(reprojectionError)); + } + void Calibration::updateUndistortion() { + Mat undistortedCameraMatrix = getOptimalNewCameraMatrix(distortedIntrinsics.getCameraMatrix(), distCoeffs, distortedIntrinsics.getImageSize(), fillFrame ? 
0 : 1); + initUndistortRectifyMap(distortedIntrinsics.getCameraMatrix(), distCoeffs, Mat(), undistortedCameraMatrix, distortedIntrinsics.getImageSize(), CV_16SC2, undistortMapX, undistortMapY); + undistortedIntrinsics.setup(undistortedCameraMatrix, distortedIntrinsics.getImageSize()); + } + + vector Calibration::createObjectPoints(cv::Size patternSize, float squareSize, CalibrationPattern patternType) { + vector corners; + switch(patternType) { + case CHESSBOARD: + case CIRCLES_GRID: + for(int i = 0; i < patternSize.height; i++) + for(int j = 0; j < patternSize.width; j++) + corners.push_back(Point3f(float(j * squareSize), float(i * squareSize), 0)); + break; + case ASYMMETRIC_CIRCLES_GRID: + for(int i = 0; i < patternSize.height; i++) + for(int j = 0; j < patternSize.width; j++) + corners.push_back(Point3f(float(((2 * j) + (i % 2)) * squareSize), float(i * squareSize), 0)); + break; + } + return corners; + } +} diff --git a/addons/ofxCv/libs/ofxCv/src/ContourFinder.cpp b/addons/ofxCv/libs/ofxCv/src/ContourFinder.cpp new file mode 100644 index 00000000000..c7d6d183a37 --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/src/ContourFinder.cpp @@ -0,0 +1,336 @@ +#include "ofxCv/ContourFinder.h" +#include "ofxCv/Wrappers.h" +#include "ofGraphics.h" + +namespace ofxCv { + + struct CompareContourArea + { + CompareContourArea(const std::vector& areaVec) + : mAreaVec(areaVec) {} + + // Sort contour indices into decreasing order, based on a vector of + // contour areas. Later, we will use these indices to order the + // contours (which are stored in a separate vector). + bool operator()(size_t a, size_t b) const + { + return mAreaVec[a] > mAreaVec[b]; + } + + const std::vector& mAreaVec; + }; + + using namespace cv; + + ContourFinder::ContourFinder() + :autoThreshold(true) + ,invert(false) + ,simplify(true) + ,thresholdValue(128.) 
+ ,useTargetColor(false) + ,contourFindingMode(CV_RETR_EXTERNAL) + ,sortBySize(false) { + resetMinArea(); + resetMaxArea(); + } + + void ContourFinder::findContours(Mat img) { + // threshold the image using a tracked color or just binary grayscale + if(useTargetColor) { + Scalar offset(thresholdValue, thresholdValue, thresholdValue); + Scalar base = toCv(targetColor); + if(trackingColorMode == TRACK_COLOR_RGB) { + inRange(img, base - offset, base + offset, thresh); + } else { + if(trackingColorMode == TRACK_COLOR_H) { + offset[1] = 255; + offset[2] = 255; + } + if(trackingColorMode == TRACK_COLOR_HS) { + offset[2] = 255; + } + cvtColor(img, hsvBuffer, CV_RGB2HSV); + base = toCv(convertColor(targetColor, CV_RGB2HSV)); + Scalar lowerb = base - offset; + Scalar upperb = base + offset; + inRange(hsvBuffer, lowerb, upperb, thresh); + } + } else { + copyGray(img, thresh); + } + if(autoThreshold) { + threshold(thresh, thresholdValue, invert); + } + + // run the contour finder + vector > allContours; + int simplifyMode = simplify ? CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE; + cv::findContours(thresh, allContours, contourFindingMode, simplifyMode); + + // filter the contours + bool needMinFilter = (minArea > 0); + bool needMaxFilter = maxAreaNorm ? (maxArea < 1) : (maxArea < numeric_limits::infinity()); + vector allIndices; + vector allAreas; + if(needMinFilter || needMaxFilter) { + double imgArea = img.rows * img.cols; + double imgMinArea = minAreaNorm ? (minArea * imgArea) : minArea; + double imgMaxArea = maxAreaNorm ? 
(maxArea * imgArea) : maxArea; + for(size_t i = 0; i < allContours.size(); i++) { + double curArea = contourArea(Mat(allContours[i])); + allAreas.push_back(curArea); + if((!needMinFilter || curArea >= imgMinArea) && + (!needMaxFilter || curArea <= imgMaxArea)) { + allIndices.push_back(i); + } + } + } else { + for(size_t i = 0; i < allContours.size(); i++) { + if (sortBySize) { + allAreas.push_back(contourArea(allContours[i])); + } + allIndices.push_back(i); + } + } + + if (allIndices.size() > 1 && sortBySize) { + // Sort contour indices, based on a separate vector of areas. + std::sort(allIndices.begin(), allIndices.end(), CompareContourArea(allAreas)); + } + + // generate polylines and bounding boxes from the contours + contours.clear(); + polylines.clear(); + boundingRects.clear(); + for(size_t i = 0; i < allIndices.size(); i++) { + contours.push_back(allContours[allIndices[i]]); + polylines.push_back(toOf(contours[i])); + boundingRects.push_back(boundingRect(contours[i])); + } + + // track bounding boxes + tracker.track(boundingRects); + } + + + void ContourFinder::setFindHoles(bool findHoles){ + if(findHoles){ + contourFindingMode = CV_RETR_LIST; + }else{ + contourFindingMode = CV_RETR_EXTERNAL; + } + } + + void ContourFinder::setSortBySize(bool sizeSort) { + sortBySize = sizeSort; + } + + const vector >& ContourFinder::getContours() const { + return contours; + } + + const vector& ContourFinder::getPolylines() const { + return polylines; + } + + const vector& ContourFinder::getBoundingRects() const { + return boundingRects; + } + + unsigned int ContourFinder::size() const { + return contours.size(); + } + + vector& ContourFinder::getContour(unsigned int i) { + return contours[i]; + } + + ofPolyline& ContourFinder::getPolyline(unsigned int i) { + return polylines[i]; + } + + cv::Rect ContourFinder::getBoundingRect(unsigned int i) const { + return boundingRects[i]; + } + + cv::Point2f ContourFinder::getCenter(unsigned int i) const { + cv::Rect box = 
getBoundingRect(i); + return cv::Point2f(box.x + box.width / 2, box.y + box.height / 2); + } + + cv::Point2f ContourFinder::getCentroid(unsigned int i) const { + Moments m = moments(contours[i]); + if(m.m00!=0){ + return cv::Point2f(m.m10 / m.m00, m.m01 / m.m00); + }else{ + return cv::Point2f(0, 0); + } + } + + cv::Point2f ContourFinder::getAverage(unsigned int i) const { + Scalar average = mean(contours[i]); + return cv::Point2f(average[0], average[1]); + } + + cv::Vec2f ContourFinder::getBalance(unsigned int i) const { + return cv::Vec2f(getCentroid(i) - getCenter(i)); + } + + double ContourFinder::getContourArea(unsigned int i) const { + return contourArea(contours[i]); + } + + double ContourFinder::getArcLength(unsigned int i) const { + return arcLength(contours[i], true); + } + + vector ContourFinder::getConvexHull(unsigned int i) const { + vector hull; + convexHull(contours[i], hull); + return hull; + } + + vector ContourFinder::getConvexityDefects(unsigned int i) const { + return convexityDefects(contours[i]); + } + + cv::RotatedRect ContourFinder::getMinAreaRect(unsigned int i) const { + return minAreaRect(contours[i]); + } + + cv::Point2f ContourFinder::getMinEnclosingCircle(unsigned int i, float& radius) const { + cv::Point2f center; + minEnclosingCircle(contours[i], center, radius); + return center; + } + + cv::RotatedRect ContourFinder::getFitEllipse(unsigned int i) const { + if(contours[i].size() < 5) { + return getMinAreaRect(i); + } + return fitEllipse(contours[i]); + } + + vector ContourFinder::getFitQuad(unsigned int i) const { + vector convexHull = getConvexHull(i); + vector quad = convexHull; + + static const unsigned int targetPoints = 4; + static const unsigned int maxIterations = 16; + static const double infinity = numeric_limits::infinity(); + double minEpsilon = 0; + double maxEpsilon = infinity; + double curEpsilon = 16; // good initial guess + + // unbounded binary search to simplify the convex hull until it's 4 points + if(quad.size() > 
4) { + for(int i = 0; i <(int) maxIterations; i++) { + approxPolyDP(Mat(convexHull), quad, curEpsilon, true); + if(quad.size() == targetPoints) { + break; + } + if(quad.size() > targetPoints) { + minEpsilon = curEpsilon; + if(maxEpsilon == infinity) { + curEpsilon = curEpsilon * 2; + } else { + curEpsilon = (maxEpsilon + minEpsilon) / 2; + } + } + if(quad.size() < targetPoints) { + maxEpsilon = curEpsilon; + curEpsilon = (maxEpsilon + minEpsilon) / 2; + } + } + } + + return quad; + } + + cv::Vec2f ContourFinder::getVelocity(unsigned int i) const { + return tracker.getVelocity(i); + } + + unsigned int ContourFinder::getLabel(unsigned int i) const { + return tracker.getCurrentLabels()[i]; + } + + RectTracker& ContourFinder::getTracker() { + return tracker; + } + + void ContourFinder::setAutoThreshold(bool autoThreshold) { + this->autoThreshold = autoThreshold; + } + + void ContourFinder::setThreshold(float thresholdValue) { + this->thresholdValue = thresholdValue; + } + + void ContourFinder::setInvert(bool invert) { + this->invert = invert; + } + + void ContourFinder::setUseTargetColor(bool useTargetColor) { + this->useTargetColor = useTargetColor; + } + + void ContourFinder::setTargetColor(ofColor targetColor, TrackingColorMode trackingColorMode) { + useTargetColor = true; + this->targetColor = targetColor; + this->trackingColorMode = trackingColorMode; + } + + void ContourFinder::setSimplify(bool simplify) { + this->simplify = simplify; + } + + void ContourFinder::draw() { + ofPushStyle(); + ofNoFill(); + for(int i = 0; i < (int)polylines.size(); i++) { + polylines[i].draw(); + ofDrawRectangle(toOf(getBoundingRect(i))); + } + ofPopStyle(); + } + + void ContourFinder::resetMinArea() { + setMinArea(0); + } + + void ContourFinder::resetMaxArea() { + setMaxArea(numeric_limits::infinity()); + } + + void ContourFinder::setMinArea(float minArea) { + this->minArea = minArea; + minAreaNorm = false; + } + + void ContourFinder::setMaxArea(float maxArea) { + this->maxArea = 
maxArea; + maxAreaNorm = false; + } + + void ContourFinder::setMinAreaRadius(float minAreaRadius) { + minArea = PI * minAreaRadius * minAreaRadius; + minAreaNorm = false; + } + + void ContourFinder::setMaxAreaRadius(float maxAreaRadius) { + maxArea = PI * maxAreaRadius * maxAreaRadius; + maxAreaNorm = false; + } + + void ContourFinder::setMinAreaNorm(float minAreaNorm) { + minArea = minAreaNorm; + this->minAreaNorm = true; + } + + void ContourFinder::setMaxAreaNorm(float maxAreaNorm) { + maxArea = maxAreaNorm; + this->maxAreaNorm = true; + } + +} diff --git a/addons/ofxCv/libs/ofxCv/src/Distance.cpp b/addons/ofxCv/libs/ofxCv/src/Distance.cpp new file mode 100644 index 00000000000..32d4501786a --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/src/Distance.cpp @@ -0,0 +1,183 @@ +#include "ofxCv/Distance.h" +#include + +namespace ofxCv { + + using namespace std; + + // http://www.merriampark.com/ld.htm + + class Distance { + public: + int LD (char const *s, char const *t); + private: + int Minimum (int a, int b, int c); + int *GetCellPointer (int *pOrigin, int col, int row, int nCols); + int GetAt (int *pOrigin, int col, int row, int nCols); + void PutAt (int *pOrigin, int col, int row, int nCols, int x); + }; + + const string& mostRepresentative(const vector& strs) { + int bestScore; + int besti; + int n = strs.size(); + for(int i = 0; i < n; i++) { + int curScore = 0; + for(int j = 0; j < n; j++) { + if(i != j) { + int curDistance = editDistance(strs[i], strs[j]); + curScore += curDistance * curDistance; + } + } + if(curScore < bestScore || i == 0) { + bestScore = curScore; + besti = i; + } + } + + return strs[besti]; + } + + int editDistance(const string& a, const string& b) { + Distance dist; + return dist.LD(a.c_str(), b.c_str()); + } + + //**************************** + // Get minimum of three values + //**************************** + + int Distance::Minimum (int a, int b, int c) + { + int mi; + + mi = a; + if (b < mi) { + mi = b; + } + if (c < mi) { + mi = c; + } + 
return mi; + + } + + //************************************************** + // Get a pointer to the specified cell of the matrix + //************************************************** + + int *Distance::GetCellPointer (int *pOrigin, int col, int row, int nCols) + { + return pOrigin + col + (row * (nCols + 1)); + } + + //***************************************************** + // Get the contents of the specified cell in the matrix + //***************************************************** + + int Distance::GetAt (int *pOrigin, int col, int row, int nCols) + { + int *pCell; + + pCell = GetCellPointer (pOrigin, col, row, nCols); + return *pCell; + + } + + //******************************************************* + // Fill the specified cell in the matrix with the value x + //******************************************************* + + void Distance::PutAt (int *pOrigin, int col, int row, int nCols, int x) + { + int *pCell; + + pCell = GetCellPointer (pOrigin, col, row, nCols); + *pCell = x; + + } + + //***************************** + // Compute Levenshtein distance + //***************************** + + int Distance::LD (char const *s, char const *t) + { + int *d; // pointer to matrix + int n; // length of s + int m; // length of t + int i; // iterates through s + int j; // iterates through t + char s_i; // ith character of s + char t_j; // jth character of t + int cost; // cost + int result; // result + int cell; // contents of target cell + int above; // contents of cell immediately above + int left; // contents of cell immediately to left + int diag; // contents of cell immediately above and to left + int sz; // number of cells in matrix + + // Step 1 + + n = strlen (s); + m = strlen (t); + if (n == 0) { + return m; + } + if (m == 0) { + return n; + } + sz = (n+1) * (m+1) * sizeof (int); + d = (int *) malloc (sz); + + // Step 2 + + for (i = 0; i <= n; i++) { + PutAt (d, i, 0, n, i); + } + + for (j = 0; j <= m; j++) { + PutAt (d, 0, j, n, j); + } + + // Step 3 + + for 
(i = 1; i <= n; i++) { + + s_i = s[i-1]; + + // Step 4 + + for (j = 1; j <= m; j++) { + + t_j = t[j-1]; + + // Step 5 + + if (s_i == t_j) { + cost = 0; + } + else { + cost = 1; + } + + // Step 6 + + above = GetAt (d,i-1,j, n); + left = GetAt (d,i, j-1, n); + diag = GetAt (d, i-1,j-1, n); + cell = Minimum (above + 1, left + 1, diag + cost); + PutAt (d, i, j, n, cell); + } + } + + // Step 7 + + result = GetAt (d, n, m, n); + free (d); + return result; + + } + +} diff --git a/addons/ofxCv/libs/ofxCv/src/Flow.cpp b/addons/ofxCv/libs/ofxCv/src/Flow.cpp new file mode 100644 index 00000000000..bd8dc490f97 --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/src/Flow.cpp @@ -0,0 +1,328 @@ +#include "ofxCv/Flow.h" +#include "ofGraphics.h" + +namespace ofxCv { + + using namespace cv; + + Flow::Flow() + :hasFlow(false) { + } + + Flow::~Flow(){ + } + + //call with two images + void Flow::calcOpticalFlow(Mat lastImage, Mat currentImage){ + if(lastImage.channels() == 1 && currentImage.channels() == 1) { + calcFlow(lastImage, currentImage); + } else { + copyGray(lastImage, last); + copyGray(currentImage, curr); + calcFlow(last, curr); + } + hasFlow = true; + } + + //you can add subsequent images this way without having to store + //the previous one yourself + void Flow::calcOpticalFlow(Mat nextImage){ + copyGray(nextImage, curr); + if(last.size == curr.size){ + calcFlow(last, curr); + hasFlow = true; + } + swap(curr, last); + } + + void Flow::draw(){ + if(hasFlow) { + drawFlow(ofRectangle(0, 0, getWidth(), getHeight())); + } + } + void Flow::draw(float x, float y){ + if(hasFlow){ + drawFlow(ofRectangle(x, y, getWidth(), getHeight())); + } + } + void Flow::draw(float x, float y, float width, float height){ + if(hasFlow){ + drawFlow(ofRectangle(x,y,width,height)); + } + } + void Flow::draw(ofRectangle rect){ + if(hasFlow){ + drawFlow(rect); + } + } + int Flow::getWidth() { + return curr.cols; + } + int Flow::getHeight() { + return curr.rows; + } + void Flow::resetFlow() { + last = Mat(); + 
curr = Mat(); + hasFlow = false; + } + + FlowPyrLK::FlowPyrLK() + :windowSize(32) + ,maxLevel(3) + ,maxFeatures(200) + ,qualityLevel(0.01) + ,minDistance(4) + ,pyramidLevels(10) + ,calcFeaturesNextFrame(true) + { + } + + FlowPyrLK::~FlowPyrLK(){ + } + + void FlowPyrLK::setWindowSize(int winsize){ + this->windowSize = winsize; + } + void FlowPyrLK::setMaxLevel(int maxLevel){ + this->maxLevel = maxLevel; + } + void FlowPyrLK::setMaxFeatures(int maxFeatures){ + this->maxFeatures = maxFeatures; + } + void FlowPyrLK::setQualityLevel(float qualityLevel){ + this->qualityLevel = qualityLevel; + } + void FlowPyrLK::setMinDistance(int minDistance){ + this->minDistance = minDistance; + } + + void FlowPyrLK::calcFlow(Mat prev, Mat next){ + if(!nextPts.empty() || calcFeaturesNextFrame){ + if(calcFeaturesNextFrame){ + calcFeaturesToTrack(prevPts, next); + calcFeaturesNextFrame = false; + }else{ + swap(prevPts, nextPts); + } + nextPts.clear(); + +#if CV_MAJOR_VERSION>=2 && (CV_MINOR_VERSION>4 || (CV_MINOR_VERSION==4 && CV_SUBMINOR_VERSION>=1)) + if (prevPyramid.empty()) { + buildOpticalFlowPyramid(prev,prevPyramid,cv::Size(windowSize, windowSize),10); + } + buildOpticalFlowPyramid(next,pyramid,cv::Size(windowSize, windowSize),10); + calcOpticalFlowPyrLK(prevPyramid, + pyramid, + prevPts, + nextPts, + status, + err, + cv::Size(windowSize, windowSize), + maxLevel); + prevPyramid = pyramid; + pyramid.clear(); +#else + calcOpticalFlowPyrLK(prev, + next, + prevPts, + nextPts, + status, + err, + cv::Size(windowSize, windowSize), + maxLevel); +#endif + status.resize(nextPts.size(),0); + }else{ + calcFeaturesToTrack(nextPts, next); + } + } + + void FlowPyrLK::calcFeaturesToTrack(vector & features, Mat next){ + goodFeaturesToTrack( + next, + features, + maxFeatures, + qualityLevel, + minDistance + ); + } + + void FlowPyrLK::resetFeaturesToTrack(){ + calcFeaturesNextFrame=true; + } + + void FlowPyrLK::setFeaturesToTrack(const vector & features){ + nextPts.resize(features.size()); + for(int 
i=0;i<(int)features.size();i++){ + nextPts[i]=toCv(features[i]); + } + calcFeaturesNextFrame = false; + } + + void FlowPyrLK::setFeaturesToTrack(const vector & features){ + nextPts = features; + calcFeaturesNextFrame = false; + } + + vector FlowPyrLK::getFeatures(){ + ofPolyline poly =toOf(prevPts); + return poly.getVertices(); + } + + vector FlowPyrLK::getCurrent(){ + vector ret; + for(int i = 0; i < (int)nextPts.size(); i++) { + if(status[i]){ + ret.push_back(toOf(nextPts[i])); + } + } + return ret; + } + + vector FlowPyrLK::getMotion(){ + vector ret(prevPts.size()); + for(int i = 0; i < (int)prevPts.size(); i++) { + if(status[i]){ + ret.push_back(toOf(nextPts[i])-toOf(prevPts[i])); + } + } + return ret; + } + + void FlowPyrLK::drawFlow(ofRectangle rect) { + ofVec2f offset(rect.x,rect.y); + ofVec2f scale(rect.width/getWidth(),rect.height/getHeight()); + for(int i = 0; i < (int)prevPts.size(); i++) { + if(status[i]){ + ofDrawLine(toOf(prevPts[i])*scale+offset, toOf(nextPts[i])*scale+offset); + } + } + } + + void FlowPyrLK::resetFlow(){ + Flow::resetFlow(); + resetFeaturesToTrack(); + prevPts.clear(); + } + + FlowFarneback::FlowFarneback() + :pyramidScale(0.5) + ,numLevels(4) + ,windowSize(8) + ,numIterations(2) + ,polyN(7) + ,polySigma(1.5) + ,farnebackGaussian(false) + { + } + + FlowFarneback::~FlowFarneback(){ + } + + void FlowFarneback::setPyramidScale(float scale){ + if(scale < 0.0 || scale >= 1.0){ + ofLogWarning("FlowFarneback::setPyramidScale") << "setting scale to a number outside of 0 - 1"; + } + this->pyramidScale = scale; + } + + void FlowFarneback::setNumLevels(int levels){ + this->numLevels = levels; + } + void FlowFarneback::setWindowSize(int winsize){ + this->windowSize = winsize; + } + void FlowFarneback::setNumIterations(int interations){ + this->numIterations = interations; + } + void FlowFarneback::setPolyN(int polyN){ + this->polyN = polyN; + } + void FlowFarneback::setPolySigma(float polySigma){ + this->polySigma = polySigma; + } + void 
FlowFarneback::setUseGaussian(bool gaussian){ + this->farnebackGaussian = gaussian; + } + + void FlowFarneback::resetFlow(){ + Flow::resetFlow(); + flow.setTo(0); + } + + void FlowFarneback::calcFlow(Mat prev, Mat next){ + int flags = 0; + if(hasFlow){ + flags = OPTFLOW_USE_INITIAL_FLOW; + } + if(farnebackGaussian){ + flags |= OPTFLOW_FARNEBACK_GAUSSIAN; + } + + calcOpticalFlowFarneback(prev, + next, + flow, + pyramidScale, + numLevels, + windowSize, + numIterations, + polyN, + polySigma, + flags); + } + Mat& FlowFarneback::getFlow() { + if(!hasFlow) { + flow = Mat::zeros(1, 1, CV_32FC2); + } + return flow; + } + ofVec2f FlowFarneback::getFlowOffset(int x, int y){ + if(!hasFlow){ + return ofVec2f(0, 0); + } + const Vec2f& vec = flow.at(y, x); + return ofVec2f(vec[0], vec[1]); + } + ofVec2f FlowFarneback::getFlowPosition(int x, int y){ + if(!hasFlow){ + return ofVec2f(0, 0); + } + const Vec2f& vec = flow.at(y, x); + return ofVec2f(x + vec[0], y + vec[1]); + } + ofVec2f FlowFarneback::getTotalFlow(){ + return getTotalFlowInRegion(ofRectangle(0,0,flow.cols, flow.rows)); + } + ofVec2f FlowFarneback::getAverageFlow(){ + return getAverageFlowInRegion(ofRectangle(0,0,flow.cols,flow.rows)); + } + + ofVec2f FlowFarneback::getAverageFlowInRegion(ofRectangle rect){ + return getTotalFlowInRegion(rect)/(rect.width*rect.height); + } + + ofVec2f FlowFarneback::getTotalFlowInRegion(ofRectangle region){ + if(!hasFlow){ + return ofVec2f(0, 0); + } + + const Scalar& sc = sum(flow(toCv(region))); + return ofVec2f(sc[0], sc[1]); + } + + void FlowFarneback::drawFlow(ofRectangle rect){ + if(!hasFlow){ + return; + } + ofVec2f offset(rect.x,rect.y); + ofVec2f scale(rect.width/flow.cols, rect.height/flow.rows); + int stepSize = 4; //TODO: make class-level parameteric + for(int y = 0; y < flow.rows; y += stepSize) { + for(int x = 0; x < flow.cols; x += stepSize) { + ofVec2f cur = ofVec2f(x, y) * scale + offset; + ofDrawLine(cur, getFlowPosition(x, y) * scale + offset); + } + } + } +} diff 
--git a/addons/ofxCv/libs/ofxCv/src/Helpers.cpp b/addons/ofxCv/libs/ofxCv/src/Helpers.cpp new file mode 100644 index 00000000000..cce9bee3a08 --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/src/Helpers.cpp @@ -0,0 +1,171 @@ +#include "ofxCv/Helpers.h" +#include "ofxCv/Utilities.h" +#include "ofGraphics.h" + +namespace ofxCv { + + using namespace cv; + + ofMatrix4x4 makeMatrix(Mat rotation, Mat translation) { + Mat rot3x3; + if(rotation.rows == 3 && rotation.cols == 3) { + rot3x3 = rotation; + } else { + Rodrigues(rotation, rot3x3); + } + double* rm = rot3x3.ptr(0); + double* tm = translation.ptr(0); + return ofMatrix4x4(rm[0], rm[3], rm[6], 0.0f, + rm[1], rm[4], rm[7], 0.0f, + rm[2], rm[5], rm[8], 0.0f, + tm[0], tm[1], tm[2], 1.0f); + } + + void drawMat(Mat& mat, float x, float y) { + drawMat(mat, x, y, mat.cols, mat.rows); + } + + // special case for copying into ofTexture + template + void copy(S& src, ofTexture& tex) { + imitate(tex, src); + int w = tex.getWidth(), h = tex.getHeight(); + int glType = tex.getTextureData().glInternalFormat; + Mat mat = toCv(src); + tex.loadData(mat.ptr(), w, h, glType); + } + + void drawMat(Mat& mat, float x, float y, float width, float height) { + if(mat.empty()) { + return; + } + ofTexture tex; + copy(mat, tex); + tex.draw(x, y, width, height); + } + + void applyMatrix(const ofMatrix4x4& matrix) { + glMultMatrixf((GLfloat*) matrix.getPtr()); + } + + int forceOdd(int x) { + return (x / 2) * 2 + 1; + } + + int findFirst(const Mat& arr, unsigned char target) { + for(int i = 0; i < arr.rows; i++) { + if(arr.at(i) == target) { + return i; + } + } + return 0; + } + + int findLast(const Mat& arr, unsigned char target) { + for(int i = arr.rows - 1; i >= 0; i--) { + if(arr.at(i) == target) { + return i; + } + } + return 0; + } + + float weightedAverageAngle(const vector& lines) { + float angleSum = 0; + ofVec2f start, end; + float weights = 0; + for(int i = 0; i < lines.size(); i++) { + start.set(lines[i][0], lines[i][1]); + 
end.set(lines[i][2], lines[i][3]); + ofVec2f diff = end - start; + float length = diff.length(); + float weight = length * length; + float angle = atan2f(diff.y, diff.x); + angleSum += angle * weight; + weights += weight; + } + return angleSum / weights; + } + + vector getConvexPolygon(const vector& convexHull, int targetPoints) { + vector result = convexHull; + + static const unsigned int maxIterations = 16; + static const double infinity = numeric_limits::infinity(); + double minEpsilon = 0; + double maxEpsilon = infinity; + double curEpsilon = 16; // good initial guess + + // unbounded binary search to simplify the convex hull until it's targetPoints + if(result.size() > targetPoints) { + for(int i = 0; i < maxIterations; i++) { + approxPolyDP(Mat(convexHull), result, curEpsilon, true); + if(result.size() == targetPoints) { + break; + } + if(result.size() > targetPoints) { + minEpsilon = curEpsilon; + if(maxEpsilon == infinity) { + curEpsilon = curEpsilon * 2; + } else { + curEpsilon = (maxEpsilon + minEpsilon) / 2; + } + } + if(result.size() < targetPoints) { + maxEpsilon = curEpsilon; + curEpsilon = (maxEpsilon + minEpsilon) / 2; + } + } + } + + return result; + } + + void drawHighlightString(string text, ofPoint position, ofColor background, ofColor foreground) { + drawHighlightString(text, position.x, position.y, background, foreground); + } + + void drawHighlightString(string text, int x, int y, ofColor background, ofColor foreground) { + vector lines = ofSplitString(text, "\n"); + int textLength = 0; + for(int i = 0; i < lines.size(); i++) { + // tabs are not rendered + int tabs = count(lines[i].begin(), lines[i].end(), '\t'); + int curLength = lines[i].length() - tabs; + // after the first line, everything is indented with one space + if(i > 0) { + curLength++; + } + if(curLength > textLength) { + textLength = curLength; + } + } + + int padding = 4; + int fontSize = 8; + float leading = 1.7; + int height = lines.size() * fontSize * leading - 1; + int 
width = textLength * fontSize; + +#ifdef TARGET_OPENGLES + // This needs to be refactored to support OpenGLES + // Else it will work correctly +#else + glPushAttrib(GL_DEPTH_BUFFER_BIT); + glDisable(GL_DEPTH_TEST); + ofPushStyle(); + ofSetColor(background); + ofFill(); + ofDrawRectangle(x, y, width + 2 * padding, height + 2 * padding); + ofSetColor(foreground); + ofNoFill(); + ofPushMatrix(); + ofTranslate(padding, padding); + ofDrawBitmapString(text, x + 1, y + fontSize + 2); + ofPopMatrix(); + ofPopStyle(); + glPopAttrib(); +#endif + + } +} diff --git a/addons/ofxCv/libs/ofxCv/src/Kalman.cpp b/addons/ofxCv/libs/ofxCv/src/Kalman.cpp new file mode 100644 index 00000000000..caee337c8af --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/src/Kalman.cpp @@ -0,0 +1,131 @@ +#include "ofxCv/Kalman.h" + +namespace ofxCv { + + // based on code from: + // http://www.morethantechnical.com/2011/06/17/simple-kalman-filter-for-tracking-using-opencv-2-2-w-code/ + + using namespace cv; + + template + void KalmanPosition_::init(T smoothness, T rapidness, bool bUseAccel) { + if( bUseAccel ) { + KF.init(9, 3, 0); // 9 variables (position+velocity+accel) and 3 measurements (position) + + KF.transitionMatrix = *(Mat_(9, 9) << + 1,0,0,1,0,0,0.5,0,0, + 0,1,0,0,1,0,0,0.5,0, + 0,0,1,0,0,1,0,0,0.5, + 0,0,0,1,0,0,1,0,0, + 0,0,0,0,1,0,0,1,0, + 0,0,0,0,0,1,0,0,1, + 0,0,0,0,0,0,1,0,0, + 0,0,0,0,0,0,0,1,0, + 0,0,0,0,0,0,0,0,1); + + measurement = Mat_::zeros(3, 1); + + KF.statePre = Mat_::zeros(9, 1); + } else { + KF.init(6, 3, 0); // 6 variables (position+velocity) and 3 measurements (position) + + KF.transitionMatrix = *(Mat_(6, 6) << + 1,0,0,1,0,0, + 0,1,0,0,1,0, + 0,0,1,0,0,1, + 0,0,0,1,0,0, + 0,0,0,0,1,0, + 0,0,0,0,0,1); + + measurement = Mat_::zeros(3, 1); + + KF.statePre = Mat_::zeros(6, 1); + } + setIdentity(KF.measurementMatrix); + setIdentity(KF.processNoiseCov, Scalar::all(smoothness)); + setIdentity(KF.measurementNoiseCov, Scalar::all(rapidness)); + setIdentity(KF.errorCovPost, 
Scalar::all(.1)); + } + + template + void KalmanPosition_::update(const ofVec3f& p) { + // First predict, to update the internal statePre variable + prediction = KF.predict(); + + // The "correct" phase that is going to use the predicted value and our measurement + measurement(0) = p.x; + measurement(1) = p.y; + measurement(2) = p.z; + estimated = KF.correct(measurement); + } + + template + ofVec3f KalmanPosition_::getPrediction() + { + return ofVec3f(prediction(0), prediction(1), prediction(2)); + } + + template + ofVec3f KalmanPosition_::getEstimation() + { + return ofVec3f(estimated(0), estimated(1), estimated(2)); + } + + template + ofVec3f KalmanPosition_::getVelocity() + { + return ofVec3f(estimated(3), estimated(4), estimated(5)); + } + + template class KalmanPosition_; + + template + void KalmanEuler_::init(T smoothness, T rapidness, bool bUseAccel) { + KalmanPosition_::init(smoothness, rapidness, bUseAccel); + eulerPrev.x = 0.f; + eulerPrev.y = 0.f; + eulerPrev.z = 0.f; + } + + template + void KalmanEuler_::update(const ofQuaternion& q) { + // warp to appropriate dimension + ofVec3f euler = q.getEuler(); + for( int i = 0; i < 3; i++ ) { + float rev = floorf((eulerPrev[i] + 180) / 360.f) * 360; + euler[i] += rev; + if( euler[i] < -90 + rev && eulerPrev[i] > 90 + rev ) euler[i] += 360; + else if( euler[i] > 90 + rev && eulerPrev[i] < -90 + rev ) euler[i] -= 360; + } + + KalmanPosition_::update(euler); + eulerPrev = euler; + } + + template + ofQuaternion KalmanEuler_::getPrediction() + { + ofQuaternion q; + q.set(0, 0, 0, 1); + ofVec3f euler = KalmanPosition_::getPrediction(); + + q.makeRotate(euler.x, ofVec3f(1, 0, 0), euler.z, ofVec3f(0, 0, 1), euler.y, ofVec3f(0, 1, 0)); + + return q; + } + + template + ofQuaternion KalmanEuler_::getEstimation() + { + ofQuaternion q; + q.set(0, 0, 0, 1); + ofVec3f euler = KalmanPosition_::getEstimation(); + + q.makeRotate(euler.x, ofVec3f(1, 0, 0), euler.z, ofVec3f(0, 0, 1), euler.y, ofVec3f(0, 1, 0)); + + return q; + } + 
+ template class KalmanEuler_; + +} diff --git a/addons/ofxCv/libs/ofxCv/src/ObjectFinder.cpp b/addons/ofxCv/libs/ofxCv/src/ObjectFinder.cpp new file mode 100644 index 00000000000..1358c27e0ff --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/src/ObjectFinder.cpp @@ -0,0 +1,168 @@ +#include "ofxCv/ObjectFinder.h" +#include "ofGraphics.h" + +namespace ofxCv { + using namespace cv; + + ObjectFinder::ObjectFinder() + :rescale(1) + ,multiScaleFactor(1.1) + ,minNeighbors(3) + ,minSizeScale(0) + ,maxSizeScale(1) + ,useHistogramEqualization(true) + ,cannyPruning(false) + ,findBiggestObject(false) + { + } + void ObjectFinder::setup(string cascadeFilename) { + cascadeFilename = ofToDataPath(cascadeFilename); + if(ofFile(cascadeFilename).exists()) { + classifier.load(cascadeFilename); + } else { + ofLogError("ObjectFinder::setup") << "Couldn't find " << cascadeFilename; + } + } + void ObjectFinder::update(cv::Mat img) { + cv::Mat gray; + if(getChannels(img) == 1) { + gray = img; + } else { + copyGray(img,gray); + } + resize(gray, graySmall, rescale, rescale); + cv::Mat graySmallMat = toCv(graySmall); + if(useHistogramEqualization) { + equalizeHist(graySmallMat, graySmallMat); + } + cv::Size minSize, maxSize; + float minSide = MIN(graySmallMat.rows, graySmallMat.cols); + if(minSizeScale > 0) { + int side = minSizeScale * minSide; + minSize = cv::Size(side, side); + } + if(maxSizeScale < 1) { + int side = maxSizeScale * minSide; + maxSize = cv::Size(side, side); + } + classifier.detectMultiScale(graySmallMat, + objects, + multiScaleFactor, + minNeighbors, + (cannyPruning ? CASCADE_DO_CANNY_PRUNING : 0) | + (findBiggestObject ? 
CASCADE_FIND_BIGGEST_OBJECT | CASCADE_DO_ROUGH_SEARCH : 0), + minSize, + maxSize); + for(int i = 0; i < objects.size(); i++) { + cv::Rect& rect = objects[i]; + rect.width /= rescale, rect.height /= rescale; + rect.x /= rescale, rect.y /= rescale; + } + tracker.track(objects); + } + unsigned int ObjectFinder::size() const { + return objects.size(); + } + ofRectangle ObjectFinder::getObject(unsigned int i) const { + return toOf(objects[i]); + } + ofRectangle ObjectFinder::getObjectSmoothed(unsigned int i) const { + return toOf(tracker.getSmoothed(getLabel(i))); + } + cv::Vec2f ObjectFinder::getVelocity(unsigned int i) const { + return tracker.getVelocity(i); + } + unsigned int ObjectFinder::getLabel(unsigned int i) const { + return tracker.getCurrentLabels()[i]; + } + RectTracker& ObjectFinder::getTracker() { + return tracker; + } + void ObjectFinder::draw() const { + ofPushStyle(); + ofNoFill(); + for(int i = 0; i < size(); i++) { + ofRectangle object = getObject(i); + ofDrawRectangle(object); + ofDrawBitmapStringHighlight(ofToString(getLabel(i)), object.x, object.y); + } + ofPopStyle(); + } + void ObjectFinder::setPreset(ObjectFinder::Preset preset) { + if(preset == ObjectFinder::Fast) { + setRescale(.25); + setMinNeighbors(2); + setMultiScaleFactor(1.2); + setMinSizeScale(.25); + setMaxSizeScale(.75); + setCannyPruning(true); + setFindBiggestObject(false); + } else if(preset == ObjectFinder::Accurate) { + setRescale(.5); + setMinNeighbors(6); + setMultiScaleFactor(1.02); + setMinSizeScale(.1); + setMaxSizeScale(1); + setCannyPruning(true); + setFindBiggestObject(false); + } else if(preset == ObjectFinder::Sensitive) { + setRescale(.5); + setMinNeighbors(1); + setMultiScaleFactor(1.02); + setMinSizeScale(.1); + setMaxSizeScale(1); + setCannyPruning(false); + setFindBiggestObject(false); + } + } + + void ObjectFinder::setRescale(float rescale) { + this->rescale = rescale; + } + void ObjectFinder::setMinNeighbors(int minNeighbors) { + this->minNeighbors = 
minNeighbors; + } + void ObjectFinder::setMultiScaleFactor(float multiScaleFactor) { + this->multiScaleFactor = multiScaleFactor; + } + void ObjectFinder::setCannyPruning(bool cannyPruning) { + this->cannyPruning = cannyPruning; + } + void ObjectFinder::setFindBiggestObject(bool findBiggestObject) { + this->findBiggestObject = findBiggestObject; + } + void ObjectFinder::setUseHistogramEqualization(bool useHistogramEqualization) { + this->useHistogramEqualization = useHistogramEqualization; + } + void ObjectFinder::setMinSizeScale(float minSizeScale) { + this->minSizeScale = minSizeScale; + } + void ObjectFinder::setMaxSizeScale(float maxSizeScale) { + this->maxSizeScale = maxSizeScale; + } + + float ObjectFinder::getRescale() const { + return rescale; + } + int ObjectFinder::getMinNeighbors() const { + return minNeighbors; + } + float ObjectFinder::getMultiScaleFactor() const { + return multiScaleFactor; + } + bool ObjectFinder::getCannyPruning() const { + return cannyPruning; + } + bool ObjectFinder::getFindBiggestObject() const { + return findBiggestObject; + } + bool ObjectFinder::getUseHistogramEqualization() const { + return useHistogramEqualization; + } + float ObjectFinder::getMinSizeScale() const { + return minSizeScale; + } + float ObjectFinder::getMaxSizeScale() const { + return maxSizeScale; + } +} diff --git a/addons/ofxCv/libs/ofxCv/src/RunningBackground.cpp b/addons/ofxCv/libs/ofxCv/src/RunningBackground.cpp new file mode 100644 index 00000000000..8518abb025d --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/src/RunningBackground.cpp @@ -0,0 +1,71 @@ +#include "ofxCv/RunningBackground.h" +#include "ofxCv/Wrappers.h" + +namespace ofxCv { + RunningBackground::RunningBackground() + :learningRate(.0001) + ,learningTime(900.0) + ,useLearningTime(false) + ,thresholdValue(26) + ,ignoreForeground(false) + ,needToReset(false) + ,differenceMode(ABSDIFF) { + } + void RunningBackground::update(cv::Mat frame, cv::Mat& thresholded) { + if(needToReset || 
accumulator.empty()) { + needToReset = false; + frame.convertTo(accumulator, CV_32F); + } + + accumulator.convertTo(background, CV_8U); + switch(differenceMode) { + case ABSDIFF: cv::absdiff(background, frame, foreground); break; + case BRIGHTER: cv::subtract(frame, background, foreground); break; + case DARKER: cv::subtract(background, frame, foreground); break; + } + ofxCv::copyGray(foreground, foregroundGray); + int thresholdMode = ignoreForeground ? cv::THRESH_BINARY_INV : cv::THRESH_BINARY; + cv::threshold(foregroundGray, thresholded, thresholdValue, 255, thresholdMode); + + float curLearningRate = learningRate; + if(useLearningTime) { + curLearningRate = 1. - powf(1. - (thresholdValue / 255.), 1. / learningTime); + } + if(ignoreForeground) { + cv::accumulateWeighted(frame, accumulator, curLearningRate, thresholded); + cv::bitwise_not(thresholded, thresholded); + } else { + cv::accumulateWeighted(frame, accumulator, curLearningRate); + } + } + cv::Mat& RunningBackground::getBackground() { + return background; + } + cv::Mat& RunningBackground::getForeground() { + return foreground; + } + float RunningBackground::getPresence() const { + // this could be memoized to improve speed + return cv::mean(foreground)[0] / 255.; + } + void RunningBackground::setThresholdValue(unsigned int thresholdValue) { + this->thresholdValue = thresholdValue; + } + void RunningBackground::setLearningRate(double learningRate) { + this->learningRate = learningRate; + useLearningTime = false; + } + void RunningBackground::setLearningTime(double learningTime) { + this->learningTime = learningTime; + useLearningTime = true; + } + void RunningBackground::setIgnoreForeground(bool ignoreForeground) { + this->ignoreForeground = ignoreForeground; + } + void RunningBackground::setDifferenceMode(DifferenceMode differenceMode) { + this->differenceMode = differenceMode; + } + void RunningBackground::reset() { + needToReset = true; + } +} diff --git a/addons/ofxCv/libs/ofxCv/src/Tracker.cpp 
b/addons/ofxCv/libs/ofxCv/src/Tracker.cpp new file mode 100644 index 00000000000..0d3b91880e2 --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/src/Tracker.cpp @@ -0,0 +1,33 @@ +#include "ofxCv/Tracker.h" + +#include "ofxCv/Utilities.h" +#include "ofRectangle.h" +#include "ofVec2f.h" + +namespace ofxCv { + + float trackingDistance(const cv::Rect& a, const cv::Rect& b) { + float dx = (a.x + a.width / 2.) - (b.x + b.width / 2.); + float dy = (a.y + a.height / 2.) - (b.y + b.height / 2.); + float dw = a.width - b.width; + float dh = a.height - b.height; + float pd = sqrtf(dx * dx + dy * dy); + float sd = sqrtf(dw * dw + dh * dh); + return pd + sd; + } + + float trackingDistance(const cv::Point2f& a, const cv::Point2f& b) { + float dx = a.x - b.x; + float dy = a.y - b.y; + return sqrtf(dx * dx + dy * dy); + } + + float trackingDistance(const ofRectangle& a, const ofRectangle& b) { + return trackingDistance(toCv(a), toCv(b)); + } + + float trackingDistance(const ofVec2f& a, const ofVec2f& b) { + return trackingDistance(toCv(a), toCv(b)); + } + +} \ No newline at end of file diff --git a/addons/ofxCv/libs/ofxCv/src/Utilities.cpp b/addons/ofxCv/libs/ofxCv/src/Utilities.cpp new file mode 100644 index 00000000000..6d1fa8b0a5d --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/src/Utilities.cpp @@ -0,0 +1,141 @@ +#include "ofxCv/Utilities.h" + +#include "ofMath.h" + +// vs2010 support (this should be added to the OF core) +#if (_MSC_VER) +#include +#endif + +namespace ofxCv { + + using namespace cv; + + Mat toCv(Mat& mat) { + return mat; + } + + Point2f toCv(ofVec2f vec) { + return Point2f(vec.x, vec.y); + } + + Point3f toCv(ofVec3f vec) { + return Point3f(vec.x, vec.y, vec.z); + } + + cv::Rect toCv(ofRectangle rect) { + return cv::Rect(rect.x, rect.y, rect.width, rect.height); + } + + Mat toCv(ofMesh& mesh) { + vector& vertices = mesh.getVertices(); + return Mat(1, vertices.size(), CV_32FC3, &vertices[0]); + } + + vector toCv(const ofPolyline& polyline) { + // if polyline.getVertices() 
were const, this could wrap toCv(vec) + vector contour(polyline.size()); + for(int i = 0; i < polyline.size(); i++) { + contour[i].x = polyline[i].x; + contour[i].y = polyline[i].y; + } + return contour; + } + + vector toCv(const vector& points) { + vector out(points.size()); + for(int i = 0; i < points.size(); i++) { + out[i].x = points[i].x; + out[i].y = points[i].y; + } + return out; + } + + vector toCv(const vector& points) { + vector out(points.size()); + for(int i = 0; i < points.size(); i++) { + out[i].x = points[i].x; + out[i].y = points[i].y; + out[i].z = points[i].z; + } + return out; + } + + Scalar toCv(ofColor color) { + return Scalar(color.r, color.g, color.b, color.a); + } + + ofVec2f toOf(Point2f point) { + return ofVec2f(point.x, point.y); + } + + ofVec3f toOf(Point3f point) { + return ofVec3f(point.x, point.y, point.z); + } + + ofRectangle toOf(cv::Rect rect) { + return ofRectangle(rect.x, rect.y, rect.width, rect.height); + } + + ofPolyline toOf(cv::RotatedRect rect) { + vector corners(4); + rect.points(&corners[0]); + ofPolyline polyline = toOf(corners); + return polyline; + } + + float getMaxVal(int cvDepth) { + switch(cvDepth) { + case CV_8U: return numeric_limits::max(); + case CV_16U: return numeric_limits::max(); + + case CV_8S: return numeric_limits::max(); + case CV_16S: return numeric_limits::max(); + case CV_32S: return numeric_limits::max(); + + case CV_32F: return 1; + case CV_64F: default: return 1; + } + } + + float getMaxVal(const Mat& mat) { + return getMaxVal(mat.depth()); + } + + // for some reason, cvtColor handles this info internally rather than having + // a single helper function. 
so we have to create a helper function to aid + // in doing the allocationg ofxCv::convertColor() +#define mkcase(x, y) {case x: return y;} + int getTargetChannelsFromCode(int conversionCode) { + switch(conversionCode) { + mkcase(CV_RGB2RGBA,4) mkcase(CV_RGBA2RGB,3) mkcase(CV_RGB2BGRA,4) + mkcase(CV_RGBA2BGR,3) mkcase(CV_BGR2RGB,3) mkcase(CV_BGRA2RGBA,4) + mkcase(CV_BGR2GRAY,1) mkcase(CV_RGB2GRAY,1) mkcase(CV_GRAY2RGB,3) + mkcase(CV_GRAY2RGBA,4) mkcase(CV_BGRA2GRAY,1) mkcase(CV_RGBA2GRAY,1) + mkcase(CV_BGR5652BGR,3) mkcase(CV_BGR5652RGB,3) mkcase(CV_BGR5652BGRA,4) + mkcase(CV_BGR5652RGBA,4) mkcase(CV_BGR5652GRAY,1) mkcase(CV_BGR5552BGR,3) + mkcase(CV_BGR5552RGB,3) mkcase(CV_BGR5552BGRA,4) mkcase(CV_BGR5552RGBA,4) + mkcase(CV_BGR5552GRAY,1) mkcase(CV_BGR2XYZ,3) mkcase(CV_RGB2XYZ,3) + mkcase(CV_XYZ2BGR,3) mkcase(CV_XYZ2RGB,3) mkcase(CV_BGR2YCrCb,3) + mkcase(CV_RGB2YCrCb,3) mkcase(CV_YCrCb2BGR,3) mkcase(CV_YCrCb2RGB,3) + mkcase(CV_BGR2HSV,3) mkcase(CV_RGB2HSV,3) mkcase(CV_BGR2Lab,3) + mkcase(CV_RGB2Lab,3) mkcase(CV_BayerGB2BGR,3) mkcase(CV_BayerBG2RGB,3) + mkcase(CV_BayerGB2RGB,3) mkcase(CV_BayerRG2RGB,3) mkcase(CV_BGR2Luv,3) + mkcase(CV_RGB2Luv,3) mkcase(CV_BGR2HLS,3) mkcase(CV_RGB2HLS,3) + mkcase(CV_HSV2BGR,3) mkcase(CV_HSV2RGB,3) mkcase(CV_Lab2BGR,3) + mkcase(CV_Lab2RGB,3) mkcase(CV_Luv2BGR,3) mkcase(CV_Luv2RGB,3) + mkcase(CV_HLS2BGR,3) mkcase(CV_HLS2RGB,3) mkcase(CV_BayerBG2RGB_VNG,3) + mkcase(CV_BayerGB2RGB_VNG,3) mkcase(CV_BayerRG2RGB_VNG,3) + mkcase(CV_BayerGR2RGB_VNG,3) mkcase(CV_BGR2HSV_FULL,3) + mkcase(CV_RGB2HSV_FULL,3) mkcase(CV_BGR2HLS_FULL,3) + mkcase(CV_RGB2HLS_FULL,3) mkcase(CV_HSV2BGR_FULL,3) + mkcase(CV_HSV2RGB_FULL,3) mkcase(CV_HLS2BGR_FULL,3) + mkcase(CV_HLS2RGB_FULL,3) mkcase(CV_LBGR2Lab,3) mkcase(CV_LRGB2Lab,3) + mkcase(CV_LBGR2Luv,3) mkcase(CV_LRGB2Luv,3) mkcase(CV_Lab2LBGR,4) + mkcase(CV_Lab2LRGB,4) mkcase(CV_Luv2LBGR,4) mkcase(CV_Luv2LRGB,4) + mkcase(CV_BGR2YUV,3) mkcase(CV_RGB2YUV,3) mkcase(CV_YUV2BGR,3) + mkcase(CV_YUV2RGB,3) + default: 
return 0; + } + } +} \ No newline at end of file diff --git a/addons/ofxCv/libs/ofxCv/src/Wrappers.cpp b/addons/ofxCv/libs/ofxCv/src/Wrappers.cpp new file mode 100644 index 00000000000..61839c41508 --- /dev/null +++ b/addons/ofxCv/libs/ofxCv/src/Wrappers.cpp @@ -0,0 +1,123 @@ +#include "ofxCv/Wrappers.h" + +namespace ofxCv { + + using namespace cv; + + void loadMat(Mat& mat, string filename) { + FileStorage fs(ofToDataPath(filename), FileStorage::READ); + fs["Mat"] >> mat; + } + + void saveMat(Mat mat, string filename) { + FileStorage fs(ofToDataPath(filename), FileStorage::WRITE); + fs << "Mat" << mat; + } + + void saveImage(Mat& mat, string filename, ofImageQualityType qualityLevel) { + if(mat.depth() == CV_8U) { + ofPixels pix8u; + toOf(mat, pix8u); + ofSaveImage(pix8u, filename, qualityLevel); + } else if(mat.depth() == CV_16U) { + ofShortPixels pix16u; + toOf(mat, pix16u); + ofSaveImage(pix16u, filename, qualityLevel); + } else if(mat.depth() == CV_32F) { + ofFloatPixels pix32f; + toOf(mat, pix32f); + ofSaveImage(pix32f, filename, qualityLevel); + } + } + + Vec3b convertColor(Vec3b color, int code) { + Mat_ mat(1, 1, CV_8UC3); + mat(0, 0) = color; + cvtColor(mat, mat, code); + return mat(0, 0); + } + + ofColor convertColor(ofColor color, int code) { + Vec3b cvColor(color.r, color.g, color.b); + Vec3b result = convertColor(cvColor, code); + return ofColor(result[0], result[1], result[2], color.a); + } + + ofPolyline convexHull(const ofPolyline& polyline) { + vector contour = toCv(polyline); + vector hull; + convexHull(Mat(contour), hull); + return toOf(hull); + } + + // this should be replaced by c++ 2.0 api style code once available + vector convexityDefects(const vector& contour) { + vector hullIndices; + convexHull(Mat(contour), hullIndices, false, false); + vector convexityDefects; + if(hullIndices.size() > 0 && contour.size() > 0) { + CvMat contourMat = cvMat(1, contour.size(), CV_32SC2, (void*) &contour[0]); + CvMat hullMat = cvMat(1, hullIndices.size(), 
CV_32SC1, (void*) &hullIndices[0]); + CvMemStorage* storage = cvCreateMemStorage(0); + CvSeq* defects = cvConvexityDefects(&contourMat, &hullMat, storage); + for(int i = 0; i < defects->total; i++){ + CvConvexityDefect* cur = (CvConvexityDefect*) cvGetSeqElem(defects, i); + cv::Vec4i defect; + defect[0] = cur->depth_point->x; + defect[1] = cur->depth_point->y; + defect[2] = (cur->start->x + cur->end->x) / 2; + defect[3] = (cur->start->y + cur->end->y) / 2; + convexityDefects.push_back(defect); + } + cvReleaseMemStorage(&storage); + } + return convexityDefects; + } + + vector convexityDefects(const ofPolyline& polyline) { + vector contour2f = toCv(polyline); + vector contour2i; + Mat(contour2f).copyTo(contour2i); + return convexityDefects(contour2i); + } + + cv::RotatedRect minAreaRect(const ofPolyline& polyline) { + return minAreaRect(Mat(toCv(polyline))); + } + + cv::RotatedRect fitEllipse(const ofPolyline& polyline) { + return fitEllipse(Mat(toCv(polyline))); + } + + void fitLine(const ofPolyline& polyline, ofVec2f& point, ofVec2f& direction) { + Vec4f line; + fitLine(Mat(toCv(polyline)), line, CV_DIST_L2, 0, .01, .01); + direction.set(line[0], line[1]); + point.set(line[2], line[3]); + } + + ofMatrix4x4 estimateAffine3D(vector& from, vector& to, float accuracy) { + if(from.size() != to.size() || from.size() == 0 || to.size() == 0) { + return ofMatrix4x4(); + } + vector outliers; + return estimateAffine3D(from, to, outliers, accuracy); + } + + ofMatrix4x4 estimateAffine3D(vector& from, vector& to, vector& outliers, float accuracy) { + Mat fromMat(1, from.size(), CV_32FC3, &from[0]); + Mat toMat(1, to.size(), CV_32FC3, &to[0]); + Mat affine; + estimateAffine3D(fromMat, toMat, affine, outliers, 3, accuracy); + ofMatrix4x4 affine4x4; + affine4x4.set(affine.ptr()); + affine4x4(3, 0) = 0; + affine4x4(3, 1) = 0; + affine4x4(3, 2) = 0; + affine4x4(3, 3) = 1; + Mat affine4x4Mat(4, 4, CV_32F, affine4x4.getPtr()); + affine4x4Mat = affine4x4Mat.t(); + 
affine4x4.set(affine4x4Mat.ptr()); + return affine4x4; + } +} \ No newline at end of file diff --git a/addons/ofxCv/src/ofxCv.h b/addons/ofxCv/src/ofxCv.h new file mode 100644 index 00000000000..2ab482b7717 --- /dev/null +++ b/addons/ofxCv/src/ofxCv.h @@ -0,0 +1,30 @@ +#pragma once + +// cv +#include "opencv2/opencv.hpp" + +// ofxCv + +// there are three types of functions in the ofxCv namespace +#include "ofxCv/Utilities.h" // low-level utilities like imitate and toCv +#include "ofxCv/Wrappers.h" // wrappers that accept toCv-compatible objects +#include "ofxCv/Helpers.h" // helper functions that handle more complex tasks + +/* + all functions guarantee the size of the output with imitate when possible. data + is returned using arguments when an expensive copy would be required or when + you want to use a preallocated buffer, and a return value is used when the data + is small or there is probably no preallocated buffer. + */ + +// also in the namespace are a few helper classes that make common tasks easier: +#include "ofxCv/Distance.h" // edit distance +#include "ofxCv/Calibration.h" // camera calibration +#include "ofxCv/Tracker.h" // object tracking +#include "ofxCv/ContourFinder.h" // contour finding and tracking +#include "ofxCv/RunningBackground.h" // background subtraction +#include "ofxCv/Flow.h" // optical flow, from james george +#include "ofxCv/ObjectFinder.h" // object finding (e.g., face detection) +#include "ofxCv/Kalman.h" // Kalman filter for smoothing + +// <3 kyle diff --git a/examples/addons/allAddonsExample/addons.make b/examples/addons/allAddonsExample/addons.make index 4cf9257ff3b..7a8b3ea48c9 100644 --- a/examples/addons/allAddonsExample/addons.make +++ b/examples/addons/allAddonsExample/addons.make @@ -1,6 +1,7 @@ ofxAssimpModelLoader ofxNetwork ofxOpenCv +ofxCv ofxOsc ofxXmlSettings ofxAssimpModelLoader diff --git a/license.md b/license.md new file mode 100644 index 00000000000..df72e0130ba --- /dev/null +++ b/license.md @@ -0,0 +1,9 @@ 
+The code in this repository is available under the [MIT License](https://secure.wikimedia.org/wikipedia/en/wiki/Mit_license). + +Copyright (c) 2012- Kyle McDonald + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/ofxaddons_thumbnail.png b/ofxaddons_thumbnail.png new file mode 100644 index 00000000000..55dea072660 Binary files /dev/null and b/ofxaddons_thumbnail.png differ diff --git a/readme.md b/readme.md new file mode 100644 index 00000000000..054905510c1 --- /dev/null +++ b/readme.md @@ -0,0 +1,172 @@ +# Introduction + +ofxCv represents an alternative approach to wrapping OpenCV for openFrameworks. + +# Installation + +Either clone out the source code using git: + + > cd openFrameworks/addons/ + > git clone https://github.com/kylemcdonald/ofxCv.git + +Or download the source from GitHub [here](https://github.com/kylemcdonald/ofxCv/archive/master.zip), unzip the folder, rename it from `ofxCv-master` to `ofxCv` and place it in your `openFrameworks/addons` folder. 
+ +To run the examples, import them into the project generator, create a new project, and open the project file in your IDE. + +# Goals + +ofxCv has a few goals driving its development. + +### Wrap complex things in a helpful way + +Sometimes this means: providing wrapper functions that require fewer arguments than the real CV functions, providing a smart interface that handles dynamic memory allocation to make things faster for you, or providing in place and out of place alternatives. + +### Present the power of OpenCv clearly + +This means naming things in an intuitive way, and, more importantly, providing classes that have methods that transform the data represented by that class. It also means providing demos of CV functions, and generally being more useful than ofxOpenCv. + +### Interoperability of openFrameworks and OpenCv + +Making it easy to work directly with CV by providing lightweight conversion functions, and providing wrappers for CV functions that do the conversions for you. + +### Elegant internal OpenCv code + +Provide clean implementations of all functions in order to provide a stepping stone to direct OpenCV use. This means using function names and variable names that follow the OpenCV documentation, and spending the time to learn proper CV usage so I can explain it clearly to others through code. Sometimes there will be heavy templating in order to make OF interoperable with OpenCV, but this should be avoided in favor of using straight OpenCV as often as possible. + +# Usage + +Sometimes this readme will fall out of date. Please refer to the examples as the primary reference in that case. + +## Project setup + +Using ofxCv requires: + +* ofxCv/libs/ofxCv/include/ Which contains all the ofxCv headers. +* ofxCv/libs/ofxCv/src/ Which contains all the ofxCv source. +* ofxCv/src/ Which ties together all of ofxCv into a single include. 
+* opencv/include/ The OpenCv headers, located in addons/ofxOpenCv/ +* opencv/lib/ The precompiled static OpenCv libraries, located in addons/ofxOpenCv/ + +Your compiler will also need to know where the OpenCv headers are. In XCode this means modifying one line in Project.xconfig: + + HEADER_SEARCH_PATHS = $(OF_CORE_HEADERS) "../../../addons/ofxOpenCv/libs/opencv/include/" "../../../addons/ofxCv/libs/ofxCv/include/" + +Alternatively, I recommend using [OFXCodeMenu](https://github.com/openframeworks/OFXcodeMenu) to add ofxCv to your project. + +## Including ofxCv + +Inside your ofApp.h you will need one include: + + #include "ofxCv.h" + +OpenCv uses the `cv` namespace, and ofxCv uses the `ofxCv` namespace. You can automatically import them by writing this in your `.cpp` files: + + using namespace cv; + using namespace ofxCv; + +If you look inside the ofxCv source, you'll find lots of cases of `ofxCv::` and `cv::`. In some rare cases, you'll need to write `cv::` in your code. For example, on OSX `Rect` and `Point` are defined by OpenCv, but also `MacTypes.h`. So if you're using an OpenCv `Rect` or `Point` you'll need to say so explicitly with `cv::Rect` or `cv::Point` to disambiguate. + +ofxCv takes advantage of namespaces by using overloaded function names. This means that the ofxCv wrapper for `cv::Canny()` is also called `ofxCv::Canny()`. If you write simply `Canny()`, the correct function will be chosen based on the arguments you pass. + +## Working with ofxCv + +Unlike ofxOpenCv, ofxCv encourages you to use either native openFrameworks types or native OpenCv types, rather than introducing a third type like `ofxCvImage`. To work with OF and OpenCv types in a fluid way, ofxCv includes the `toCv()` and `toOf()` functions. They provide the ability to convert openFrameworks data to OpenCv data and vice versa. For large data, like images, this is done by wrapping the data rather than copying it. For small data, like vectors, this is done by copying the data. 
+ +The rest of ofxCv is mostly helper functions (for example, `threshold()`) and wrapper classes (for example, `Calibration`). + +### toCv() and copy() + +`toCv()` is used to convert openFrameworks data to OpenCv data. For example: + + ofImage img; + img.load("image.png"); + Mat imgMat = toCv(img); + +This creates a wrapper for `img` called `imgMat`. To create a deep copy, use `clone()`: + + Mat imgMatClone = toCv(img).clone(); + +Or `copy()`, which works with any type supported by `toCv()`: + + Mat imgCopy; + copy(img, imgCopy); + +`toCv()` is similar to ofxOpenCv's `ofxCvImage::getCvImage()` method, which returns an `IplImage*`. The biggest difference is that you can't always use `toCv()` "in place" when calling OpenCv code directly. In other words, you can always write this: + + Mat imgMat = toCv(img); + cv::someFunction(imgMat, ...); + +But you should avoid using `toCv()` like this: + + cv::someFunction(toCv(img), ...); + +Because there are cases where in place usage will cause a compile error. More specifically, calling `toCv()` in place will fail if the function requires a non-const reference for that parameter. + +### imitate() + +`imitate()` is primarily used internally by ofxCv. When doing CV, you regularly want to allocate multiple buffers of similar dimensions and channels. `imitate()` follows a kind of prototype pattern, where you pass a prototype image `original` and the image to be allocated `mirror` to `imitate(mirror, original)`. `imitate()` has two big advantages: + +* It works with `Mat`, `ofImage`, `ofPixels`, `ofVideoGrabber`, and anything else that extends `ofBaseHasPixels`. +* It will only reallocate memory if necessary. This means it can be used liberally. + +If you are writing a function that returns data, the ofxCv style is to call `imitate()` on the data to be returned from inside the function, allocating it as necessary. + +### drawMat() vs. 
toOf() + +Sometimes you want to draw a `Mat` to the screen directly, as quickly and easily as possible, and `drawMat()` will do this for you. `drawMat()` is not the most optimal way of drawing images to the screen, because it creates a texture every time it draws. If you want to draw things efficiently, you should allocate a texture using `ofImage img;` *once* and draw it using `img.draw()`. + +1. Either use `Mat mat = toCv(img);` to treat the `ofImage` as a `Mat`, modify the `mat`, then `img.update()` to upload the modified pixels to the GPU. +2. Alternatively; call `toOf(mat, img)` each time after modifying the `Mat`. This will only reallocate the texture if necessary, e.g. when the size has changed. + + +# Working with OpenCv 2 + +OpenCv 2 is an incredibly well designed API, and ofxCv encourages you to use it directly. Here are some hints on using OpenCv. + +### OpenCv Types + +OpenCv 2 uses the `Mat` class in place of the old `IplImage`. Memory allocation, copying, and deallocation are all handled automatically. `operator=` is a shallow, reference-counted copy. A `Mat` contains a collection of `Scalar` objects. A `Scalar` contains a collection of basic types (unsigned char, bool, double, etc.). `Scalar` is a short vector for representing color or other multidimensional information. The hierarchy is: `Mat` contains `Scalar`, `Scalar` contains basic types. + +Different functions accept `Mat` in different ways: + +* `Mat` will create a lightweight copy of the underlying data. It's easy to write, and it allows you to use `toCv()` "in-place" when passing arguments to the function. +* `Mat&` allows the function to modify the header passed in. This means the function can allocate if necessary. +* `const Mat&` means that the function isn't going to modify the underlying data. This should be used instead of `Mat` when possible. It also allows "in-place" `toCv()` usage. 
+ +### Mat creation + +If you're working with `Mat` directly, it's important to remember that OpenCv talks about `rows` and `cols` rather than `width` and `height`. This means that the arguments are "backwards" when they appear in the `Mat` constructor. Here's an example of creating a `Mat` wrapper for some grayscale `unsigned char* pixels` for which we know the `width` and `height`: + + Mat mat = Mat(height, width, CV_8UC1, pixels, 0); + +### Mat operations + +Basic mathematical operations on `Mat` objects of the same size and type can be accomplished with matrix expressions. Matrix expressions are a collection of overloaded operators that accept `Mat`, `Scalar`, and basic types. A normal mathematical operation might look like: + + float x, a, b; + ... + x = (a + b) * 10; + +A matrix operation looks similar: + + Mat x, a, b; + ... + x = (a + b) * 10; + +This will add every element of `a` and `b`, then multiply the results by 10, and finally assign the result to `x`. + +Available matrix expressions include mathematical operators `+`, `-`, `/` (per element division), `*` (matrix multiplication), `.mul()` (per-element multiplication). As well as comparison operators `!=`, `==`, `<`, `>`, `>=`, `<=` (useful for thresholding). Binary operators `&`, `|`, `^`, `~`. And a few others like `abs()`, `min()`, and `max()`. For the complete listing see the OpenCv documentation or `mat.hpp`. + +# Code Style + +ofxCv tries to have a consistent code style. It's most similar to the K&R variant used for Java, and the indentation is primarily determined by XCode's auto-indent feature. + +Multiline comments are used for anything beyond two lines. + +Case statements have a `default:` fall-through with the last case. + +When two or three similar variables are initialized, commas are used instead of multiple lines. For example `Mat srcMat = toCv(src), dstMat = toCv(dst);`. This style was inherited from reading Jason Saragih's FaceTracker. 
+ +- - -- + +*ofxCv was developed with support from [Yamaguchi Center for Arts and Media](http://ycam.jp/).* diff --git a/update-projects.py b/update-projects.py new file mode 100644 index 00000000000..73ac243990f --- /dev/null +++ b/update-projects.py @@ -0,0 +1,75 @@ +#!/bin/python + +import glob, re, shutil, fileinput, os + +def toUpper(pattern): + return pattern.group(1).upper() + +def replaceInFile(filename, pattern, replacement): + for line in fileinput.FileInput(filename, inplace=1): + print re.sub(pattern, replacement, line), + +sourceProjectName = "EmptyExample" + +# windows code::blocks +sourceCbp = "example-empty/EmptyExample.cbp" +sourceWorkspace = "example-empty/EmptyExample.workspace" + +# windows vs2010 +sourceSln = "example-empty/EmptyExample.sln" +sourceVcxproj = "example-empty/EmptyExample.vcxproj" +sourceVcxprojFilters = "example-empty/EmptyExample.vcxproj.filters" +sourceVcxprojUser = "example-empty/EmptyExample.vcxproj.user" + +# xcode osx +sourceXcconfig = "example-empty/Project.xcconfig" +sourcePlist = "example-empty/openFrameworks-Info.plist" +sourceXcodeproj = "example-empty/ofApp.xcodeproj/" + +# linux +sourceMakefile = "example-empty/Makefile" +sourceConfigs = "example-empty/config.make" +sourceAddons = "example-empty/addons.make" + +examples = glob.glob("example*") +for example in examples: + if example != "example-empty": + sansExample = re.sub("^example", "", example) + upper = re.sub("-([a-z])", toUpper, sansExample) + targetProjectName = upper + "Example" + + #linux + targetDir = "{0}/".format(example, targetProjectName) + emptyDir = "example-empty" + shutil.copy(sourceMakefile, targetDir) + shutil.copy(sourceConfigs, targetDir) + shutil.copy(sourceAddons, targetDir) + + # windows code::blocks + targetCbp = "{0}/{1}.cbp".format(example, targetProjectName) + targetWorkspace = "{0}/{1}.workspace".format(example, targetProjectName) + shutil.copy(sourceCbp, targetCbp) + shutil.copy(sourceWorkspace, targetWorkspace) + 
replaceInFile(targetCbp, sourceProjectName, targetProjectName) + replaceInFile(targetWorkspace, sourceProjectName, targetProjectName) + + # windows vs2010 + targetSln = "{0}/{1}.sln".format(example, targetProjectName) + targetVcxproj = "{0}/{1}.vcxproj".format(example, targetProjectName) + targetVcxprojFilters = "{0}/{1}.vcxproj.filters".format(example, targetProjectName) + targetVcxprojUser = "{0}/{1}.vcxproj.user".format(example, targetProjectName) + shutil.copy(sourceSln, targetSln) + shutil.copy(sourceVcxproj, targetVcxproj) + shutil.copy(sourceVcxprojFilters, targetVcxprojFilters) + shutil.copy(sourceVcxprojUser, targetVcxprojUser) + replaceInFile(targetSln, sourceProjectName, targetProjectName) + replaceInFile(targetVcxproj, sourceProjectName, targetProjectName) + + # xcode osx + targetXcodeproj = "{0}/ofApp.xcodeproj".format(example) + shutil.copy(sourceXcconfig, example) + shutil.copy(sourcePlist, example) + try: shutil.rmtree(targetXcodeproj) + except: pass + shutil.copytree(sourceXcodeproj, targetXcodeproj) + print "Copied into " + targetProjectName + "."