-
Notifications
You must be signed in to change notification settings - Fork 465
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
async thread that emits data #462
Comments
Hi @p3x-robot, |
If I just hack it out and copy somehow into the Or anyone knows how to do it anyhow (hopefully with |
Hi @p3x-robot, |
Hi @NickNaso ! You are very kind. |
@NickNaso I solved it with a mutex. The worker is updating a vector of rectangles (detected faces) using a mutex and there is a function that also uses the same mutex lock and just returns to nodejs the list of detected rectangles. |
@p3x-robot Could you post your code or an example that emulate your code? |
@NickNaso There is an issue how I can skip with a stream, so I have a hack, but the rest looks like works OK. #include <napi.h>
#include "facedetect.h"

#include <atomic>
#include <iostream>
#include <mutex>        // std::unique_lock
#include <shared_mutex> // std::shared_mutex, std::shared_lock
#include <thread>

#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/objdetect.hpp"
// https://stackoverflow.com/questions/55313194/do-i-have-to-lock-a-vectors-that-i-just-enumerate-or-i-only-do-it-when-i-change?noredirect=1#comment97357425_55313194
using namespace std;
using namespace cv;

// Guards `faces`: written by the worker thread, read from the JS thread.
std::shared_mutex _facesMutex;

// Default Haar cascade shipped with OpenCV 4 (fixed stray second semicolon).
string cascadeName = "/usr/local/share/opencv4/haarcascades/haarcascade_frontalface_alt.xml";

// Cross-thread stop flag: written by FaceDetectCancel (JS thread) and polled
// by the capture loop (worker thread). atomic<bool> instead of plain bool —
// unsynchronized concurrent access to a non-atomic object is a data race (UB).
std::atomic<bool> running{true};

// Latest set of detected face rectangles; protect with _facesMutex.
vector<Rect> faces;
class FaceDetectWorker : public Napi::AsyncWorker {
public:
FaceDetectWorker(Napi::Function& callback, string url, int maxDelayer)
: Napi::AsyncWorker(callback), url(url), maxDelayer(maxDelayer) {
}
~FaceDetectWorker() {
}
vector<Rect> detectFaces(Mat &img, CascadeClassifier &cascade)
{
double t = 0;
vector<Rect> faces;
Mat gray, smallImg;
cvtColor( img, gray, COLOR_BGR2GRAY );
//double scale = 1;
// scale = 1, fx = 1 / scale
double fx = 1;
//resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR_EXACT );
equalizeHist( gray, smallImg );
//t = (double)getTickCount();
cascade.detectMultiScale( smallImg, faces,
1.1, 2, 0
//|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE,
Size(30, 30) );
//t = (double)getTickCount() - t;
//printf( "detection time = %g ms\n", t*1000/getTickFrequency());
return faces;
}
// Executed inside the worker-thread.
// It is not safe to access JS engine data structure
// here, so everything we need for input and output
// should go on `this`.
void Execute () {
running = true;
Mat frame, image;
VideoCapture capture;
CascadeClassifier cascade;
if (!cascade.load(samples::findFile(cascadeName)))
{
Napi::AsyncWorker::SetError("ERROR: Could not load classifier cascade");
return;
}
if (!capture.open(url))
{
Napi::AsyncWorker::SetError("ERROR: Error opening video stream " + url);
return;
}
if( capture.isOpened() )
{
cout << "Video capturing has been started ..." << endl;
try {
int delayer = 0;
while(running) {
delayer++;
//capture.read(frame);
capture >> frame;
if( frame.empty()) {
continue;
}
if (delayer >= maxDelayer) {
delayer = 0;
Mat frame1 = frame.clone();
vector<Rect> facesResult = detectFaces(frame1, cascade);
std::unique_lock lock(_facesMutex);
faces = facesResult;
lock.unlock();
}
//waitKey(250);
//std::this_thread::sleep_for(std::chrono::milliseconds(1000));
}
} catch (std::exception &e) {
Napi::AsyncWorker::SetError(e.what());
}
} else {
Napi::AsyncWorker::SetError("ERROR: Could not open video camera " + url);
}
}
// Executed when the async work is complete
// this function will be run inside the main event loop
// so it is safe to use JS engine data again
void OnOK() {
Napi::HandleScope scope(Env());
Callback().Call({Env().Undefined(), Env().Undefined()});
}
private:
string url;
int maxDelayer = 3;
};
// Asynchronous access to the `Estimate()` function
// Starts the asynchronous face-detection worker.
// JS signature: facedetectVideo(url: string, maxDelayer: number, callback: function)
Napi::Value FaceDetect(const Napi::CallbackInfo& info) {
    Napi::Env env = info.Env();
    // Validate arguments up front instead of relying on unchecked As<> casts,
    // which would misbehave on wrong argument types.
    if (info.Length() < 3 || !info[0].IsString() || !info[1].IsNumber() || !info[2].IsFunction()) {
        Napi::TypeError::New(env, "Expected (url: string, maxDelayer: number, callback: function)")
            .ThrowAsJavaScriptException();
        return env.Undefined();
    }
    std::string url = info[0].As<Napi::String>().Utf8Value();
    int maxDelayer = info[1].As<Napi::Number>().Int32Value();
    Napi::Function callback = info[2].As<Napi::Function>();
    // AsyncWorker deletes itself after OnOK/OnError fires, so `new` without a
    // matching delete is the intended node-addon-api ownership pattern here.
    FaceDetectWorker* faceDetectWorker = new FaceDetectWorker(callback, url, maxDelayer);
    faceDetectWorker->Queue();
    return env.Undefined();
}
// Returns the most recent detection results as an array of
// {x, y, width, height} objects.
Napi::Array FaceDetectGet(const Napi::CallbackInfo &info) {
    // Snapshot the shared vector under the reader lock FIRST. The original
    // read faces.size() before locking and then indexed the global `faces`
    // (not the copy) inside the loop — both raced the worker thread.
    vector<Rect> snapshot;
    {
        std::shared_lock lock(_facesMutex);
        snapshot = faces;
    }
    Napi::Array faceArray = Napi::Array::New(info.Env(), snapshot.size());
    for (uint32_t i = 0; i < snapshot.size(); i++) {
        const Rect &rect = snapshot[i];  // read the locked copy, not the global
        Napi::Object obj = Napi::Object::New(info.Env());
        obj.Set("x", rect.x);
        obj.Set("y", rect.y);
        obj.Set("width", rect.width);
        obj.Set("height", rect.height);
        faceArray[i] = obj;
    }
    return faceArray;
}
// Stop request from JS: flips the flag polled by the capture loop in
// FaceDetectWorker::Execute(), which then exits and triggers OnOK.
// NOTE(review): `running` is declared as a plain bool shared between the JS
// thread and the worker thread — consider std::atomic<bool> to avoid a data race.
void FaceDetectCancel(const Napi::CallbackInfo &info) {
running = false;
} The NodeJs API: #include <napi.h>
#include "pi-worker.h"
#include "facedetect.h"
#include "arrayfire/pi-worker.h"
#include "arrayfire/test.h"
#include "genetic.h"
using namespace Genetic;
// Registers every native entry point on the module's exports object.
Napi::Object Init(Napi::Env env, Napi::Object exports) {
    // Seed the C PRNG once at module load (used by the genetic module).
    srand((unsigned) (time(0)));
    // Small helper so each export is a single, uniform line.
    auto expose = [&](const char* name, auto fn) {
        exports.Set(Napi::String::New(env, name), Napi::Function::New(env, fn));
    };
    expose("piWorker",           CalculatePiAsync);
    expose("arrayfirePiWorker",  ArrayFireCalculatePiAsync);
    expose("arrayfireTest",      ArrayFireTestAsync);
    expose("geneticGetGenes",    Genetic::GetGenes);
    expose("geneticGenerate",    Genetic::Generate);
    expose("facedetectVideo",    FaceDetect);
    expose("facedetectGet",      FaceDetectGet);
    expose("facedetectCancel",   FaceDetectCancel);
    return exports;
}
NODE_API_MODULE(NODE_GYP_MODULE_NAME, Init) |
Hello!
How are you?
I am trying to use a video stream and then detect faces using OpenCV.
My problem is, that there is the
AsyncWorker
, but it is just 1 execution, not a continuous stream. Now, the face detection works like a stream that continuously emits the detected-faces data to NodeJS, so I need to keep the thread running and emitting at roughly 30 FPS.
Is the
node-addon-api
capable of doing this, or do I have to go down to the low-level n-api
and write the whole program? The text was updated successfully, but these errors were encountered: