Mirror of https://gitlab.com/Shinobi-Systems/ShinobiCE.git (synced 2025-03-09)
Commit c8b67c57b4 ("Coy Cobra")
parent 157bf6feb0
45 changed files with 3076 additions and 144 deletions
plugins/tensorflow/.gitignore (new file, vendored, 2 lines)
@@ -0,0 +1,2 @@
conf.json
cascades
plugins/tensorflow/INSTALL.sh (new file, 20 lines)
@@ -0,0 +1,20 @@
#!/bin/bash
mkdir data
mkdir data/inception
chmod -R 777 data
wget https://cdn.shinobi.video/weights/inception5h.zip -O inception5h.zip
unzip inception5h.zip -d data/inception
if [ $(dpkg-query -W -f='${Status}' opencv_version 2>/dev/null | grep -c "ok installed") -eq 0 ]; then
    echo "Shinobi - Do you want to let the 'opencv4nodejs' npm package install OpenCV?"
    echo "Only do this if you do not have OpenCV already or will not use a GPU (Hardware Acceleration)."
    echo "(y)es or (N)o"
    read nodejsinstall
    if [ "$nodejsinstall" = "y" ] || [ "$nodejsinstall" = "Y" ]; then
        export OPENCV4NODEJS_DISABLE_AUTOBUILD=0
    else
        export OPENCV4NODEJS_DISABLE_AUTOBUILD=1
    fi
else
    export OPENCV4NODEJS_DISABLE_AUTOBUILD=1
fi
npm install opencv4nodejs moment express canvas@1.6 --unsafe-perm
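The installer unpacks the Inception weights into data/inception; the plugin file below resolves tensorflow_inception_graph.pb and imagenet_comp_graph_label_strings.txt from that directory. A minimal sketch, assuming that layout, to confirm the download succeeded before starting the plugin:

// sanity-check the files INSTALL.sh is expected to unpack (assumed layout)
const fs = require('fs');
const path = require('path');
const modelDir = path.join(__dirname, 'data', 'inception');
['tensorflow_inception_graph.pb', 'imagenet_comp_graph_label_strings.txt']
    .forEach(function (file) {
        if (!fs.existsSync(path.join(modelDir, file))) {
            console.error('missing ' + file + ' - re-run INSTALL.sh');
            process.exit(1);
        }
    });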
plugins/tensorflow/conf.sample.json (new file, 9 lines)
@@ -0,0 +1,9 @@
{
    "plug":"Tensorflow",
    "host":"localhost",
    "port":8080,
    "hostPort":8082,
    "key":"change_this_to_something_very_random____make_sure_to_match__/plugins/opencv/conf.json",
    "mode":"client",
    "type":"detector"
}
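The sample is copied to conf.json (which the .gitignore above excludes from the repo), and the key must match the one registered with Shinobi. One possible way to generate a conf.json with a random key, a sketch assuming Node's built-in crypto module:

// sketch: derive conf.json from the sample with a random key
const fs = require('fs');
const crypto = require('crypto');
const conf = JSON.parse(fs.readFileSync(__dirname + '/conf.sample.json', 'utf8'));
conf.key = crypto.randomBytes(32).toString('hex');
fs.writeFileSync(__dirname + '/conf.json', JSON.stringify(conf, null, 2));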
plugins/tensorflow/openalpr.conf (new file, 94 lines)
@@ -0,0 +1,94 @@

; Specify the path to the runtime data directory
runtime_dir = ${CMAKE_INSTALL_PREFIX}/share/openalpr/runtime_data

ocr_img_size_percent = 1.33333333
state_id_img_size_percent = 2.0

; Calibrating your camera improves detection accuracy in cases where vehicle plates are captured at a steep angle
; Use the openalpr-utils-calibrate utility to calibrate your fixed camera to adjust for an angle
; Once done, update the prewarp config with the values obtained from the tool
prewarp =

; Detection will ignore plates that are too large. This is a good efficiency technique to use if the
; plates are going to be a fixed distance away from the camera (e.g., you will never see plates that fill
; up the entire image)
max_plate_width_percent = 100
max_plate_height_percent = 100

; detection_iteration_increase is the percentage that the LBP frame increases each iteration.
; It must be greater than 1.0. A value of 1.01 means increase by 1%, 1.10 increases it by 10% each time.
; So a 1% increase would be ~10x slower than 10% to process, but it has a higher chance of landing
; directly on the plate and getting a strong detection
detection_iteration_increase = 1.1

; The minimum detection strength determines how sure the detection algorithm must be before signaling that
; a plate region exists. Technically this corresponds to LBP nearest neighbors (e.g., how many detections
; are clustered around the same area). For example, 2 = very lenient, 9 = very strict.
detection_strictness = 3

; The detection doesn't necessarily need an extremely high resolution image in order to detect plates
; Using a smaller input image should still find the plates and will do it faster
; Tweaking the max_detection_input values will resize the input image if it is larger than these sizes
; max_detection_input_width/height are specified in pixels
max_detection_input_width = 1280
max_detection_input_height = 720

; detector is the technique used to find license plate regions in an image. Value can be set to
; lbpcpu    - default LBP-based detector that uses the system CPU
; lbpgpu    - LBP-based detector that uses an Nvidia GPU to increase recognition speed.
; lbpopencl - LBP-based detector that uses an OpenCL GPU to increase recognition speed. Requires OpenCV 3.0
; morphcpu  - Experimental detector that detects white rectangles in an image. Does not require training.
detector = lbpgpu

; If set to true, all results must match a postprocess text pattern if a pattern is available.
; If not, the result is disqualified.
must_match_pattern = 0

; Bypasses plate detection. If this is set to 1, the library assumes that each region provided is a likely plate area.
skip_detection = 0

; Specifies the full path to an image file that constrains the detection area. Only the plate regions allowed through the mask
; will be analyzed. The mask image must match the resolution of your image to be analyzed. The mask is black and white.
; Black areas will be ignored, white areas will be searched. An empty value means no mask (scan the entire image).
detection_mask_image =

; OpenALPR can scan the same image multiple times with different randomization. Setting this to a value larger than
; 1 may increase accuracy, but will increase processing time linearly (e.g., analysis_count = 3 is 3x slower)
analysis_count = 1

; OpenALPR detects high-contrast plate crops and uses an alternative edge detection technique. Setting this to 0.0
; would classify ALL images as high-contrast, setting it to 1.0 would classify no images as high-contrast.
contrast_detection_threshold = 0.3

max_plate_angle_degrees = 15

ocr_min_font_point = 6

; Minimum OCR confidence percent to consider.
postprocess_min_confidence = 65

; Any OCR character lower than this will also add an equally likely
; chance that the character is incorrect and will be skipped. Value is a confidence percent
postprocess_confidence_skip_level = 80

debug_general = 0
debug_timing = 0
debug_detector = 0
debug_prewarp = 0
debug_state_id = 0
debug_plate_lines = 0
debug_plate_corners = 0
debug_char_segment = 0
debug_char_analysis = 0
debug_color_filter = 0
debug_ocr = 0
debug_postprocess = 0
debug_show_images = 0
debug_pause_on_frame = 0
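The detection_iteration_increase comment above quantifies the speed/accuracy trade-off: the number of LBP scan scales grows with the logarithm of the size range divided by the logarithm of the step. A worked sketch of that arithmetic (the 30 px minimum plate width is an assumed illustration, not a value from this config; the 1280 px ceiling is max_detection_input_width above):

// how many LBP scales a given detection_iteration_increase implies
function scaleSteps(increase, minPx, maxPx) {
    return Math.ceil(Math.log(maxPx / minPx) / Math.log(increase));
}
console.log(scaleSteps(1.01, 30, 1280)); // -> 378 scales: thorough but slow
console.log(scaleSteps(1.10, 30, 1280)); // -> 40 scales: ~10x fewer passes, as the note above says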
plugins/tensorflow/shinobi-tensorflow.js (new file, 502 lines)
@@ -0,0 +1,502 @@
//
// Shinobi - OpenCV Plugin
// Copyright (C) 2016-2025 Moe Alam, moeiscool
//
// # Donate
//
// If you like what I am doing here and want me to continue please consider donating :)
// PayPal : paypal@m03.ca
//
process.on('uncaughtException', function (err) {
    console.error('uncaughtException',err);
});
var fs=require('fs');
var cv=require('opencv4nodejs');
var exec = require('child_process').exec;
var moment = require('moment');
var Canvas = require('canvas');
var express = require('express');
const path = require('path');
var http = require('http'),
    app = express(),
    server = http.createServer(app);
var config=require('./conf.json');
if(!config.port){config.port=8080}
if(!config.hostPort){config.hostPort=8082}
if(config.systemLog===undefined){config.systemLog=true}
if(config.cascadesDir===undefined){config.cascadesDir=__dirname+'/cascades/'}
if(config.alprConfig===undefined){config.alprConfig=__dirname+'/openalpr.conf'}
s={
    group:{},
    dir:{
        cascades : config.cascadesDir
    },
    isWin:(process.platform==='win32'),
    foundCascades : {}
}
//default stream folder check
if(!config.streamDir){
    if(s.isWin===false){
        config.streamDir='/dev/shm'
    }else{
        config.streamDir=config.windowsTempDir
    }
    if(!fs.existsSync(config.streamDir)){
        config.streamDir=__dirname+'/streams/'
    }else{
        config.streamDir+='/streams/'
    }
}
s.dir.streams=config.streamDir;
//streams dir
if(!fs.existsSync(s.dir.streams)){
    fs.mkdirSync(s.dir.streams);
}
//cascades dir
if(!fs.existsSync(s.dir.cascades)){
    fs.mkdirSync(s.dir.cascades);
}
//random string generator, `x` characters long
s.gid=function(x){
    if(!x){x=10};var t = "";var p = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
    for( var i=0; i < x; i++ )
        t += p.charAt(Math.floor(Math.random() * p.length));
    return t;
};
s.findCascades=function(callback){
    var tmp={};
    tmp.foundCascades=[];
    fs.readdir(s.dir.cascades,function(err,files){
        files.forEach(function(cascade,n){
            if(cascade.indexOf('.xml')>-1){
                tmp.foundCascades.push(cascade.replace('.xml',''))
            }
        })
        s.cascadesInDir=tmp.foundCascades;
        callback(tmp.foundCascades)
    })
}
s.findCascades(function(){
    //get cascades
})
s.detectLicensePlate=function(buffer,d,tx){
    if(!d.mon.detector_lisence_plate_country||d.mon.detector_lisence_plate_country===''){
        d.mon.detector_lisence_plate_country='us'
    }
    d.tmpFile=s.gid(5)+'.jpg'
    if(!fs.existsSync(s.dir.streams)){
        fs.mkdirSync(s.dir.streams);
    }
    d.dir=s.dir.streams+d.ke+'/'
    if(!fs.existsSync(d.dir)){
        fs.mkdirSync(d.dir);
    }
    d.dir=s.dir.streams+d.ke+'/'+d.id+'/'
    if(!fs.existsSync(d.dir)){
        fs.mkdirSync(d.dir);
    }
    fs.writeFile(d.dir+d.tmpFile,buffer,function(err){
        if(err) return s.systemLog(err);
        exec('alpr -j --config '+config.alprConfig+' -c '+d.mon.detector_lisence_plate_country+' '+d.dir+d.tmpFile,{encoding:'utf8'},(err, scan, stderr) => {
            if(err){
                s.systemLog(err);
            }else{
                try{
                    scan=JSON.parse(scan.replace('--(!)Loaded CUDA classifier','').trim())
                }catch(err){
                    if(!scan||!scan.results){
                        return s.systemLog(scan,err);
                    }
                }
                if(scan.results.length>0){
                    scan.plates=[]
                    scan.mats=[]
                    scan.results.forEach(function(v){
                        v.candidates.forEach(function(g,n){
                            if(v.candidates[n].matches_template)
                                delete(v.candidates[n].matches_template)
                        })
                        scan.plates.push({coordinates:v.coordinates,candidates:v.candidates,confidence:v.confidence,plate:v.plate})
                        //plate width/height from the distance between corner coordinates
                        var width = Math.sqrt( Math.pow(v.coordinates[1].x - v.coordinates[0].x, 2) + Math.pow(v.coordinates[1].y - v.coordinates[0].y, 2));
                        var height = Math.sqrt( Math.pow(v.coordinates[2].x - v.coordinates[1].x, 2) + Math.pow(v.coordinates[2].y - v.coordinates[1].y, 2))
                        scan.mats.push({
                            x:v.coordinates[0].x,
                            y:v.coordinates[0].y,
                            width:width,
                            height:height,
                            tag:v.plate
                        })
                    })
                    tx({f:'trigger',id:d.id,ke:d.ke,details:{split:true,plug:config.plug,name:'licensePlate',reason:'object',matrices:scan.mats,imgHeight:d.mon.detector_scale_y,imgWidth:d.mon.detector_scale_x,frame:d.base64}})
                }
            }
            exec('rm -rf '+d.dir+d.tmpFile,{encoding:'utf8'})
        })
    })
}
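// For reference, the shape of `alpr -j` output that the handler above consumes
// (inferred from the property accesses in this function; values are illustrative):
// {
//   results: [{
//     plate: "ABC1234",
//     confidence: 91.2,
//     coordinates: [{x,y},{x,y},{x,y},{x,y}],  // plate corner points
//     candidates: [{ plate, confidence, matches_template }]
//   }]
// }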
s.detectObject=function(buffer,d,tx){
    //detect license plate?
    if(d.mon.detector_lisence_plate==="1"){
        s.detectLicensePlate(buffer,d,tx)
    }
    cv.imdecodeAsync(buffer,(err,im) => {
        if(err){
            console.log(err)
            return
        }

        if (!cv.xmodules.dnn) {
            throw new Error('exiting: opencv4nodejs compiled without dnn module');
        }

        // replace with the path where you unzipped the inception model
        const inceptionModelPath = __dirname+'/data/inception';

        const modelFile = path.resolve(inceptionModelPath, 'tensorflow_inception_graph.pb');
        const classNamesFile = path.resolve(inceptionModelPath, 'imagenet_comp_graph_label_strings.txt');
        if (!fs.existsSync(modelFile) || !fs.existsSync(classNamesFile)) {
            console.log('could not find inception model');
            console.log('download the model from: https://cdn.shinobi.video/weights/inception5h.zip');
            throw new Error('exiting');
        }

        // read classNames and store them in an array
        const classNames = fs.readFileSync(classNamesFile).toString().split('\n');

        // initialize the tensorflow inception model from modelFile
        const net = cv.readNetFromTensorflow(modelFile);

        // the inception model works with 224 x 224 images, so we resize
        // our input images and pad the image with white pixels to
        // make the images have the same width and height
        const maxImgDim = 224;
        const white = new cv.Vec(255, 255, 255);
        const imgResized = im.resizeToMax(maxImgDim).padToSquare(white);

        // the network accepts blobs as input
        const inputBlob = cv.blobFromImage(imgResized);
        net.setInput(inputBlob);

        // forward pass the input through the entire network; returns the
        // classification result as a 1xN Mat with confidences of each class
        const outputBlob = net.forward();

        // find all labels with a minimum confidence
        const minConfidence = 0.05;
        const locations =
            outputBlob
                .threshold(minConfidence, 1, cv.THRESH_BINARY)
                .convertTo(cv.CV_8U)
                .findNonZero();
        // locations.forEach(function(v){
        //     console.log(v)
        // })
        const result =
            locations.map(pt => ({
                confidence: parseInt(outputBlob.at(0, pt.x) * 100) / 100,
                className: classNames[pt.x]
            }))
            // sort result by confidence
            .sort((r0, r1) => r1.confidence - r0.confidence)
            .map(res => `${res.className} (${res.confidence})`);
        console.log(result)
        if(result.length > 0) {
            s.cx({
                f:'trigger',
                id:d.id,
                ke:d.ke,
                name:'tensorflow',
                details:{
                    plug:'tensorflow',
                    name:'tensorflow',
                    reason:'object',
                    matrices : result
                    // confidence:d.average
                },
                imgHeight:d.mon.detector_scale_y,
                imgWidth:d.mon.detector_scale_x
            })
        }
    })
}
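// A toy run of the threshold/convertTo/findNonZero chain used above, to make
// the class-index lookup concrete (illustrative sketch using the same
// opencv4nodejs calls; not part of the plugin):
//   const scores = new cv.Mat([[0.01, 0.60, 0.02, 0.09]], cv.CV_32F);
//   const hits = scores
//       .threshold(0.05, 1, cv.THRESH_BINARY)
//       .convertTo(cv.CV_8U)
//       .findNonZero();
//   console.log(hits.map(pt => pt.x)); // [1, 3] -> classNames[1], classNames[3]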
s.systemLog=function(q,w,e){
    if(!w){w=''}
    if(!e){e=''}
    if(config.systemLog===true){
        return console.log(moment().format(),q,w,e)
    }
}

s.blenderRegion=function(d,cord,tx){
    d.width = d.image.width;
    d.height = d.image.height;
    if(!s.group[d.ke][d.id].canvas[cord.name]){
        if(!cord.sensitivity||isNaN(cord.sensitivity)){
            cord.sensitivity=d.mon.detector_sensitivity;
        }
        s.group[d.ke][d.id].canvas[cord.name] = new Canvas(d.width,d.height);
        s.group[d.ke][d.id].canvasContext[cord.name] = s.group[d.ke][d.id].canvas[cord.name].getContext('2d');
        s.group[d.ke][d.id].canvasContext[cord.name].fillStyle = '#000';
        s.group[d.ke][d.id].canvasContext[cord.name].fillRect( 0, 0,d.width,d.height);
        if(cord.points&&cord.points.length>0){
            s.group[d.ke][d.id].canvasContext[cord.name].beginPath();
            for (var b = 0; b < cord.points.length; b++){
                cord.points[b][0]=parseFloat(cord.points[b][0]);
                cord.points[b][1]=parseFloat(cord.points[b][1]);
                if(b===0){
                    s.group[d.ke][d.id].canvasContext[cord.name].moveTo(cord.points[b][0],cord.points[b][1]);
                }else{
                    s.group[d.ke][d.id].canvasContext[cord.name].lineTo(cord.points[b][0],cord.points[b][1]);
                }
            }
            s.group[d.ke][d.id].canvasContext[cord.name].clip();
        }
    }
    if(!s.group[d.ke][d.id].canvasContext[cord.name]){
        return
    }
    s.group[d.ke][d.id].canvasContext[cord.name].drawImage(d.image, 0, 0, d.width, d.height);
    if(!s.group[d.ke][d.id].blendRegion[cord.name]){
        s.group[d.ke][d.id].blendRegion[cord.name] = new Canvas(d.width, d.height);
        s.group[d.ke][d.id].blendRegionContext[cord.name] = s.group[d.ke][d.id].blendRegion[cord.name].getContext('2d');
    }
    var sourceData = s.group[d.ke][d.id].canvasContext[cord.name].getImageData(0, 0, d.width, d.height);
    // create an image if the previous image doesn't exist
    if (!s.group[d.ke][d.id].lastRegionImageData[cord.name]) s.group[d.ke][d.id].lastRegionImageData[cord.name] = s.group[d.ke][d.id].canvasContext[cord.name].getImageData(0, 0, d.width, d.height);
    // create an ImageData instance to receive the blended result
    var blendedData = s.group[d.ke][d.id].canvasContext[cord.name].createImageData(d.width, d.height);
    // blend the 2 images
    s.differenceAccuracy(blendedData.data,sourceData.data,s.group[d.ke][d.id].lastRegionImageData[cord.name].data);
    // draw the result in a canvas
    s.group[d.ke][d.id].blendRegionContext[cord.name].putImageData(blendedData, 0, 0);
    // store the current webcam image
    s.group[d.ke][d.id].lastRegionImageData[cord.name] = sourceData;
    blendedData = s.group[d.ke][d.id].blendRegionContext[cord.name].getImageData(0, 0, d.width, d.height);
    var i = 0;
    d.average = 0;
    while (i < (blendedData.data.length * 0.25)) {
        d.average += (blendedData.data[i * 4] + blendedData.data[i * 4 + 1] + blendedData.data[i * 4 + 2]);
        ++i;
    }
    d.average = (d.average / (blendedData.data.length * 0.25))*10;
    if (d.average > parseFloat(cord.sensitivity)){
        if(d.mon.detector_use_detect_object==="1"&&d.mon.detector_second!=='1'){
            var buffer=s.group[d.ke][d.id].canvas[cord.name].toBuffer();
            s.detectObject(buffer,d,tx)
        }else{
            tx({f:'trigger',id:d.id,ke:d.ke,details:{split:true,plug:config.plug,name:cord.name,reason:'motion',confidence:d.average,frame:d.base64}})
        }
    }
    s.group[d.ke][d.id].canvasContext[cord.name].clearRect(0, 0, d.width, d.height);
    s.group[d.ke][d.id].blendRegionContext[cord.name].clearRect(0, 0, d.width, d.height);
}
function blobToBuffer (blob, cb) {
    if (typeof Blob === 'undefined' || !(blob instanceof Blob)) {
        throw new Error('first argument must be a Blob')
    }
    if (typeof cb !== 'function') {
        throw new Error('second argument must be a function')
    }

    var reader = new FileReader()

    function onLoadEnd (e) {
        reader.removeEventListener('loadend', onLoadEnd, false)
        if (e.error) cb(e.error)
        else cb(null, Buffer.from(reader.result))
    }

    reader.addEventListener('loadend', onLoadEnd, false)
    reader.readAsArrayBuffer(blob)
}
//branchless absolute value
function fastAbs(value) {
    return (value ^ (value >> 31)) - (value >> 31);
}

//binarize: mark a pixel as changed if its difference exceeds 0x15 (21)
function threshold(value) {
    return (value > 0x15) ? 0xFF : 0;
}
s.differenceAccuracy=function(target, data1, data2) {
    if (data1.length != data2.length) return null;
    var i = 0;
    while (i < (data1.length * 0.25)) {
        var average1 = (data1[4 * i] + data1[4 * i + 1] + data1[4 * i + 2]) / 3;
        var average2 = (data2[4 * i] + data2[4 * i + 1] + data2[4 * i + 2]) / 3;
        var diff = threshold(fastAbs(average1 - average2));
        target[4 * i] = diff;
        target[4 * i + 1] = diff;
        target[4 * i + 2] = diff;
        target[4 * i + 3] = 0xFF;
        ++i;
    }
}
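// Toy walk-through of differenceAccuracy (illustrative, not part of the plugin):
// two 1-pixel RGBA frames whose channel averages are 100 and 130; |130-100| = 30
// exceeds the 0x15 (21) threshold, so the output pixel is marked 0xFF (motion).
//   var a = Uint8ClampedArray.from([100,100,100,255]);
//   var b = Uint8ClampedArray.from([130,130,130,255]);
//   var out = new Uint8ClampedArray(4);
//   s.differenceAccuracy(out, a, b);
//   console.log(out); // Uint8ClampedArray [255, 255, 255, 255]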
s.checkAreas=function(d,tx){
    if(!s.group[d.ke][d.id].cords){
        if(!d.mon.cords){d.mon.cords={}}
        s.group[d.ke][d.id].cords=Object.values(d.mon.cords);
    }
    if(d.mon.detector_frame==='1'){
        d.mon.cords.frame={name:'FULL_FRAME',s:d.mon.detector_sensitivity,points:[[0,0],[0,d.image.height],[d.image.width,d.image.height],[d.image.width,0]]};
        s.group[d.ke][d.id].cords.push(d.mon.cords.frame);
    }
    for (var b = 0; b < s.group[d.ke][d.id].cords.length; b++){
        if(!s.group[d.ke][d.id].cords[b]){return}
        s.blenderRegion(d,s.group[d.ke][d.id].cords[b],tx)
    }
    delete(d.image)
}

s.MainEventController=function(d,cn,tx){
    switch(d.f){
        case'refreshPlugins':
            s.findCascades(function(cascades){
                s.cx({f:'s.tx',data:{f:'detector_cascade_list',cascades:cascades},to:'GRP_'+d.ke})
            })
        break;
        case'readPlugins':
            s.cx({f:'s.tx',data:{f:'detector_cascade_list',cascades:s.cascadesInDir},to:'GRP_'+d.ke})
        break;
        case'init_plugin_as_host':
            if(!cn){
                console.log('No CN',d)
                return
            }
            if(d.key!==config.key){
                console.log(new Date(),'Plugin Key Mismatch',cn.request.connection.remoteAddress,d)
                cn.emit('init',{ok:false})
                cn.disconnect()
            }else{
                console.log(new Date(),'Plugin Connected to Client',cn.request.connection.remoteAddress)
                cn.emit('init',{ok:true,plug:config.plug,notice:config.notice,type:config.type})
            }
        break;
        case'init_monitor':
            if(s.group[d.ke]&&s.group[d.ke][d.id]){
                s.group[d.ke][d.id].canvas={}
                s.group[d.ke][d.id].canvasContext={}
                s.group[d.ke][d.id].blendRegion={}
                s.group[d.ke][d.id].blendRegionContext={}
                s.group[d.ke][d.id].lastRegionImageData={}
                s.group[d.ke][d.id].numberOfTriggers=0
                delete(s.group[d.ke][d.id].cords)
                delete(s.group[d.ke][d.id].buffer)
            }
        break;
        case'init_aws_push':
            // console.log('init_aws')
            s.group[d.ke][d.id].aws={links:[],complete:0,total:d.total,videos:[],tx:tx}
        break;
        case'frame':
            try{
                if(!s.group[d.ke]){
                    s.group[d.ke]={}
                }
                if(!s.group[d.ke][d.id]){
                    s.group[d.ke][d.id]={
                        canvas:{},
                        canvasContext:{},
                        lastRegionImageData:{},
                        blendRegion:{},
                        blendRegionContext:{},
                    }
                }
                if(!s.group[d.ke][d.id].buffer){
                    s.group[d.ke][d.id].buffer=[d.frame];
                }else{
                    s.group[d.ke][d.id].buffer.push(d.frame)
                }
                //0xFF 0xD9 is the JPEG end-of-image marker; a complete frame has arrived
                if(d.frame[d.frame.length-2] === 0xFF && d.frame[d.frame.length-1] === 0xD9){
                    s.group[d.ke][d.id].buffer=Buffer.concat(s.group[d.ke][d.id].buffer);
                    try{
                        d.mon.detector_cascades=JSON.parse(d.mon.detector_cascades)
                    }catch(err){}
                    if(d.mon.detector_frame_save==="1"){
                        d.base64=s.group[d.ke][d.id].buffer.toString('base64')
                    }
                    if(d.mon.detector_second==='1'&&d.objectOnly===true){
                        s.detectObject(s.group[d.ke][d.id].buffer,d,tx)
                    }else{
                        if((d.mon.detector_pam !== '1' && d.mon.detector_use_motion === "1") || d.mon.detector_use_detect_object !== "1"){
                            if((typeof d.mon.cords ==='string')&&d.mon.cords.trim()===''){
                                d.mon.cords=[]
                            }else{
                                try{
                                    d.mon.cords=JSON.parse(d.mon.cords)
                                }catch(err){
                                    // console.log('d.mon.cords',err,d)
                                }
                            }
                            s.group[d.ke][d.id].cords=Object.values(d.mon.cords);
                            d.image = new Canvas.Image;
                            if(d.mon.detector_scale_x===''||d.mon.detector_scale_y===''){
                                s.systemLog('Must set detector image size')
                                return
                            }else{
                                d.image.width=d.mon.detector_scale_x;
                                d.image.height=d.mon.detector_scale_y;
                            }
                            d.width=d.image.width;
                            d.height=d.image.height;
                            d.image.onload = function() {
                                s.checkAreas(d,tx);
                            }
                            d.image.src = s.group[d.ke][d.id].buffer;
                        }else{
                            s.detectObject(s.group[d.ke][d.id].buffer,d,tx)
                        }
                    }
                    s.group[d.ke][d.id].buffer=null;
                }
            }catch(err){
                if(err){
                    s.systemLog(err)
                    delete(s.group[d.ke][d.id].buffer)
                }
            }
        break;
    }
}
server.listen(config.hostPort);
//web pages and plugin api
app.get('/', function (req, res) {
    res.end('<b>'+config.plug+'</b> for Shinobi is running')
});
//Connector to Shinobi
if(config.mode==='host'){
    //start plugin as host
    var io = require('socket.io')(server);
    io.attach(server);
    s.connectedClients={};
    io.on('connection', function (cn) {
        s.connectedClients[cn.id]={id:cn.id}
        s.connectedClients[cn.id].tx = function(data){
            data.pluginKey=config.key;data.plug=config.plug;
            return io.to(cn.id).emit('ocv',data);
        }
        cn.on('f',function(d){
            s.MainEventController(d,cn,s.connectedClients[cn.id].tx)
        });
        cn.on('disconnect',function(d){
            delete(s.connectedClients[cn.id])
        })
    });
}else{
    //start plugin as client
    if(!config.host){config.host='localhost'}
    var io = require('socket.io-client')('ws://'+config.host+':'+config.port);//connect to master
    s.cx=function(x){x.pluginKey=config.key;x.plug=config.plug;return io.emit('ocv',x)}
    io.on('connect',function(d){
        s.cx({f:'init',plug:config.plug,notice:config.notice,type:config.type});
    })
    io.on('disconnect',function(d){
        io.connect();
    })
    io.on('f',function(d){
        s.MainEventController(d,null,s.cx)
    })
}
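The plugin speaks the same socket protocol in both modes: it emits 'ocv' events stamped with pluginKey and plug, and routes incoming 'f' events through s.MainEventController. A minimal sketch of exercising host mode from a test client, assuming the plugin was started with "mode":"host" and the conf.json described above:

// sketch: connect to the plugin in host mode and perform the key handshake
var config = require('./conf.json');
var io = require('socket.io-client')('ws://localhost:' + config.hostPort);
io.on('connect', function () {
    // 'init_plugin_as_host' is the case MainEventController handles for new hosts
    io.emit('f', { f: 'init_plugin_as_host', key: config.key });
});
io.on('init', function (d) {
    console.log('handshake ok:', d.ok, 'plug:', d.plug); // ok:false means key mismatch
});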