diff --git a/demos/common/cpp/utils/include/utils/input_wrappers.hpp b/demos/common/cpp/utils/include/utils/input_wrappers.hpp
index eff38a72771..a455177385b 100644
--- a/demos/common/cpp/utils/include/utils/input_wrappers.hpp
+++ b/demos/common/cpp/utils/include/utils/input_wrappers.hpp
@@ -126,11 +126,13 @@ class ImageSource: public IInputSource {
                 return false;
             } else {
                 subscribedInputChannels.erase(subscribedInputChannelsIt);
-                mat = im;
+                // Clone so the stored image is not shared and later modified elsewhere.
+                mat = im.clone();
                 return true;
             }
         } else {
-            mat = im;
+            // Clone so the stored image is not shared and later modified elsewhere.
+            mat = im.clone();
             return true;
         }
     }
diff --git a/demos/common/cpp/utils/src/args_helper.cpp b/demos/common/cpp/utils/src/args_helper.cpp
index 8f4bc351d61..b55d1f44bf2 100644
--- a/demos/common/cpp/utils/src/args_helper.cpp
+++ b/demos/common/cpp/utils/src/args_helper.cpp
@@ -81,7 +81,7 @@ std::vector<std::string> parseDevices(const std::string& device_string) {
     const std::string::size_type colon_position = device_string.find(":");
     if (colon_position != std::string::npos) {
         std::string device_type = device_string.substr(0, colon_position);
-        if (device_type == "HETERO" || device_type == "MULTI") {
+        if (device_type == "HETERO" || device_type == "MULTI" || device_type == "AUTO") {
            std::string comma_separated_devices = device_string.substr(colon_position + 1);
            std::vector<std::string> devices = split(comma_separated_devices, ',');
            for (auto& device : devices)
diff --git a/demos/security_barrier_camera_demo/cpp/main.cpp b/demos/security_barrier_camera_demo/cpp/main.cpp
index c19087f0895..3bfa5f9b73a 100644
--- a/demos/security_barrier_camera_demo/cpp/main.cpp
+++ b/demos/security_barrier_camera_demo/cpp/main.cpp
@@ -121,6 +121,8 @@ struct Context {
         detectorsInfers.assign(detectorInferRequests);
         attributesInfers.assign(attributesInferRequests);
         platesInfers.assign(lprInferRequests);
+        totalInferFrameCounter = 0;
+        totalFrameCount = 0;
     }

     struct {
@@ -172,6 +174,11 @@ struct Context {
     bool isVideo;
     std::atomic<std::vector<ov::InferRequest>::size_type> freeDetectionInfersCount;
     std::atomic<uint32_t> frameCounter;
+
+    // Counters for the inferred frames and the total input frames
+    std::atomic<uint32_t> totalInferFrameCounter;
+    std::atomic<uint32_t> totalFrameCount;
+
     InferRequestsContainer detectorsInfers, attributesInfers, platesInfers;
     PerformanceMetrics metrics;
 };
@@ -220,15 +227,34 @@ class ClassifiersAggregator {
        std::mutex& printMutex = static_cast<ReborningVideoFrame*>(sharedVideoFrame.get())->context.classifiersAggregatorPrintMutex;
        printMutex.lock();
        if (FLAGS_r && !rawDetections.empty()) {
-            slog::debug << "Frame #: " << sharedVideoFrame->frameId << slog::endl;
-            slog::debug << rawDetections;
+            slog::debug << "ChannelId:" << sharedVideoFrame->sourceID << "," << "FrameId:" << sharedVideoFrame->frameId << ",";
+            for (auto it = rawDetections.begin(); it != rawDetections.end(); ++it) {
+                if (it == std::prev(rawDetections.end()))
+                    slog::debug << *it << "\t";
+                else
+                    slog::debug << *it << ",";
+            }
             // destructor assures that none uses the container
-            for (const std::string& rawAttribute : rawAttributes.container) {
-                slog::debug << rawAttribute << slog::endl;
+            // Format: ChannelId,FrameId,ObjectId,ObjectLabel,Prob,roi_x,roi_y,roi_width,roi_height,[Vehicle Attributes],[License Plate]
+            for (auto it = rawAttributes.container.begin(); it != rawAttributes.container.end(); ++it) {
+                auto pos = it->find(":");
+                if (pos != std::string::npos) {
+                    if (it == std::prev(rawAttributes.container.end()))
+                        slog::debug << it->substr(pos + 1) << "\t";
+                    else
+                        slog::debug << it->substr(pos + 1) << ",";
+                }
             }
-            for (const std::string& rawDecodedPlate : rawDecodedPlates.container) {
-                slog::debug << rawDecodedPlate << slog::endl;
+            for (auto it = rawDecodedPlates.container.begin(); it != rawDecodedPlates.container.end(); ++it) {
+                auto pos = it->find(":");
+                if (pos != std::string::npos) {
+                    if (it == std::prev(rawDecodedPlates.container.end()))
+                        slog::debug << it->substr(pos + 1);
+                    else
+                        slog::debug << it->substr(pos + 1) << ",";
+                }
             }
+            slog::debug << slog::endl;
        }
        printMutex.unlock();
        tryPush(static_cast<ReborningVideoFrame*>(sharedVideoFrame.get())->context.resAggregatorsWorker,
@@ -292,6 +318,9 @@ ReborningVideoFrame::~ReborningVideoFrame() {
         context.videoFramesContext.lastFrameIdsMutexes[sourceID].lock();
         const auto frameId = ++context.videoFramesContext.lastframeIds[sourceID];
         context.videoFramesContext.lastFrameIdsMutexes[sourceID].unlock();
+        // Stop reborning once the image frame id goes out of the requested range
+        if (!context.isVideo && frameId >= FLAGS_n_iqs)
+            return;
         std::shared_ptr<ReborningVideoFrame> reborn = std::make_shared<ReborningVideoFrame>(context, sourceID, frameId, frame);
         worker->push(std::make_shared<Reader>(reborn));
     } catch (const std::bad_weak_ptr&) {}
@@ -305,6 +334,15 @@ bool Drawer::isReady() {
         if (std::chrono::steady_clock::now() - prevShow > showPeriod) {
             return true;
         } else {
+            if (!context.isVideo) {
+                uint32_t totalInferFrameCounter = FLAGS_ni == 0 ? FLAGS_n_iqs * context.totalFrameCount : FLAGS_ni * FLAGS_n_iqs;
+                if (context.totalInferFrameCounter == totalInferFrameCounter) {
+                    try {
+                        std::shared_ptr<Worker>(context.drawersContext.drawersWorker)->stop();
+                    }
+                    catch (const std::bad_weak_ptr&) {}
+                }
+            }
             return false;
         }
     } else {
@@ -314,6 +352,15 @@
             if (2 > gridMats.size()) {  // buffer size
                 return true;
             } else {
+                if (!context.isVideo) {
+                    uint32_t totalInferFrameCounter = FLAGS_ni == 0 ? FLAGS_n_iqs * context.totalFrameCount : FLAGS_ni * FLAGS_n_iqs;
+                    if (context.totalInferFrameCounter == totalInferFrameCounter) {
+                        try {
+                            std::shared_ptr<Worker>(context.drawersContext.drawersWorker)->stop();
+                        }
+                        catch (const std::bad_weak_ptr&) {}
+                    }
+                }
                 return false;
             }
         } else {
@@ -322,6 +369,15 @@
                     && std::chrono::steady_clock::now() - prevShow > showPeriod) {
                 return true;
             } else {
+                if (!context.isVideo) {
+                    uint32_t totalInferFrameCounter = FLAGS_ni == 0 ? FLAGS_n_iqs * context.totalFrameCount : FLAGS_ni * FLAGS_n_iqs;
+                    if (context.totalInferFrameCounter == totalInferFrameCounter) {
+                        try {
+                            std::shared_ptr<Worker>(context.drawersContext.drawersWorker)->stop();
+                        }
+                        catch (const std::bad_weak_ptr&) {}
+                    }
+                }
                 return false;
             }
         } else {
@@ -378,6 +434,13 @@ void Drawer::process() {
         }
     } else {
         if (!context.isVideo) {
+            // Calculate the expected inference count for image inputs.
+            uint32_t totalInferFrameCounter = FLAGS_ni == 0 ? FLAGS_n_iqs * context.totalFrameCount : FLAGS_ni * FLAGS_n_iqs;
+            if (context.totalInferFrameCounter < totalInferFrameCounter) {
+                context.drawersContext.drawerMutex.unlock();
+                return;
+            }
             try {
                 std::shared_ptr<Worker>(context.drawersContext.drawersWorker)->stop();
             }
@@ -388,6 +451,15 @@
             gridMats.emplace((--gridMats.end())->first + 1, firstGridIt->second);
             gridMats.erase(firstGridIt);
         }
+        if (!context.isVideo) {
+            uint32_t totalInferFrameCounter = FLAGS_ni == 0 ? FLAGS_n_iqs * context.totalFrameCount : FLAGS_ni * FLAGS_n_iqs;
+            if (context.totalInferFrameCounter == totalInferFrameCounter) {
+                try {
+                    std::shared_ptr<Worker>(context.drawersContext.drawersWorker)->stop();
+                }
+                catch (const std::bad_weak_ptr&) {}
+            }
+        }
     context.drawersContext.drawerMutex.unlock();
 }
@@ -429,6 +501,7 @@ bool DetectionsProcessor::isReady() {
         classifiersAggregator = std::make_shared<ClassifiersAggregator>(sharedVideoFrame);
         std::list<Detector::Result> results;
         results = context.inferTasksContext.detector.getResults(*inferRequest, sharedVideoFrame->frame.size(), classifiersAggregator->rawDetections);
+
         for (Detector::Result result : results) {
             switch (result.label) {
                 case 1:
@@ -489,6 +562,8 @@ void DetectionsProcessor::process() {
             const cv::Rect vehicleRect = *vehicleRectsIt;
             ov::InferRequest& attributesRequest = *attributesRequestIt;
             context.detectionsProcessorsContext.vehicleAttributesClassifier.setImage(attributesRequest, sharedVideoFrame->frame, vehicleRect);
+            // Decrease the total inferred frames count by 1 when a frame ROI has available vehicle attributes.
+            context.totalInferFrameCounter--;

             attributesRequest.set_callback(
                 std::bind(
@@ -508,6 +583,8 @@ void DetectionsProcessor::process() {
                         classifiersAggregator->push(
                             BboxAndDescr{BboxAndDescr::ObjectType::VEHICLE, rect, attributes.first + ' ' + attributes.second});
                         context.attributesInfers.inferRequests.lockedPushBack(attributesRequest);
+                        // Increase the total inferred frames count by 1 when attributes classification is done.
+                        context.totalInferFrameCounter++;
                     }, classifiersAggregator,
                     std::ref(attributesRequest),
                     vehicleRect,
@@ -528,7 +605,8 @@ void DetectionsProcessor::process() {
             const cv::Rect plateRect = *plateRectsIt;
             ov::InferRequest& lprRequest = *lprRequestsIt;
             context.detectionsProcessorsContext.lpr.setImage(lprRequest, sharedVideoFrame->frame, plateRect);
-
+            // Decrease the total inferred frames count by 1 when a frame ROI has a license plate to recognize.
+            context.totalInferFrameCounter--;
             lprRequest.set_callback(
                 std::bind(
                     [](std::shared_ptr<ClassifiersAggregator> classifiersAggregator,
@@ -544,6 +622,8 @@ void DetectionsProcessor::process() {
                         }
                         classifiersAggregator->push(BboxAndDescr{BboxAndDescr::ObjectType::PLATE, rect, std::move(result)});
                         context.platesInfers.inferRequests.lockedPushBack(lprRequest);
+                        // Increase the total inferred frames count by 1 when license plate recognition is done.
+                        context.totalInferFrameCounter++;
                     }, classifiersAggregator,
                     std::ref(lprRequest),
                     plateRect,
@@ -562,6 +642,8 @@ void DetectionsProcessor::process() {
         tryPush(context.detectionsProcessorsContext.detectionsProcessorsWorker,
                 std::make_shared<DetectionsProcessor>(sharedVideoFrame, std::move(classifiersAggregator), std::move(vehicleRects), std::move(plateRects)));
     }
+    // Count the frames that have passed inference
+    context.totalInferFrameCounter++;
 }

 bool InferTask::isReady() {
@@ -584,10 +666,10 @@ void InferTask::process() {
     InferRequestsContainer& detectorsInfers = context.detectorsInfers;
     std::reference_wrapper<ov::InferRequest> inferRequest = detectorsInfers.inferRequests.container.back();
     detectorsInfers.inferRequests.container.pop_back();
+    detectorsInfers.inferRequests.mutex.unlock();

     context.inferTasksContext.detector.setImage(inferRequest, sharedVideoFrame->frame);
-
     inferRequest.get().set_callback(
         std::bind(
             [](VideoFrame::Ptr sharedVideoFrame,
@@ -628,6 +710,19 @@ void Reader::process() {
         context.readersContext.lastCapturedFrameIds[sourceID]++;
         context.readersContext.lastCapturedFrameIdsMutexes[sourceID].unlock();
         try {
+            // Calculate the expected inference count for video input.
+            uint32_t totalInferFrameCounter = 0;
+            if (FLAGS_ni == 0)
+                totalInferFrameCounter = context.totalFrameCount;
+            else
+                totalInferFrameCounter = FLAGS_ni * context.totalFrameCount;
+
+            if (context.totalInferFrameCounter < totalInferFrameCounter) {
+                // Reborn this frame so that the worker can be stopped the next time this branch is reached
+                std::shared_ptr<Worker>(context.drawersContext.drawersWorker)->push(std::make_shared<Reader>(sharedVideoFrame));
+                return;
+            }
             std::shared_ptr<Worker>(context.drawersContext.drawersWorker)->stop();
         } catch (const std::bad_weak_ptr&) {}
     }
@@ -667,6 +762,8 @@ int main(int argc, char* argv[]) {
             videoCapturSourcess.push_back(std::make_shared<VideoCaptureSource>(videoCapture, FLAGS_loop_video));
         }
     }
+
+    uint32_t totalFrameCount = 0;
     for (const std::string& file : files) {
         cv::Mat frame = cv::imread(file, cv::IMREAD_COLOR);
         if (frame.empty()) {
@@ -676,8 +773,12 @@
                 return 1;
             }
             videoCapturSourcess.push_back(std::make_shared<VideoCaptureSource>(videoCapture, FLAGS_loop_video));
+            // Get the total frame count from this video
+            totalFrameCount = static_cast<uint32_t>(videoCapture.get(cv::CAP_PROP_FRAME_COUNT));
         } else {
             imageSourcess.push_back(std::make_shared<ImageSource>(frame, true));
+            // Count the total frames coming from the input images
+            totalFrameCount++;
         }
     }
     uint32_t channelsNum = 0 == FLAGS_ni ? videoCapturSourcess.size() + imageSourcess.size() : FLAGS_ni;
@@ -721,7 +822,6 @@
         }
         core.set_property("CPU", ov::affinity(ov::Affinity::NONE));
         core.set_property("CPU", ov::streams::num((device_nstreams.count("CPU") > 0 ? ov::streams::Num(device_nstreams["CPU"]) : ov::streams::AUTO)));
-
         device_nstreams["CPU"] = core.get_property("CPU", ov::streams::num);
     }
@@ -795,6 +895,8 @@
                        nireq,
                        isVideo,
                        nclassifiersireq, nrecognizersireq};
+    // Initialize the input frames count
+    context.totalFrameCount = totalFrameCount;

    // Create a worker after a context because the context has only weak_ptr, but the worker is going to
    // indirectly store ReborningVideoFrames which have a reference to the context. So there won't be a situation
    // when the context is destroyed and the worker still lives with its ReborningVideoFrames referring to the
diff --git a/demos/security_barrier_camera_demo/cpp/net_wrappers.hpp b/demos/security_barrier_camera_demo/cpp/net_wrappers.hpp
index 00519a761a9..c197c691df8 100644
--- a/demos/security_barrier_camera_demo/cpp/net_wrappers.hpp
+++ b/demos/security_barrier_camera_demo/cpp/net_wrappers.hpp
@@ -122,8 +122,7 @@ class Detector {
     }

     std::list<Result> getResults(ov::InferRequest& inferRequest, cv::Size upscale, std::vector<std::string>& rawResults) {
-        // there is no big difference if InferReq of detector from another device is passed
-        // because the processing is the same for the same topology
+        // there is no big difference if an InferReq of a detector from another device is passed, because the processing is the same for the same topology
         std::list<Result> results;
         ov::Tensor output_tensor = inferRequest.get_tensor(m_detectorOutputName);
         const float* const detections = output_tensor.data<float>();
@@ -141,13 +140,21 @@ class Detector {
             cv::Rect rect;
             rect.x = static_cast<int>(detections[i * objectSize + 3] * upscale.width);
+            if (rect.x < 0)
+                rect.x = 0;
             rect.y = static_cast<int>(detections[i * objectSize + 4] * upscale.height);
+            if (rect.y < 0)
+                rect.y = 0;
             rect.width = static_cast<int>(detections[i * objectSize + 5] * upscale.width) - rect.x;
+            // Clamp so that x + width stays inside the frame
+            if (rect.x + rect.width > upscale.width)
+                rect.width = upscale.width - rect.x;
             rect.height = static_cast<int>(detections[i * objectSize + 6] * upscale.height) - rect.y;
+            // Clamp so that y + height stays inside the frame
+            if (rect.y + rect.height > upscale.height)
+                rect.height = upscale.height - rect.y;
             results.push_back(Result{label, confidence, rect});
             std::ostringstream rawResultsStream;
-            rawResultsStream << "[" << i << "," << label << "] element, prob = " << confidence
-                             << " (" << rect.x << "," << rect.y << ")-(" << rect.width << "," << rect.height << ")";
+            rawResultsStream << i << "," << label << "," << confidence
+                             << "," << rect.x << "," << rect.y << "," << rect.width << "," << rect.height;
             rawResults.push_back(rawResultsStream.str());
         }
         return results;
@@ -187,10 +194,15 @@ class VehicleAttributesClassifier {
             throw std::logic_error("Vehicle Attribs Network expects networks having two outputs");
         }

-        // color is the first output
-        m_outputNameForColor = outputs[0].get_any_name();
-        // type is the second output.
-        m_outputNameForType = outputs[1].get_any_name();
+        // Determine which output is color and which is type by inspecting the output names
+        if (outputs[0].get_any_name().find("color") != std::string::npos) {
+            m_outputNameForColor = outputs[0].get_any_name();
+            m_outputNameForType = outputs[1].get_any_name();
+        } else {
+            m_outputNameForColor = outputs[1].get_any_name();
+            m_outputNameForType = outputs[0].get_any_name();
+        }

         ov::preprocess::PrePostProcessor ppp(model);
@@ -232,14 +244,14 @@ class VehicleAttributesClassifier {
         ov::Tensor inputTensor = inferRequest.get_tensor(m_attributesInputName);
         ov::Shape shape = inputTensor.get_shape();
         if (m_autoResize) {
-            ov::Tensor frameTensor = wrapMat2Tensor(img);
+            ov::Tensor frameTensor = wrapMat2Tensor(img.clone());
             ov::Coordinate p00({ 0, (size_t)vehicleRect.y, (size_t)vehicleRect.x, 0 });
             ov::Coordinate p01({ 1, (size_t)(vehicleRect.y + vehicleRect.height), (size_t)vehicleRect.x + vehicleRect.width, 3 });
             ov::Tensor roiTensor(frameTensor, p00, p01);
             inferRequest.set_tensor(m_attributesInputName, roiTensor);
         } else {
-            const cv::Mat& vehicleImage = img(vehicleRect);
+            const cv::Mat vehicleImage = img(vehicleRect).clone();
             resize2tensor(vehicleImage, inputTensor);
         }
     }
@@ -255,10 +267,17 @@ class VehicleAttributesClassifier {
         // 7 possible colors for each vehicle and we should select the one with the maximum probability
         ov::Tensor colorsTensor = inferRequest.get_tensor(m_outputNameForColor);
         const float* colorsValues = colorsTensor.data<float>();
-
+        if (colorsTensor.get_size() != 7) {
+            throw std::logic_error("Vehicle color output size should be 7.");
+        }
         // 4 possible types for each vehicle and we should select the one with the maximum probability
         ov::Tensor typesTensor = inferRequest.get_tensor(m_outputNameForType);
         const float* typesValues = typesTensor.data<float>();
+        if (typesTensor.get_size() != 4) {
+            throw std::logic_error("Vehicle type output size should be 4.");
+        }

         const auto color_id = std::max_element(colorsValues, colorsValues + 7) - colorsValues;
         const auto type_id = std::max_element(typesValues, typesValues + 4) - typesValues;
diff --git a/demos/tests/cases.py b/demos/tests/cases.py
index cb725ec64e2..99d75b6cb6e 100644
--- a/demos/tests/cases.py
+++ b/demos/tests/cases.py
@@ -63,7 +63,11 @@ def update_case(self, case, updated_options, with_replacement=False):
         if not updated_options: return
         new_options = case.options.copy()
         for key, value in updated_options.items():
-            new_options[key] = value
+            # Wrap the value in TestDataArg when updating the '-i' option so the path points into the test data directory
+            if key == '-i':
+                new_options[key] = TestDataArg(value)
+            else:
+                new_options[key] = value
         new_case = case._replace(options=new_options)
         if with_replacement:
             self.test_cases.remove(case)
@@ -77,6 +81,10 @@ def parse_output(self, output, test_case, device):
         if self.parser:
             self.parser(output, test_case, device)

+    def check_difference(self):
+        if self.parser:
+            return self.parser.check_difference()
+
     def update_option(self, updated_options):
         for case in self.test_cases[:]:
             self.update_case(case, updated_options, with_replacement=True)
@@ -602,7 +610,8 @@ def single_option_cases(key, *args):
     TestCase(options={'-no_show': None,
                       **MONITORS,
                       '-i': DataDirectoryArg('vehicle-license-plate-detection-barrier')}),
-    TestCase(options={'-m': ModelArg('vehicle-license-plate-detection-barrier-0106')}),
+    # Switch to this model in order to obtain the ROI data
+    TestCase(options={'-m': ModelArg('vehicle-license-plate-detection-barrier-0123')}),
    single_option_cases('-m_lpr',
        None,
        ModelArg('license-plate-recognition-barrier-0001'),
diff --git a/demos/tests/correctness_cases.py b/demos/tests/correctness_cases.py
new file mode 100644
index 00000000000..627eba52761
--- /dev/null
+++ b/demos/tests/correctness_cases.py
@@ -0,0 +1,375 @@
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from copy import deepcopy
+from abc import ABC, abstractmethod
+
+from cases import BASE
+
+
+class CorrectnessCheckerBase(ABC):
+    def __init__(self, demo):
+        self.filename = demo.subdirectory.replace('/', '_') + '.log'
+        self.demo_name = demo.subdirectory.split('/')[0]
+        self.results = {}
+        self.case_index = {}
+
+    @abstractmethod
+    def __call__(self, output, test_case, device, execution_time=-1):
+        pass
+
+    def compare_roi(self, source_roi, dest_roi):
+        # Expected ROI format: label, prob, x, y, w, h, ...
+        if len(source_roi) != len(dest_roi):
+            return False
+        source = [float(item) for item in source_roi]
+        dest = [float(item) for item in dest_roi]
+        # Values in (0, 1) are treated as probabilities and may differ by up to prob_gap;
+        # all other values are pixel quantities and may differ by up to pos_gap.
+        prob_gap = 0.1
+        pos_gap = 20
+        for src, dst in zip(source, dest):
+            gap = prob_gap if 0 < src < 1 else pos_gap
+            if abs(src - dst) > gap:
+                return False
+        return True
+
+    def check_difference(self):
+        devices_list = {
+            "AUTO:GPU,CPU": ["CPU", "GPU"],
+            "MULTI:GPU,CPU": ["CPU", "GPU"],
+            "AUTO:CPU": ["CPU"],
+            "AUTO:GPU": ["GPU"],
+        }
+        # Record the results for single-device inputs like AUTO:CPU, AUTO:GPU
+        # {
+        #     device 0: { target 0: {case 0: True/False, case 1: True/False, ...}, ..., target n: {} },
+        #     device 1: { target 1: {case 0: True/False, case 1: True/False, ...}, ..., target n: {} },
+        # }
+        multi_correctness = {}
+
+        # Record the results for multi-device inputs like AUTO:GPU,CPU and MULTI:GPU,CPU
+        # {
+        #     device 0: {
+        #         case 0: {
+        #             channel 0: {
+        #                 frame 0: [roi_result, attribute_result, license_plate_results],
+        #                 frame 1: [roi_result, attribute_result, license_plate_results],
+        #                 ...
+        #             },
+        #             channel 1: {},
+        #             ...
+        #         },
+        #         case 1: {},
+        #         ...
+        #     },
+        #     device 1: {},
+        # }
+        multi_correctness_devices = {}
+
+        # Record the detailed message for each inconsistent result
+        # {
+        #     device 0: {case 0: "error msg", case 1: "error msg", ...},
+        #     device 1: {case 0: "error msg", case 1: "error msg", ...},
+        #     ...
+ # } + multi_correctness_errmsg = {} + for device in devices_list: + multi_correctness[device] = {} + multi_correctness_errmsg[device] = {} + if 'GPU' in device and 'CPU' in device: + multi_correctness_devices[device] = {} + for target in devices_list[device]: + multi_correctness[device][target] = {} + if 'GPU' in device and 'CPU' in device: + multi_correctness_devices[device][target] = {} + for device in devices_list: + for target in devices_list[device]: + if device not in self.results or target not in self.results: + multi_correctness[device][target] = {} + multi_correctness_errmsg[device]['-1'] = "\tMiss the results from device {} or from device {}.\n".format(device, target) + continue + #if self.results[device] != self.results[target]: + # # Show the detailed inconsistent results + for case in self.results[target]: + if case not in multi_correctness[device][target]: + multi_correctness[device][target][case] = True + if case not in multi_correctness_errmsg[device]: + multi_correctness_errmsg[device][case] = '' + + #if self.results[device][case] != self.results[target][case]: + for channel in self.results[device][case]: + for frame in self.results[device][case][channel]: + if channel not in self.results[target][case] or (channel in self.results[target][case] and frame not in self.results[target][case][channel]): + multi_correctness[device][target][case] = False + multi_correctness_errmsg[device][case] += "[Device: {}- Case: {}][Not Found on {}]Channel {} - Frame {} : {}\n".format(device, case, target, channel, frame, self.results[device][case][channel][frame]) + else: + for obj in self.results[target][case][channel][frame]: + if obj not in self.results[device][case][channel][frame]: + multi_correctness[device][target][case] = False + multi_correctness_errmsg[device][case] += "[Device: {}- Case: {}][Not Found on {}]Channel {} - Frame {} : {}\n".format(device, case, device, channel, frame, self.results[target][case][channel][frame]) + else: + try: + if 'CPU' in device and 'GPU' in device: + if case not in multi_correctness_devices[device][target]: + multi_correctness_devices[device][target][case] = {} + if channel not in multi_correctness_devices[device][target][case]: + multi_correctness_devices[device][target][case][channel] = {} + if frame not in multi_correctness_devices[device][target][case][channel]: + multi_correctness_devices[device][target][case][channel][frame] = [] + + for i in range(len(self.results[device][case][channel][frame][obj])): + if i == 0: + # Compared ROI + device_vehicle_roi = self.results[device][case][channel][frame][obj][i] + target_vehicle_roi = self.results[target][case][channel][frame][obj][i] + flag_roi = self.compare_roi(device_vehicle_roi, target_vehicle_roi) + if 'CPU' in device and 'GPU' in device: + multi_correctness_devices[device][target][case][channel][frame].append(flag_roi) + else: + if not flag_roi: + multi_correctness[device][target][case] = False + tmp_msg = ("[Device: {}- Case: {} on {}] Channel {} - Frame {} : {}\n".format(device, case, device, channel, frame, self.results[device][case][channel][frame])) + tmp_msg += ("[Device: {}- Case: {} on {}] Channel {} - Frame {} : {}\n".format(device, case, target, channel, frame, self.results[target][case][channel][frame])) + multi_correctness_errmsg[device][case] += tmp_msg + else: + # Compare attribute/license plate + device_vehicle_attr = self.results[device][case][channel][frame][obj][i] + target_vehicle_attr = [] + if i < len(self.results[target][case][channel][frame][obj]): + target_vehicle_attr 
+                                                    if 'CPU' in device and 'GPU' in device:
+                                                        multi_correctness_devices[device][target][case][channel][frame].append(device_vehicle_attr == target_vehicle_attr)
+                                                    elif device_vehicle_attr != target_vehicle_attr:
+                                                        multi_correctness[device][target][case] = False
+                                                        tmp_msg = "[Device: {} - Case: {} on {}] Channel {} - Frame {}: {}\n".format(device, case, device, channel, frame, self.results[device][case][channel][frame])
+                                                        tmp_msg += "[Device: {} - Case: {} on {}] Channel {} - Frame {}: {}\n".format(device, case, target, channel, frame, self.results[target][case][channel][frame])
+                                                        multi_correctness_errmsg[device][case] += tmp_msg
+                                            if 'CPU' in device and 'GPU' in device:
+                                                # For multi-device configs, a frame counts as consistent if it matches at least one physical device
+                                                consistent_flag = False
+                                                for flag in multi_correctness_devices[device][target][case][channel][frame]:
+                                                    if flag:
+                                                        consistent_flag = True
+                                                        break
+                                                if not consistent_flag:
+                                                    tmp_msg = "[Device: {} - Case: {} on {}] Channel {} - Frame {}: {}\n".format(device, case, target, channel, frame, self.results[target][case][channel][frame])
+                                                    if target == 'CPU':
+                                                        tmp_msg += "[Device: {} - Case: {} on {}] Channel {} - Frame {}: {}\n".format(device, case, 'GPU', channel, frame, self.results['GPU'][case][channel][frame])
+                                                    elif target == 'GPU':
+                                                        tmp_msg += "[Device: {} - Case: {} on {}] Channel {} - Frame {}: {}\n".format(device, case, 'CPU', channel, frame, self.results['CPU'][case][channel][frame])
+                                                    tmp_msg += "[Device: {} - Case: {} on {}] Channel {} - Frame {}: {}\n".format(device, case, device, channel, frame, self.results[device][case][channel][frame])
+                                                    multi_correctness_errmsg[device][case] += tmp_msg
+                                        except Exception:
+                                            print("====== Checking exception on Case: {} - Device: {} - Target: {} - Channel: {} - Frame: {} ======".format(case, device, target, channel, frame))
+                                            print("Device {}: {}".format(device, self.results[device][case][channel][frame][obj]))
+                                            print("{}: {}".format(target, self.results[target][case][channel][frame][obj]))
+                                            if 'CPU' in device and 'GPU' in device:
+                                                multi_correctness_devices[device][target][case][channel][frame].append(False)
+                                                if target == 'CPU':
+                                                    print("GPU: {}".format(self.results['GPU'][case][channel][frame][obj]))
+                                                elif target == 'GPU':
+                                                    print("CPU: {}".format(self.results['CPU'][case][channel][frame][obj]))
+                                            else:
+                                                multi_correctness[device][target][case] = False
+                                                multi_correctness_errmsg[device][case] += "[Device: {} - Case: {}][Exception on {}] Channel {} - Frame {}: {}\n".format(device, case, device, channel, frame, self.results[target][case][channel][frame])
+        final_correctness_flag = True
+        for device in devices_list:
+            if device != 'MULTI:GPU,CPU' and device != 'AUTO:GPU,CPU':
+                for target in multi_correctness[device]:
+                    consistent_flag = True
+                    err_msg = 'Inconsistent result:\n'
+                    if len(multi_correctness[device][target]) == 0:
+                        # Missing results for this device
+                        final_correctness_flag = False
+                        consistent_flag = False
+                        err_msg += '\t'
+                        err_msg += multi_correctness_errmsg[device]['-1']
+                    else:
+                        for case in multi_correctness[device][target]:
+                            if not multi_correctness[device][target][case]:
+                                final_correctness_flag = False
+                                consistent_flag = False
+                                err_msg += '\t'
+                                err_msg += multi_correctness_errmsg[device][case]
+                    if not consistent_flag:
+                        print("Checking device: {} - target: {}: Fail.\n{}".format(device, devices_list[device], err_msg))
+                    else:
+                        print("Checking device: {} - target: {}: PASS.\n".format(device, devices_list[device]))
+            else:
+                consistent_flag = True
+                err_msg = ''
+                if len(multi_correctness[device]['CPU']) == 0:
+                    # Missing results for this device
+                    final_correctness_flag = False
+                    consistent_flag = False
+                    err_msg += multi_correctness_errmsg[device]['-1']
+                else:
+                    for case in multi_correctness[device]['CPU']:
+                        if not multi_correctness[device]['CPU'][case]:
+                            final_correctness_flag = False
+                            consistent_flag = False
+                            err_msg += multi_correctness_errmsg[device][case]
+                        else:
+                            for channel in multi_correctness_devices[device]['GPU'][case]:
+                                for frame in multi_correctness_devices[device]['GPU'][case][channel]:
+                                    # A frame is consistent if, for every compared field, at least one physical device matches
+                                    frame_flag = True
+                                    for i in range(len(multi_correctness_devices[device]['GPU'][case][channel][frame])):
+                                        gpu_frame_consistent_flag = multi_correctness_devices[device]['GPU'][case][channel][frame][i]
+                                        cpu_frame_consistent_flag = multi_correctness_devices[device]['CPU'][case][channel][frame][i]
+                                        if not gpu_frame_consistent_flag and not cpu_frame_consistent_flag:
+                                            frame_flag = False
+                                    if not frame_flag:
+                                        final_correctness_flag = False
+                                        consistent_flag = False
+                                        err_msg += "Inconsistent result:\n\tDevice: {} - case: {} - channel: {} - frame: {}: {}\n".format(device, case, channel, frame, self.results[device][case][channel][frame])
+                                        for target in devices_list[device]:
+                                            err_msg += "\tDevice: {} - case: {} - channel: {} - frame: {}: {}\n".format(target, case, channel, frame, self.results[target][case][channel][frame])
+                                        for target in devices_list[device]:
+                                            err_msg += "\tDevice: {} - case: {} - channel: {} - frame: {}: {}\n".format(target, case, channel, frame, multi_correctness_devices[device][target][case][channel][frame])
+
+                if not consistent_flag:
+                    print("Checking device: {} - target: {}: Fail.\n{}".format(device, devices_list[device], err_msg))
+                else:
+                    print("Checking device: {} - target: {}: PASS.\n".format(device, devices_list[device]))
+        return final_correctness_flag
+
+    def write_to_log(self, result, test_case, device):
+        with open(self.filename, 'w') as f:
+            print(self.results, file=f)
+
+
+class DemoSecurityBarrierCamera(CorrectnessCheckerBase):
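+    # Correctness checker for the security_barrier_camera demo: collects the
+    # per-object detection, attribute and license plate results of each run,
+    # keyed by device, case, channel and frame.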
+    def __call__(self, output, test_case, device, execution_time=0):
+        # Parse the results from the raw demo output.
+        # Results format:
+        # {"device name":
+        #     {"case index 0":
+        #         {"channel id 0":
+        #             {"frame id 0":
+        #                 {"object id 0": [["label", "prob", "x", "y", "width", "height"], "Vehicle attribute", "License plate"],
+        #                  "object id 1": [["label", "prob", "x", "y", "width", "height"], "Vehicle attribute", "License plate"],
+        #                  ...
+        #                  "object id n": [["label", "prob", "x", "y", "width", "height"], "Vehicle attribute", "License plate"]},
+        #              ...
+        #              "frame id n": {...}},
+        #          ...
+        #          "channel id n": {...}},
+        #      ...
+        #      "case index n": {...}}
+        # }
+        # Generate a case id for each device
+        if device not in self.case_index:
+            self.case_index[device] = 0
+
+        if device not in self.results:
+            self.results[device] = {}
+
+        case_index = self.case_index[device]
+        if case_index not in self.results[device]:
+            self.results[device][case_index] = {}
+
+        # Parse the raw data
+        try:
+            output = [i.rstrip() for i in output.split('\n') if "DEBUG" in i and "ChannelId" in i]
+            for item in output:
+                line = item
+                item = item[item.find('ChannelId'):].split(',')
+                # Channel ID
+                channel = item[0].split(':')[1]
+                if channel not in self.results[device][case_index]:
+                    self.results[device][case_index][channel] = {}
+
+                # Frame ID
+                frame = item[1].split(':')[1]
+                if frame not in self.results[device][case_index][channel]:
+                    self.results[device][case_index][channel][frame] = {}
+
+                # Object ID
+                objid = item[2]
+                if objid not in self.results[device][case_index][channel][frame]:
+                    self.results[device][case_index][channel][frame][objid] = []
+                # The first tab-separated field holds the ROI values; the remaining
+                # fields hold the vehicle attributes and the license plate
+                roi_attr_license = ",".join(item[3:]).split('\t')
+                for index in range(len(roi_attr_license)):
+                    if index == 0:
+                        self.results[device][case_index][channel][frame][objid].append(roi_attr_license[index].split(','))
+                    else:
+                        self.results[device][case_index][channel][frame][objid].append(roi_attr_license[index])
+        except IndexError:
+            raise IndexError("Raw data format is invalid:\n\t{}".format(line))
+
+        self.case_index[device] += 1
+
+
+DEMOS = [
+    deepcopy(BASE['security_barrier_camera_demo/cpp'])
+    .update_option({'-r': None, '-ni': '16', '-n_iqs': '1', '-i': 'multi_images.mp4'})
+    .add_parser(DemoSecurityBarrierCamera)
+]
diff --git a/demos/tests/run_tests.py b/demos/tests/run_tests.py
index e05391cd379..22a95d8e944 100755
--- a/demos/tests/run_tests.py
+++ b/demos/tests/run_tests.py
@@ -50,6 +50,7 @@
 scopes = {
     'base': importlib.import_module('cases').DEMOS,
     'performance': importlib.import_module('performance_cases').DEMOS,
+    'correctness': importlib.import_module('correctness_cases').DEMOS,
 }
@@ -71,7 +72,7 @@ def parse_args():
                         help='list of demos to run tests for (by default, every demo is tested). '
                              'For testing demos of specific implementation pass one (or more) of the next values: cpp, cpp_gapi, python.')
     parser.add_argument('--scope', default='base',
-                        help='The scenario for testing demos.', choices=('base', 'performance'))
+                        help='The scenario for testing demos.', choices=('base', 'performance', 'correctness'))
     parser.add_argument('--mo', type=Path, metavar='MO.PY',
                         help='Model Optimizer entry point script')
     parser.add_argument('--devices', default="CPU GPU",
@@ -243,6 +244,7 @@ def main():
     dl_dir = prepare_models(auto_tools_dir, args.downloader_cache_dir, args.mo, global_temp_dir, demos_to_test, args.precisions)

     num_failures = 0
+    correctness_failures = 0

     try:
         pythonpath = f"{os.environ['PYTHONPATH']}{os.pathsep}"
@@ -261,7 +263,6 @@ def main():
             print(header)
             print()
             demo.set_precisions(args.precisions, model_info)
-
             declared_model_names = set()
             for model_data in json.loads(subprocess.check_output(
                     [sys.executable, '--', str(auto_tools_dir / 'info_dumper.py'),
@@ -338,8 +339,7 @@ def option_to_args(key, value):
                         exit_msg = f'Exit code: {e.returncode}\n'
                     elif isinstance(e, subprocess.TimeoutExpired):
                         exit_msg = f'Command timed out after {e.timeout} seconds\n'
-                    output += exit_msg
-                    print(output)
+                    print('{}\n{}'.format(output, exit_msg))
                     failed_tests.append(test_descr + '\n' + exit_msg)
                     num_failures += 1
                     execution_time = -1
@@ -351,14 +351,19 @@ def option_to_args(key, value):
                 write_log(header, args.log_file)
                 write_log(test_descr, args.log_file)
                 write_log(output, args.log_file)
-
+            if args.scope == "correctness":
+                print("Demo {} correctness checking...".format(demo.subdirectory))
+                if not demo.check_difference():
+                    correctness_failures += 1
+                    num_failures += 1
             print()
-
-    print("{} failures:".format(num_failures))
+    if args.scope == "correctness":
+        print("{} correctness checking failures".format(correctness_failures))
+    print("{} execution failures:".format(num_failures))
     for test in failed_tests:
         print(test)
-    sys.exit(0 if num_failures == 0 else 1)
+    sys.exit(0 if num_failures == 0 and correctness_failures == 0 else 1)

 if __name__ == '__main__':
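
Reviewer note: the tolerance rule in CorrectnessCheckerBase.compare_roi above treats any value inside the open interval (0, 1) as a probability that may drift by up to 0.1 between devices, while every other value is treated as a pixel quantity that may drift by up to 20. A minimal standalone sketch of that rule follows; rois_match and the sample rows are illustrative only and are not part of this patch:

    def rois_match(source_roi, dest_roi, prob_gap=0.1, pos_gap=20):
        # Mirrors compare_roi: probabilities use prob_gap, pixel values use pos_gap.
        if len(source_roi) != len(dest_roi):
            return False
        for src, dst in zip(map(float, source_roi), map(float, dest_roi)):
            gap = prob_gap if 0 < src < 1 else pos_gap
            if abs(src - dst) > gap:
                return False
        return True

    # Rows in "label, prob, x, y, w, h" order, e.g. the same object reported by CPU and GPU:
    print(rois_match(['1', '0.93', '100', '120', '80', '40'],
                     ['1', '0.88', '110', '125', '85', '38']))  # True: within both gaps
    print(rois_match(['1', '0.93', '100', '120', '80', '40'],
                     ['1', '0.70', '100', '120', '80', '40']))  # False: prob differs by 0.23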